Ben Gamari pushed to branch wip/T26166 at Glasgow Haskell Compiler / GHC

Commits:

1 changed file:

  • rts/prim/atomic.c

Changes:

  • rts/prim/atomic.c
    @@ -12,90 +12,66 @@

     // FetchAddByteArrayOp_Int

    -extern StgWord hs_atomic_add8(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_add8(StgWord x, StgWord val)
    +StgWord hs_atomic_add8(StgWord x, StgWord val)
     {
       return __sync_fetch_and_add((volatile StgWord8 *) x, (StgWord8) val);
     }

    -extern StgWord hs_atomic_add16(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_add16(StgWord x, StgWord val)
    +StgWord hs_atomic_add16(StgWord x, StgWord val)
     {
       return __sync_fetch_and_add((volatile StgWord16 *) x, (StgWord16) val);
     }

    -extern StgWord hs_atomic_add32(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_add32(StgWord x, StgWord val)
    +StgWord hs_atomic_add32(StgWord x, StgWord val)
     {
       return __sync_fetch_and_add((volatile StgWord32 *) x, (StgWord32) val);
     }

    -extern StgWord64 hs_atomic_add64(StgWord x, StgWord64 val);
    -StgWord64
    -hs_atomic_add64(StgWord x, StgWord64 val)
    +StgWord64 hs_atomic_add64(StgWord x, StgWord64 val)
     {
       return __sync_fetch_and_add((volatile StgWord64 *) x, val);
     }

     // FetchSubByteArrayOp_Int

    -extern StgWord hs_atomic_sub8(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_sub8(StgWord x, StgWord val)
    +StgWord hs_atomic_sub8(StgWord x, StgWord val)
     {
       return __sync_fetch_and_sub((volatile StgWord8 *) x, (StgWord8) val);
     }

    -extern StgWord hs_atomic_sub16(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_sub16(StgWord x, StgWord val)
    +StgWord hs_atomic_sub16(StgWord x, StgWord val)
     {
       return __sync_fetch_and_sub((volatile StgWord16 *) x, (StgWord16) val);
     }

    -extern StgWord hs_atomic_sub32(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_sub32(StgWord x, StgWord val)
    +StgWord hs_atomic_sub32(StgWord x, StgWord val)
     {
       return __sync_fetch_and_sub((volatile StgWord32 *) x, (StgWord32) val);
     }

    -extern StgWord64 hs_atomic_sub64(StgWord x, StgWord64 val);
    -StgWord64
    -hs_atomic_sub64(StgWord x, StgWord64 val)
    +StgWord64 hs_atomic_sub64(StgWord x, StgWord64 val)
     {
       return __sync_fetch_and_sub((volatile StgWord64 *) x, val);
     }

     // FetchAndByteArrayOp_Int

    -extern StgWord hs_atomic_and8(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_and8(StgWord x, StgWord val)
    +StgWord hs_atomic_and8(StgWord x, StgWord val)
     {
       return __sync_fetch_and_and((volatile StgWord8 *) x, (StgWord8) val);
     }

    -extern StgWord hs_atomic_and16(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_and16(StgWord x, StgWord val)
    +StgWord hs_atomic_and16(StgWord x, StgWord val)
     {
       return __sync_fetch_and_and((volatile StgWord16 *) x, (StgWord16) val);
     }

    -extern StgWord hs_atomic_and32(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_and32(StgWord x, StgWord val)
    +StgWord hs_atomic_and32(StgWord x, StgWord val)
     {
       return __sync_fetch_and_and((volatile StgWord32 *) x, (StgWord32) val);
     }

    -extern StgWord64 hs_atomic_and64(StgWord x, StgWord64 val);
    -StgWord64
    -hs_atomic_and64(StgWord x, StgWord64 val)
    +StgWord64 hs_atomic_and64(StgWord x, StgWord64 val)
     {
       return __sync_fetch_and_and((volatile StgWord64 *) x, val);
     }

    @@ -167,9 +143,7 @@ hs_atomic_and64(StgWord x, StgWord64 val)
     #pragma GCC diagnostic ignored "-Wsync-nand"
     #endif

    -extern StgWord hs_atomic_nand8(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_nand8(StgWord x, StgWord val)
    +StgWord hs_atomic_nand8(StgWord x, StgWord val)
     {
     #if USE_SYNC_FETCH_AND_NAND
       return __sync_fetch_and_nand((volatile StgWord8 *) x, (StgWord8) val);

    @@ -178,9 +152,7 @@ hs_atomic_nand8(StgWord x, StgWord val)
     #endif
     }

    -extern StgWord hs_atomic_nand16(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_nand16(StgWord x, StgWord val)
    +StgWord hs_atomic_nand16(StgWord x, StgWord val)
     {
     #if USE_SYNC_FETCH_AND_NAND
       return __sync_fetch_and_nand((volatile StgWord16 *) x, (StgWord16) val);

    @@ -189,9 +161,7 @@ hs_atomic_nand16(StgWord x, StgWord val)
     #endif
     }

    -extern StgWord hs_atomic_nand32(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_nand32(StgWord x, StgWord val)
    +StgWord hs_atomic_nand32(StgWord x, StgWord val)
     {
     #if USE_SYNC_FETCH_AND_NAND
       return __sync_fetch_and_nand((volatile StgWord32 *) x, (StgWord32) val);

    @@ -200,9 +170,7 @@ hs_atomic_nand32(StgWord x, StgWord val)
     #endif
     }

    -extern StgWord64 hs_atomic_nand64(StgWord x, StgWord64 val);
    -StgWord64
    -hs_atomic_nand64(StgWord x, StgWord64 val)
    +StgWord64 hs_atomic_nand64(StgWord x, StgWord64 val)
     {
     #if USE_SYNC_FETCH_AND_NAND
       return __sync_fetch_and_nand((volatile StgWord64 *) x, val);

    @@ -215,96 +183,72 @@ hs_atomic_nand64(StgWord x, StgWord64 val)

     // FetchOrByteArrayOp_Int

    -extern StgWord hs_atomic_or8(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_or8(StgWord x, StgWord val)
    +StgWord hs_atomic_or8(StgWord x, StgWord val)
     {
       return __sync_fetch_and_or((volatile StgWord8 *) x, (StgWord8) val);
     }

    -extern StgWord hs_atomic_or16(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_or16(StgWord x, StgWord val)
    +StgWord hs_atomic_or16(StgWord x, StgWord val)
     {
       return __sync_fetch_and_or((volatile StgWord16 *) x, (StgWord16) val);
     }

    -extern StgWord hs_atomic_or32(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_or32(StgWord x, StgWord val)
    +StgWord hs_atomic_or32(StgWord x, StgWord val)
     {
       return __sync_fetch_and_or((volatile StgWord32 *) x, (StgWord32) val);
     }

    -extern StgWord64 hs_atomic_or64(StgWord x, StgWord64 val);
    -StgWord64
    -hs_atomic_or64(StgWord x, StgWord64 val)
    +StgWord64 hs_atomic_or64(StgWord x, StgWord64 val)
     {
       return __sync_fetch_and_or((volatile StgWord64 *) x, val);
     }

     // FetchXorByteArrayOp_Int

    -extern StgWord hs_atomic_xor8(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_xor8(StgWord x, StgWord val)
    +StgWord hs_atomic_xor8(StgWord x, StgWord val)
     {
       return __sync_fetch_and_xor((volatile StgWord8 *) x, (StgWord8) val);
     }

    -extern StgWord hs_atomic_xor16(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_xor16(StgWord x, StgWord val)
    +StgWord hs_atomic_xor16(StgWord x, StgWord val)
     {
       return __sync_fetch_and_xor((volatile StgWord16 *) x, (StgWord16) val);
     }

    -extern StgWord hs_atomic_xor32(StgWord x, StgWord val);
    -StgWord
    -hs_atomic_xor32(StgWord x, StgWord val)
    +StgWord hs_atomic_xor32(StgWord x, StgWord val)
     {
       return __sync_fetch_and_xor((volatile StgWord32 *) x, (StgWord32) val);
     }

    -extern StgWord64 hs_atomic_xor64(StgWord x, StgWord64 val);
    -StgWord64
    -hs_atomic_xor64(StgWord x, StgWord64 val)
    +StgWord64 hs_atomic_xor64(StgWord x, StgWord64 val)
     {
       return __sync_fetch_and_xor((volatile StgWord64 *) x, val);
     }

     // CasByteArrayOp_Int

    -extern StgWord hs_cmpxchg8(StgWord x, StgWord old, StgWord new);
    -StgWord
    -hs_cmpxchg8(StgWord x, StgWord old, StgWord new)
    +StgWord hs_cmpxchg8(StgWord x, StgWord old, StgWord new)
     {
       StgWord8 expected = (StgWord8) old;
       __atomic_compare_exchange_n((StgWord8 *) x, &expected, (StgWord8) new, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
       return expected;
     }

    -extern StgWord hs_cmpxchg16(StgWord x, StgWord old, StgWord new);
    -StgWord
    -hs_cmpxchg16(StgWord x, StgWord old, StgWord new)
    +StgWord hs_cmpxchg16(StgWord x, StgWord old, StgWord new)
     {
       StgWord16 expected = (StgWord16) old;
       __atomic_compare_exchange_n((StgWord16 *) x, &expected, (StgWord16) new, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
       return expected;
     }

    -extern StgWord hs_cmpxchg32(StgWord x, StgWord old, StgWord new);
    -StgWord
    -hs_cmpxchg32(StgWord x, StgWord old, StgWord new)
    +StgWord hs_cmpxchg32(StgWord x, StgWord old, StgWord new)
     {
       StgWord32 expected = (StgWord32) old;
       __atomic_compare_exchange_n((StgWord32 *) x, &expected, (StgWord32) new, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
       return expected;
     }

    -extern StgWord64 hs_cmpxchg64(StgWord x, StgWord64 old, StgWord64 new);
    -StgWord64
    -hs_cmpxchg64(StgWord x, StgWord64 old, StgWord64 new)
    +StgWord64 hs_cmpxchg64(StgWord x, StgWord64 old, StgWord64 new)
     {
       StgWord64 expected = (StgWord64) old;
       __atomic_compare_exchange_n((StgWord64 *) x, &expected, (StgWord64) new, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

    @@ -313,31 +257,23 @@ hs_cmpxchg64(StgWord x, StgWord64 old, StgWord64 new)

     // Atomic exchange operations

    -extern StgWord hs_xchg8(StgWord x, StgWord val);
    -StgWord
    -hs_xchg8(StgWord x, StgWord val)
    +StgWord hs_xchg8(StgWord x, StgWord val)
     {
       return (StgWord) __atomic_exchange_n((StgWord8 *) x, (StgWord8) val, __ATOMIC_SEQ_CST);
     }

    -extern StgWord hs_xchg16(StgWord x, StgWord val);
    -StgWord
    -hs_xchg16(StgWord x, StgWord val)
    +StgWord hs_xchg16(StgWord x, StgWord val)
     {
       return (StgWord) __atomic_exchange_n((StgWord16 *)x, (StgWord16) val, __ATOMIC_SEQ_CST);
     }

    -extern StgWord hs_xchg32(StgWord x, StgWord val);
    -StgWord
    -hs_xchg32(StgWord x, StgWord val)
    +StgWord hs_xchg32(StgWord x, StgWord val)
     {
       return (StgWord) __atomic_exchange_n((StgWord32 *) x, (StgWord32) val, __ATOMIC_SEQ_CST);
     }

     //GCC provides this even on 32bit, but StgWord is still 32 bits.
    -extern StgWord64 hs_xchg64(StgWord x, StgWord64 val);
    -StgWord64
    -hs_xchg64(StgWord x, StgWord64 val)
    +StgWord64 hs_xchg64(StgWord x, StgWord64 val)
     {
       return (StgWord64) __atomic_exchange_n((StgWord64 *) x, (StgWord64) val, __ATOMIC_SEQ_CST);
     }

    @@ -352,30 +288,22 @@ hs_xchg64(StgWord x, StgWord64 val)
     // primitives which the GCC documentation claims "usually" implies a full
     // barrier.

    -extern StgWord hs_atomicread8(StgWord x);
    -StgWord
    -hs_atomicread8(StgWord x)
    +StgWord hs_atomicread8(StgWord x)
     {
       return __atomic_load_n((StgWord8 *) x, __ATOMIC_SEQ_CST);
     }

    -extern StgWord hs_atomicread16(StgWord x);
    -StgWord
    -hs_atomicread16(StgWord x)
    +StgWord hs_atomicread16(StgWord x)
     {
       return __atomic_load_n((StgWord16 *) x, __ATOMIC_SEQ_CST);
     }

    -extern StgWord hs_atomicread32(StgWord x);
    -StgWord
    -hs_atomicread32(StgWord x)
    +StgWord hs_atomicread32(StgWord x)
     {
       return __atomic_load_n((StgWord32 *) x, __ATOMIC_SEQ_CST);
     }

    -extern StgWord64 hs_atomicread64(StgWord x);
    -StgWord64
    -hs_atomicread64(StgWord x)
    +StgWord64 hs_atomicread64(StgWord x)
     {
       return __atomic_load_n((StgWord64 *) x, __ATOMIC_SEQ_CST);
     }

    @@ -384,30 +312,22 @@ hs_atomicread64(StgWord x)
     // Implies a full memory barrier (see compiler/GHC/Builtin/primops.txt.pp)
     // __ATOMIC_SEQ_CST: Full barrier (see hs_atomicread8 above).

    -extern void hs_atomicwrite8(StgWord x, StgWord val);
    -void
    -hs_atomicwrite8(StgWord x, StgWord val)
    +void hs_atomicwrite8(StgWord x, StgWord val)
     {
       __atomic_store_n((StgWord8 *) x, (StgWord8) val, __ATOMIC_SEQ_CST);
     }

    -extern void hs_atomicwrite16(StgWord x, StgWord val);
    -void
    -hs_atomicwrite16(StgWord x, StgWord val)
    +void hs_atomicwrite16(StgWord x, StgWord val)
     {
       __atomic_store_n((StgWord16 *) x, (StgWord16) val, __ATOMIC_SEQ_CST);
     }

    -extern void hs_atomicwrite32(StgWord x, StgWord val);
    -void
    -hs_atomicwrite32(StgWord x, StgWord val)
    +void hs_atomicwrite32(StgWord x, StgWord val)
     {
       __atomic_store_n((StgWord32 *) x, (StgWord32) val, __ATOMIC_SEQ_CST);
     }

    -extern void hs_atomicwrite64(StgWord x, StgWord64 val);
    -void
    -hs_atomicwrite64(StgWord x, StgWord64 val)
    +void hs_atomicwrite64(StgWord x, StgWord64 val)
     {
       __atomic_store_n((StgWord64 *) x, (StgWord64) val, __ATOMIC_SEQ_CST);
     }