@@ -606,7 +606,7 @@ static
 bool check_in_nonmoving_heap(StgClosure *p) {
     if (HEAP_ALLOCED_GC(p)) {
         // This works for both large and small objects:
-        return Bdescr((P_)p)->flags & BF_NONMOVING;
+        return block_get_flags(Bdescr((P_)p)) & BF_NONMOVING;
     } else {
         return true; // a static object
     }
@@ -619,7 +619,7 @@ inline void updateRemembSetPushThunk(Capability *cap, StgThunk *thunk)
 {
     const StgInfoTable *info;
     do {
-        info = *(StgInfoTable* volatile*) &thunk->header.info;
+        info = (StgInfoTable*) RELAXED_LOAD(&thunk->header.info);
     } while (info == &stg_WHITEHOLE_info);
 
     const StgThunkInfoTable *thunk_info = THUNK_INFO_PTR_TO_STRUCT(info);
@@ -722,13 +722,14 @@ STATIC_INLINE bool needs_upd_rem_set_mark(StgClosure *p)
 {
     // TODO: Deduplicate with mark_closure
     bdescr *bd = Bdescr((StgPtr) p);
+    uint16_t flags = block_get_flags(bd);
     if (bd->gen != oldest_gen) {
         return false;
-    } else if (bd->flags & BF_LARGE) {
-        if (! (bd->flags & BF_NONMOVING_SWEEPING)) {
+    } else if (flags & BF_LARGE) {
+        if (! (flags & BF_NONMOVING_SWEEPING)) {
             return false;
         } else {
-            return ! (bd->flags & BF_MARKED);
+            return ! (flags & BF_MARKED);
         }
     } else {
         struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
@@ -740,8 +741,8 @@ STATIC_INLINE bool needs_upd_rem_set_mark(StgClosure *p)
 static void finish_upd_rem_set_mark_large(bdescr* bd) {
     // Someone else may have already marked it.
     ACQUIRE_LOCK(&nonmoving_large_objects_mutex);
-    if (! (bd->flags & BF_MARKED)) {
-        bd->flags |= BF_MARKED;
+    if (! (block_get_flags(bd) & BF_MARKED)) {
+        block_set_flag(bd, BF_MARKED);
         dbl_link_remove(bd, &nonmoving_large_objects);
         dbl_link_onto(bd, &nonmoving_marked_large_objects);
         n_nonmoving_large_blocks -= bd->blocks;
@@ -754,7 +755,7 @@ static void finish_upd_rem_set_mark_large(bdescr* bd) {
 STATIC_INLINE void finish_upd_rem_set_mark(StgClosure *p)
 {
     bdescr *bd = Bdescr((StgPtr) p);
-    if (bd->flags & BF_LARGE) {
+    if (block_get_flags(bd) & BF_LARGE) {
         // This function is extracted so that this function can be inline
         finish_upd_rem_set_mark_large(bd);
     } else {
@@ -1343,7 +1344,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
         goto done;
 
     case WHITEHOLE:
-        while (*(StgInfoTable* volatile*) &p->header.info == &stg_WHITEHOLE_info)
+        while (RELAXED_LOAD(&p->header.info) == &stg_WHITEHOLE_info)
 #if defined(PARALLEL_GC)
             busy_wait_nop()
 #endif
@@ -1377,35 +1378,36 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
 
     // N.B. only the first block of a compact region is guaranteed to carry
     // BF_NONMOVING; consequently we must separately check for BF_COMPACT.
-    if (bd->flags & (BF_COMPACT | BF_NONMOVING)) {
+    const uint16_t flags = block_get_flags(bd);
+    if (flags & (BF_COMPACT | BF_NONMOVING)) {
 
-        if (bd->flags & BF_COMPACT) {
+        if (flags & BF_COMPACT) {
             StgCompactNFData *str = objectGetCompact((StgClosure*)p);
             bd = Bdescr((P_)str);
 
-            if (! (bd->flags & BF_NONMOVING_SWEEPING)) {
+            if (! (flags & BF_NONMOVING_SWEEPING)) {
                 // Not in the snapshot
                 return;
             }
 
-            if (! (bd->flags & BF_MARKED)) {
+            if (! (flags & BF_MARKED)) {
                 dbl_link_remove(bd, &nonmoving_compact_objects);
                 dbl_link_onto(bd, &nonmoving_marked_compact_objects);
                 StgWord blocks = str->totalW / BLOCK_SIZE_W;
                 n_nonmoving_compact_blocks -= blocks;
                 n_nonmoving_marked_compact_blocks += blocks;
-                bd->flags |= BF_MARKED;
+                block_set_flag(bd, BF_MARKED);
             }
 
             // N.B. the object being marked is in a compact region so by
             // definition there is no need to do any tracing here.
             goto done;
-        } else if (bd->flags & BF_LARGE) {
-            if (! (bd->flags & BF_NONMOVING_SWEEPING)) {
+        } else if (flags & BF_LARGE) {
+            if (! (flags & BF_NONMOVING_SWEEPING)) {
                 // Not in the snapshot
                 goto done;
             }
-            if (bd->flags & BF_MARKED) {
+            if (flags & BF_MARKED) {
                 goto done;
             }
         } else {
@@ -1713,7 +1715,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
         break;
 
     case WHITEHOLE:
-        while (*(StgInfoTable* volatile*) &p->header.info == &stg_WHITEHOLE_info);
+        while ((StgInfoTable *) RELAXED_LOAD(&p->header.info) == &stg_WHITEHOLE_info);
         goto try_again;
 
     case COMPACT_NFDATA:
@@ -1737,24 +1739,25 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
      * the object's pointers since in the case of marking stacks there may be a
      * mutator waiting for us to finish so it can start execution.
      */
-    if (bd->flags & BF_LARGE) {
+    uint16_t bd_flags = block_get_flags(bd);
+    if (bd_flags & BF_LARGE) {
         /* Marking a large object isn't idempotent since we move it to
          * nonmoving_marked_large_objects; to ensure that we don't repeatedly
          * mark a large object, we only set BF_MARKED on large objects in the
          * nonmoving heap while holding nonmoving_large_objects_mutex
          */
         ACQUIRE_LOCK(&nonmoving_large_objects_mutex);
-        if (! (bd->flags & BF_MARKED)) {
+        if (! (bd_flags & BF_MARKED)) {
             // Remove the object from nonmoving_large_objects and link it to
             // nonmoving_marked_large_objects
             dbl_link_remove(bd, &nonmoving_large_objects);
             dbl_link_onto(bd, &nonmoving_marked_large_objects);
             n_nonmoving_large_blocks -= bd->blocks;
             n_nonmoving_marked_large_blocks += bd->blocks;
-            bd->flags |= BF_MARKED;
+            block_set_flag(bd, BF_MARKED);
         }
         RELEASE_LOCK(&nonmoving_large_objects_mutex);
-    } else if (bd->flags & BF_NONMOVING) {
+    } else if (bd_flags & BF_NONMOVING) {
         // TODO: Kill repetition
         struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
         nonmoving_block_idx block_idx = nonmovingGetBlockIdx((StgPtr) p);
@@ -1769,7 +1772,7 @@ mark_closure (MarkQueue *queue, const StgClosure *p0, StgClosure **origin)
     }
 
 done:
-    if (origin != NULL && (!HEAP_ALLOCED(p) || bd->flags & BF_NONMOVING)) {
+    if (origin != NULL && (!HEAP_ALLOCED(p) || block_get_flags(bd) & BF_NONMOVING)) {
         if (UNTAG_CLOSURE((StgClosure*)p0) != p && *origin == p0) {
             if (cas((StgVolatilePtr)origin, (StgWord)p0, (StgWord)TAG_CLOSURE(tag, p)) == (StgWord)p0) {
                 // debugBelch("Thunk optimization successful\n");
@@ -1866,19 +1869,20 @@ bool nonmovingIsAlive (StgClosure *p)
     }
 
     bdescr *bd = Bdescr((P_)p);
+    uint16_t bd_flags = block_get_flags(bd);
 
     // All non-static objects in the non-moving heap should be marked as
     // BF_NONMOVING
-    ASSERT(bd->flags & BF_NONMOVING);
+    ASSERT(bd_flags & BF_NONMOVING);
 
-    if (bd->flags & (BF_COMPACT | BF_LARGE)) {
-        if (bd->flags & BF_COMPACT) {
+    if (bd_flags & (BF_COMPACT | BF_LARGE)) {
+        if (bd_flags & BF_COMPACT) {
             StgCompactNFData *str = objectGetCompact((StgClosure*)p);
             bd = Bdescr((P_)str);
         }
-        return (bd->flags & BF_NONMOVING_SWEEPING) == 0
+        return (bd_flags & BF_NONMOVING_SWEEPING) == 0
             // the large object wasn't in the snapshot and therefore wasn't marked
-            || (bd->flags & BF_MARKED) != 0;
+            || (bd_flags & BF_MARKED) != 0;
             // The object was marked
     } else {
         struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
@@ -1932,8 +1936,8 @@ static bool nonmovingIsNowAlive (StgClosure *p)
     }
 
     bdescr *bd = Bdescr((P_)p);
+    const uint16_t flags = block_get_flags(bd);
 
-    const uint16_t flags = bd->flags;
     if (flags & BF_LARGE) {
         if (flags & BF_PINNED && !(flags & BF_NONMOVING)) {
             // In this case we have a pinned object living in a non-full
|
1943
|
1947
|
return true;
|
|
1944
|
1948
|
}
|
|
1945
|
1949
|
|
|
1946
|
|
- ASSERT(bd->flags & BF_NONMOVING);
|
|
1947
|
|
- return (bd->flags & BF_NONMOVING_SWEEPING) == 0
|
|
|
1950
|
+ ASSERT(flags & BF_NONMOVING);
|
|
|
1951
|
+ return (flags & BF_NONMOVING_SWEEPING) == 0
|
|
1948
|
1952
|
// the large object wasn't in the snapshot and therefore wasn't marked
|
|
1949
|
|
- || (bd->flags & BF_MARKED) != 0;
|
|
|
1953
|
+ || (flags & BF_MARKED) != 0;
|
|
1950
|
1954
|
// The object was marked
|
|
1951
|
1955
|
} else {
|
|
1952
|
1956
|
// All non-static objects in the non-moving heap should be marked as
|
|
1953
|
1957
|
// BF_NONMOVING.
|
|
1954
|
|
- ASSERT(bd->flags & BF_NONMOVING);
|
|
|
1958
|
+ ASSERT(flags & BF_NONMOVING);
|
|
1955
|
1959
|
|
|
1956
|
1960
|
struct NonmovingSegment *seg = nonmovingGetSegment((StgPtr) p);
|
|
1957
|
1961
|
StgClosure *snapshot_loc =
|
| ... |
... |
@@ -2014,7 +2018,8 @@ bool nonmovingTidyWeaks (struct MarkQueue_ *queue) |
|
2014
|
2018
|
|
|
2015
|
2019
|
// See Note [Weak pointer processing and the non-moving GC] in
|
|
2016
|
2020
|
// MarkWeak.c
|
|
2017
|
|
- bool key_in_nonmoving = HEAP_ALLOCED_GC(w->key) && Bdescr((StgPtr) w->key)->flags & BF_NONMOVING;
|
|
|
2021
|
+ bdescr *key_bd = Bdescr((StgPtr) w->key);
|
|
|
2022
|
+ bool key_in_nonmoving = HEAP_ALLOCED_GC(w->key) && block_get_flags(key_bd) & BF_NONMOVING;
|
|
2018
|
2023
|
if (!key_in_nonmoving || nonmovingIsNowAlive(w->key)) {
|
|
2019
|
2024
|
nonmovingMarkLiveWeak(queue, w);
|
|
2020
|
2025
|
did_work = true;
|