@@ -204,12 +204,47 @@ stg_isMutableByteArrayWeaklyPinnedzh ( gcptr mba )
 stg_shrinkMutableByteArrayzh ( gcptr mba, W_ new_size )
 // MutableByteArray# s -> Int# -> State# s -> State# s
 {
-   ASSERT(new_size <= StgArrBytes_bytes(mba));
+   W_ old_size, old_wds, new_wds;
+   W_ bd;
+
+   old_size = StgArrBytes_bytes(mba);
+   ASSERT(new_size <= old_size);
+   old_wds = BYTES_TO_WDS(SIZEOF_StgArrBytes) + ROUNDUP_BYTES_TO_WDS(old_size);
+   new_wds = BYTES_TO_WDS(SIZEOF_StgArrBytes) + ROUNDUP_BYTES_TO_WDS(new_size);
+
+   // Try to shrink bd->free as well, to reclaim slop space at the end
+   // of current block and avoid unnecessary fragmentation. But we
+   // must guarantee that:
+   //
+   // 1. mba is already at the end of current block (check bd->free).
+   //    Otherwise we can't move closures that come after it anyway.
+   // 2. It's a nursery block that belongs to the current Capability,
+   //    so check rCurrentAlloc (used by allocateMightFail) or
+   //    pinned_object_block (used by allocatePinned). There's also no
+   //    point if it's an older generation block, the mutator won't
+   //    allocate into those blocks anyway.
+   //
+   // If check fails, fall back to the conservative code path: just
+   // zero the slop and return.
+   bd = Bdescr(mba);
+   if (bdescr_free(bd) != mba + WDS(old_wds) ||
+       (bd != StgRegTable_rCurrentAlloc(BaseReg) && bd != Capability_pinned_object_block(MyCapability()))) {
+      OVERWRITING_CLOSURE_MUTABLE(mba, new_wds);
+      StgArrBytes_bytes(mba) = new_size;
+      // No need to call PROF_HEADER_CREATE. See Note [LDV profiling and resizing arrays]
+      return ();
+   }
 
-   OVERWRITING_CLOSURE_MUTABLE(mba, (BYTES_TO_WDS(SIZEOF_StgArrBytes) +
-                                     ROUNDUP_BYTES_TO_WDS(new_size)));
+   // Check passes, we can shrink bd->free! Also uninitialize the slop
+   // if zero_on_gc is enabled, to conform with debug RTS convention.
    StgArrBytes_bytes(mba) = new_size;
-   // No need to call PROF_HEADER_CREATE. See Note [LDV profiling and resizing arrays]
+   IF_DEBUG(zero_on_gc,
+            prim %memset(mba + WDS(new_wds),
+                         0xaa,
+                         WDS(old_wds - new_wds),
+                         1);
+           );
+   bdescr_free(bd) = mba + WDS(new_wds);
 
    return ();
 }
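
For context, a minimal illustrative Haskell sketch (not part of this patch) of the user-facing primop that the Cmm above implements. It assumes only the GHC.Exts exports newByteArray#, shrinkMutableByteArray#, and getSizeofMutableByteArray#; after this change the shrink may additionally hand the freed slop back to the current nursery block instead of merely overwriting it.

{-# LANGUAGE MagicHash, UnboxedTuples #-}
module Main (main) where

import GHC.Exts
import GHC.IO (IO (..))

-- Allocate 1024 bytes, shrink the array in place to 16 bytes,
-- then read back the recorded size.
shrinkDemo :: IO Int
shrinkDemo = IO $ \s0 ->
  case newByteArray# 1024# s0 of
    (# s1, mba #) ->
      case shrinkMutableByteArray# mba 16# s1 of
        s2 -> case getSizeofMutableByteArray# mba s2 of
          (# s3, n #) -> (# s3, I# n #)

main :: IO ()
main = shrinkDemo >>= print   -- prints 16
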
@@ -223,18 +258,10 @@ stg_shrinkMutableByteArrayzh ( gcptr mba, W_ new_size )
 stg_resizzeMutableByteArrayzh ( gcptr mba, W_ new_size )
 // MutableByteArray# s -> Int# -> State# s -> (# State# s,MutableByteArray# s #)
 {
-   W_ new_size_wds;
-
-   ASSERT(new_size >= 0);
-
-   new_size_wds = ROUNDUP_BYTES_TO_WDS(new_size);
-
-   if (new_size_wds <= BYTE_ARR_WDS(mba)) {
-      OVERWRITING_CLOSURE_MUTABLE(mba, (BYTES_TO_WDS(SIZEOF_StgArrBytes) +
-                                        new_size_wds));
-      StgArrBytes_bytes(mba) = new_size;
-      // No need to call PROF_HEADER_CREATE. See Note [LDV profiling and resizing arrays]
+   ASSERT(new_size `ge` 0);
 
+   if (new_size <= StgArrBytes_bytes(mba)) {
+      call stg_shrinkMutableByteArrayzh(mba, new_size);
       return (mba);
    } else {
       (P_ new_mba) = call stg_newByteArrayzh(new_size);