Bug Summary

File: platform/mac/avmshell/../../../MMgc/GC.cpp
Location: line 894, column 13
Description: Array access (from variable 'allocs') results in a null pointer dereference
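
For orientation, here is a minimal standalone sketch (not taken from the MMgc sources; all names are hypothetical) of the code shape this checker reports: a pointer starts out NULL, the switch that assigns it has no default label, and the analyzer therefore keeps a path on which the pointer is still NULL at the array access.

    #include <cstddef>

    struct Pool { void CoalesceQuickList() {} };

    Pool* poolsA[40];
    Pool* poolsB[40];
    Pool* poolsC[40];

    void Victimize(unsigned victimIterator)
    {
        Pool** allocs = NULL;           // starts out null
        switch (victimIterator % 3) {   // no 'default:' label
            case 0: allocs = poolsA; break;
            case 1: allocs = poolsB; break;
            case 2: allocs = poolsC; break;
        }
        // The analyzer models an implicit 'default' branch that skips all three
        // assignments, so it reports a possible null dereference here even though
        // victimIterator % 3 can only be 0, 1, or 2.
        allocs[victimIterator / 3]->CoalesceQuickList();
    }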

Annotated Source Code

1/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
2/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
3/* ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5 *
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
10 *
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
14 * License.
15 *
16 * The Original Code is [Open Source Virtual Machine.].
17 *
18 * The Initial Developer of the Original Code is
19 * Adobe System Incorporated.
20 * Portions created by the Initial Developer are Copyright (C) 2004-2006
21 * the Initial Developer. All Rights Reserved.
22 *
23 * Contributor(s):
24 * Adobe AS3 Team
25 * leon.sha@sun.com
26 *
27 * Alternatively, the contents of this file may be used under the terms of
28 * either the GNU General Public License Version 2 or later (the "GPL"), or
29 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
30 * in which case the provisions of the GPL or the LGPL are applicable instead
31 * of those above. If you wish to allow use of your version of this file only
32 * under the terms of either the GPL or the LGPL, and not to allow others to
33 * use your version of this file under the terms of the MPL, indicate your
34 * decision by deleting the provisions above and replace them with the notice
35 * and other provisions required by the GPL or the LGPL. If you do not delete
36 * the provisions above, a recipient may use your version of this file under
37 * the terms of any one of the MPL, the GPL or the LGPL.
38 *
39 * ***** END LICENSE BLOCK ***** */
40
41#include "MMgc.h"
42#include "StaticAssert.h"
43
44#ifdef AVMPLUS_SAMPLER
45 //sampling support
46#include "avmplus.h"
47#else
48#define SAMPLE_FRAME(_x, _s)
49#define SAMPLE_CHECK()
50#endif
51
52namespace MMgc
53{
54#ifdef MMGC_MEMORY_PROFILER
55 // get detailed info on each size class allocators
56 const bool dumpSizeClassState = false;
57#endif
58
59 /*virtual*/
60 void GCCallback::presweep() {}
61
62 /*virtual*/
63 void GCCallback::postsweep() {}
64
65 /*virtual*/
66 void GCCallback::prereap() {}
67
68 /*virtual*/
69 void GCCallback::postreap() {}
70
71 /*virtual*/
72 void GCCallback::prereap(void* /*rcobj*/) {}
73
74 ////////////// GC //////////////////////////////////////////////////////////
75
76 // Size classes for our GC. From 8 to 128, size classes are spaced
77 // evenly 8 bytes apart. The finest granularity we can achieve is
78 // 8 bytes, as pointers must be 8-byte aligned thanks to our use
79 // of the bottom 3 bits of 32-bit atoms for Special Purposes.
80 // Above that, the size classes have been chosen to maximize the
81 // number of items that fit in a 4096-byte block, given our block
82 // header information.
83 const int16_t GC::kSizeClasses[kNumSizeClasses] = {
84 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, //0-9
85 88, 96, 104, 112, 120, 128, 144, 160, 168, 176, //10-19
86 184, 192, 200, 216, 224, 240, 256, 280, 296, 328, //20-29
87 352, 392, 432, 488, 560, 656, 784, 984, 1312, 1968 //30-39
88 };
89
90 /* kSizeClassIndex[] generated with this code:
91 kSizeClassIndex[0] = 0;
92 for (var i:int = 1; i < kNumSizeClasses; i++)
93 for (var j:int = (kSizeClasses[i-1]>>3), n=(kSizeClasses[i]>>3); j < n; j++)
94 kSizeClassIndex[j] = i;
95 */
96
97 // index'd by size>>3 - 1
98 const uint8_t GC::kSizeClassIndex[246] = {
99 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
100 10, 11, 12, 13, 14, 15, 16, 16, 17, 17,
101 18, 19, 20, 21, 22, 23, 23, 24, 25, 25,
102 26, 26, 27, 27, 27, 28, 28, 29, 29, 29,
103 29, 30, 30, 30, 31, 31, 31, 31, 31, 32,
104 32, 32, 32, 32, 33, 33, 33, 33, 33, 33,
105 33, 34, 34, 34, 34, 34, 34, 34, 34, 34,
106 35, 35, 35, 35, 35, 35, 35, 35, 35, 35,
107 35, 35, 36, 36, 36, 36, 36, 36, 36, 36,
108 36, 36, 36, 36, 36, 36, 36, 36, 37, 37,
109 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
110 37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
111 37, 37, 37, 38, 38, 38, 38, 38, 38, 38,
112 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
113 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
114 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
115 38, 38, 38, 38, 39, 39, 39, 39, 39, 39,
116 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
117 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
118 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
119 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
120 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
121 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
122 39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
123 39, 39, 39, 39, 39, 39
124 };
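
As a sanity check, the generation rule quoted above can be replayed against the hard-coded table. A small standalone sketch (not part of MMgc; it assumes kNumSizeClasses == 40 and copies the array sizes from this file, while the check harness itself is hypothetical):

    #include <cassert>
    #include <stdint.h>

    // Rebuild the index table from kSizeClasses using the rule quoted in the
    // comment above, then compare it against the hard-coded kSizeClassIndex.
    static void CheckSizeClassIndex(const int16_t (&kSizeClasses)[40],
                                    const uint8_t (&kSizeClassIndex)[246])
    {
        uint8_t expected[246];
        expected[0] = 0;
        for (int i = 1; i < 40; i++)
            for (int j = kSizeClasses[i-1] >> 3, n = kSizeClasses[i] >> 3; j < n; j++)
                expected[j] = (uint8_t)i;
        for (int j = 0; j < 246; j++)
            assert(expected[j] == kSizeClassIndex[j]);
    }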
125
126#ifdef _MSC_VER
127# pragma warning(push)
128# pragma warning(disable:4355) // 'this': used in base member initializer list
129#endif
130
131 GC::GC(GCHeap *gcheap, GCConfig& config)
132 :
133 greedy(config.mode == GCConfig::kGreedyGC),
134 nogc(config.mode == GCConfig::kDisableGC),
135 incremental(config.mode == GCConfig::kIncrementalGC),
136 drcEnabled(config.mode != GCConfig::kDisableGC && config.drc),
137 findUnmarkedPointers(false),
138#ifdef DEBUG
139 validateDefRef(config.validateDRC),
140 keepDRCHistory(config.validateDRC),
141#endif
142 dontAddToZCTDuringCollection(false),
143 incrementalValidation(config.incrementalValidation),
144#ifdef _DEBUG
145 // check for missing write barriers at every Alloc
146 incrementalValidationPedantic(false),
147#endif
148 rcRootSegments(NULL),
149 policy(this, gcheap, config),
150 t0(VMPI_getPerformanceCounter()),
151 lastStartMarkIncrementCount(0),
152 sweeps(0),
153 sweepStart(0),
154 emptyWeakRef(0),
155 m_gcThread(0),
156 destroying(false),
157 marking(false),
158 collecting(false),
159 performingDRCValidationTrace(false),
160 presweeping(false),
161 markerActive(0),
162 stackCleaned(true),
163 rememberedStackTop(0),
164 stackEnter(0),
165 enterCount(0),
166#ifdef VMCFG_SELECTABLE_EXACT_TRACING
167 runtimeSelectableExactnessFlag(config.exactTracing ? kVirtualGCTrace : 0),
168#endif
169#ifdef MMGC_MARKSTACK_ALLOWANCE
170 m_incrementalWork(config.markstackAllowance),
171 m_barrierWork(config.markstackAllowance),
172#endif
173 m_markStackOverflow(false),
174 mark_item_recursion_control(20), // About 3KB as measured with GCC 4.1 on MacOS X (144 bytes / frame), May 2009
175 sizeClassIndex(kSizeClassIndex), // see comment in GC.h
176 pageMap(),
177 heap(gcheap),
178 finalizedValue(true),
179 smallEmptyPageList(NULL),
180 largeEmptyPageList(NULL),
181 remainingQuickListBudget(0),
182 victimIterator(0),
183 m_roots(0),
184 m_callbacks(0),
185 zct(),
186 lockedObjects(NULL)
187#ifdef MMGC_CONSERVATIVE_PROFILER
188 , demos(0)
189#endif
190#ifdef MMGC_WEAKREF_PROFILER
191 , weaklings(0)
192#endif
193#ifdef MMGC_DELETION_PROFILER
194 , deletos(0)
195#endif
196#ifdef DEBUGGER
197 , m_sampler(NULL)
198#endif
199 {
200 // sanity check for all our types
201 MMGC_STATIC_ASSERT(sizeof(int8_t) == 1);
202 MMGC_STATIC_ASSERT(sizeof(uint8_t) == 1);
203 MMGC_STATIC_ASSERT(sizeof(int16_t) == 2);
204 MMGC_STATIC_ASSERT(sizeof(uint16_t) == 2);
205 MMGC_STATIC_ASSERT(sizeof(int32_t) == 4);
206 MMGC_STATIC_ASSERT(sizeof(uint32_t) == 4);
207 MMGC_STATIC_ASSERT(sizeof(int64_t) == 8);
208 MMGC_STATIC_ASSERT(sizeof(uint64_t) == 8);
209 MMGC_STATIC_ASSERT(sizeof(intptr_t) == sizeof(void *));
210 MMGC_STATIC_ASSERT(sizeof(uintptr_t) == sizeof(void *));
211 #ifdef MMGC_64BIT
212 MMGC_STATIC_ASSERT(sizeof(intptr_t) == 8);
213 MMGC_STATIC_ASSERT(sizeof(uintptr_t) == 8);
214 #else
215 MMGC_STATIC_ASSERT(sizeof(intptr_t) == 4);
216 MMGC_STATIC_ASSERT(sizeof(uintptr_t) == 4);
217 #endif
218 MMGC_STATIC_ASSERT(sizeof(GCLargeAlloc::LargeBlock) % 8 == 0);
219 MMGC_STATIC_ASSERT(sizeof(gcbits_t) == 1);
220
221 // Policy code relies on INT_MAX being 32 bit int max.
222 MMGC_STATIC_ASSERT(INT_MAX == 0x7fffffff);
223
224 // The size tables above are derived based on a block size of 4096; this
225 // assert keeps us honest. Talk to Lars if you get into trouble here.
226 //
227 // Also it appears that some DEBUG-mode guards in SetPageMapValue,
228 // GetPageMapValue, ClearPageMapValue know that the block size is 4096.
229 GCAssert(GCHeap::kBlockSize == 4096);
230
231 GCAssert(!(greedy && incremental));
232 zct.SetGC(this);
233
234 VMPI_lockInit(&m_gcLock);
235 VMPI_lockInit(&m_rootListLock);
236
237 // Global budget for blocks on the quick lists of small-object allocators. This
238 // budget serves as a throttle on the free lists during times when the GC is not
239 // running; when the GC is running the periodic flushing of the quick lists
240 // makes the budget irrelevant. The budget is global so that very active allocators
241 // can have long free lists at the expense of inactive allocators, which must have
242 // short ones.
243 //
244 // This is somewhat arbitrary: 4 blocks per allocator. Must initialize this before we
245 // create allocators because they will request a budget when they're created. The
246 // total budget /must/ accomodate at least one block per alloc, and makes very little
247 // sense if it is not at least two blocks per alloc. (Note no memory is allocated, it's
248 // just a budget.)
249 //
250 // See comment in GCAlloc.cpp for more about the quick lists.
251
252 remainingQuickListBudget = (3*kNumSizeClasses) * 4 * GCHeap::kBlockSize;
253
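
For concreteness, with kNumSizeClasses = 40 (the table above) and GCHeap::kBlockSize = 4096 (asserted earlier in this constructor), the initial budget works out to about 1920 KB. A tiny standalone check (hypothetical, not MMgc code):

    #include <cstddef>

    static const size_t kNumSizeClasses = 40;   // from the size-class table above
    static const size_t kBlockSize      = 4096; // GCHeap::kBlockSize, asserted above

    // (3 allocator families) * kNumSizeClasses allocators * 4 blocks each * 4 KB
    static_assert((3 * kNumSizeClasses) * 4 * kBlockSize == 1966080,
                  "initial quick-list budget is 1,966,080 bytes (1920 KB)");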
254 // Create all the allocators up front (not lazy)
255 // so that we don't have to check the pointers for
256 // NULL on every allocation.
257 for (int i=0; i<kNumSizeClasses; i++) {
258 containsPointersAllocs[i] = mmfx_new(GCAlloc(this, kSizeClasses[i], true, false, i));
259 containsPointersRCAllocs[i] = mmfx_new(GCAlloc(this, kSizeClasses[i], true, true, i));
260 noPointersAllocs[i] = mmfx_new(GCAlloc(this, kSizeClasses[i], false, false, i));
261 }
262
263 largeAlloc = mmfx_new(GCLargeAlloc(this));
264
265 // See comment in GC.h
266 allocsTable[kRCObject] = containsPointersRCAllocs;
267 allocsTable[kContainsPointers] = containsPointersAllocs;
268 allocsTable[kRCObject|kContainsPointers] = containsPointersRCAllocs;
269 allocsTable[0] = noPointersAllocs;
270
271 VMPI_memset(m_bitsFreelists, 0, sizeof(uint32_t*) * kNumSizeClasses);
272 m_bitsNext = (uint32_t*)heapAlloc(1);
273
274 // precondition for emptyPageList
275 GCAssert(offsetof(GCLargeAlloc::LargeBlock, next) == offsetof(GCAlloc::GCBlock, next));
276
277
278 for(int i=0; i<GCV_COUNT; i++)
279 {
280 SetGCContextVariable(i, NULL);
281 }
282
283 allocaInit();
284
285 MMGC_GCENTER(this);
286 emptyWeakRef = new (this) GCWeakRef(NULL); // Note, we don't profile this one because the profiler is not yet allocated
287
288 m_incrementalWork.SetDeadItem(emptyWeakRef); // The empty weak ref is as good an object as any for this
289
290#ifdef MMGC_CONSERVATIVE_PROFILER
291 if (demos == NULL && heap->profiler != NULL)
292 demos = new AllocationSiteProfiler(this, "Conservative scanning volume incurred by allocation site");
293#endif
294#ifdef MMGC_WEAKREF_PROFILER
295 if (weaklings == NULL && heap->profiler != NULL)
296 weaklings = new WeakRefAllocationSiteProfiler(this, "Weak reference allocation volume by allocation site");
297#endif
298#ifdef MMGC_DELETION_PROFILER
299 if (deletos == NULL && heap->profiler != NULL)
300 deletos = new DeletionProfiler(this, "Explicit deletion of managed storage");
301#endif
302 gcheap->AddGC(this);
303 gcheap->AddOOMCallback(this);
304
305 }
306
307#ifdef _MSC_VER
308# pragma warning(pop)
309#endif
310
311 GC::~GC()
312 {
313#ifdef MMGC_CONSERVATIVE_PROFILER
314 if (demos != NULL)
315 {
316 GCLog("Exactly traced percentage: %u\n", policy.queryExactPercentage());
317 demos->dumpTopBacktraces(30, demos->BY_COUNT);
318 delete demos;
319 demos = NULL;
320 }
321#endif
322#ifdef MMGC_WEAKREF_PROFILER
323 if (weaklings != NULL)
324 {
325 GCLog("Peak weakref population: %u\n", weaklings->peakPopulation);
326 GCLog("Probes: %llu accesses: %llu ratio: %f\n",
327 (unsigned long long)weakRefs.probes,
328 (unsigned long long)weakRefs.accesses,
329 (double)weakRefs.probes/(double)weakRefs.accesses);
330 GCLog("GCs: %u, scanned: %llu, removed: %llu\n",
331 weaklings->collections,
332 (unsigned long long)weaklings->scannedAtGC,
333 (unsigned long long)weaklings->removedAtGC);
334 weaklings->dumpTopBacktraces(30, weaklings->BY_COUNT);
335 delete weaklings;
336 weaklings = NULL;
337 }
338#endif
339#ifdef MMGC_HEAP_GRAPH
340 printBlacklist();
341#endif
342 policy.shutdown();
343 allocaShutdown();
344
345 // Do this before calling GCHeap::RemoveGC as GCAutoEnter::Destroy
346 // expect this GC to still be the active GC and GCHeap::RemoveGC clears
347 // the active GC.
348 if(stackEnter != NULL) {
349 stackEnter->Destroy(false);
350 }
351
352 heap->RemoveGC(this);
353 heap->RemoveOOMCallback(this);
354
355 // Force all objects to be destroyed
356 destroying = true;
357
358 {
359 MMGC_GCENTER(this);
360 ForceSweepAtShutdown();
361 }
362
363 for (int i=0; i < kNumSizeClasses; i++) {
364 mmfx_delete(containsPointersAllocs[i]);
365 mmfx_delete(containsPointersRCAllocs[i]);
366 mmfx_delete(noPointersAllocs[i]);
367 }
368
369 if (largeAlloc) {
370 mmfx_delete(largeAlloc);
371 }
372
373 // Go through m_bitsFreelist and collect list of all pointers
374 // that are on page boundaries into new list, pageList
375 void **pageList = NULL;
376 for(int i=0, n=kNumSizeClasses; i<n; i++) {
377 uint32_t* bitsFreelist = m_bitsFreelists[i];
378 while(bitsFreelist) {
379 uint32_t *next = *(uint32_t**)bitsFreelist;
380 if((uintptr_t(bitsFreelist) & GCHeap::kOffsetMask) == 0) {
381 *((void**)bitsFreelist) = pageList;
382 pageList = (void**)bitsFreelist;
383 }
384 bitsFreelist = next;
385 }
386 }
387
388 // Go through page list and free all pages on it
389 while (pageList) {
390 void **next = (void**) *pageList;
391 heapFree((void*)pageList);
392 pageList = next;
393 }
394
395 pageMap.DestroyPageMapVia(heap);
396
397 GCAssert(!m_roots);
398 GCAssert(!m_callbacks);
399
400 // apparently the player can't be made to clean up so keep it from crashing at least
401 while(m_roots) {
402 m_roots->Destroy();
403 }
404
405 while(m_callbacks) {
406 m_callbacks->Destroy();
407 }
408
409 zct.Destroy();
410
411#ifdef MMGC_DELETION_PROFILER
412 /* 'deletos' should only be deleted after ForceSweepAtShutdown()
413 * as that can lead to ProfileExplicitDeletion() calls
414 */
415 if (deletos != NULL)
416 {
417 deletos->dumpTopBacktraces(30, deletos->BY_COUNT);
418 delete deletos;
419 deletos = NULL;
420 }
421#endif
422 GCAssertMsg(GetNumBlocks() == 0, "GC accounting off");
423
424 GCAssertMsg(enterCount == 0, "GC enter/exit paths broken");
425
426 VMPI_lockDestroy(&m_gcLock);
427 VMPI_lockDestroy(&m_rootListLock);
428 }
429
430 void GC::Collect(bool scanStack, bool okToShrinkHeapTarget)
431 {
432 GCAssertMsg(!scanStack || onThread(), "Full collection with stack scan requested however the GC isn't associated with a thread, missing MMGC_GCENTER macro.");
433
434 if (nogc || markerActive || collecting || Reaping()) {
435 return;
436 }
437
438 ReapZCT(scanStack);
439
440 if(!marking)
441 StartIncrementalMark();
442 if(marking)
443 FinishIncrementalMark(scanStack, okToShrinkHeapTarget);
444
445#ifdef _DEBUG
446 // Dumping the stack trace at GC time can be very helpful with stack walk bugs
447 // where something was created, stored away in untraced, unmanaged memory and not
448 // reachable by the conservative stack walk
449 //DumpStackTrace();
450 FindUnmarkedPointers();
451#endif
452
453 policy.fullCollectionComplete();
454 }
455
456 void GC::Collect(double allocationBudgetFractionUsed)
457 {
458 if (allocationBudgetFractionUsed < 0.25) allocationBudgetFractionUsed = 0.25;
459 if (allocationBudgetFractionUsed > 1.0) allocationBudgetFractionUsed = 1.0;
460
461 // Spec says "if the parameter exceeds the fraction used ..."
462 if (policy.queryAllocationBudgetFractionUsed() <= allocationBudgetFractionUsed)
463 return;
464
465 GC::Collect(true, false);
466 }
467
468 REALLY_INLINEinline __attribute__((always_inline)) void GC::Push_StackMemory(const void* p, uint32_t size, const void* baseptr)
469 {
470 GCAssert(p != NULL)do { } while (0);
471 GCAssert(!IsPointerToGCPage(GetRealPointer(p)) || !IsPointerToGCObject(GetRealPointer(p)))do { } while (0);
472
473 if (!m_incrementalWork.Push_StackMemory(p, size, baseptr))
474 SignalMarkStackOverflow_NonGCObject();
475 }
476
477 REALLY_INLINEinline __attribute__((always_inline)) boolbool GC::Push_RootProtector(const void* p)
478 {
479 GCAssert(p != NULL)do { } while (0);
480 GCAssert(!IsPointerToGCPage(GetRealPointer(p)) || !IsPointerToGCObject(GetRealPointer(p)))do { } while (0);
481
482 if (!m_incrementalWork.Push_RootProtector(p)) {
483 SignalMarkStackOverflow_NonGCObject();
484 return falsefalse;
485 }
486 return truetrue;
487 }
488
489 REALLY_INLINEinline __attribute__((always_inline)) void GC::Push_LargeObjectProtector(const void* p)
490 {
491 GCAssert(p != NULL)do { } while (0);
492 GCAssert(IsPointerToGCPage(GetRealPointer(p)) && IsPointerToGCObject(GetRealPointer(p)))do { } while (0);
493
494 if (!m_incrementalWork.Push_LargeObjectProtector(p))
495 SignalMarkStackOverflow_NonGCObject();
496 }
497
498 REALLY_INLINEinline __attribute__((always_inline)) void GC::Push_LargeExactObjectTail(const void* p, size_t cursor)
499 {
500 GCAssert(p != NULL)do { } while (0);
501 GCAssert(cursor != 0)do { } while (0);
502 GCAssert(IsPointerToGCPage(GetRealPointer(p)) && IsPointerToGCObject(GetRealPointer(p)))do { } while (0);
503
504 if (!m_incrementalWork.Push_LargeExactObjectTail(p, cursor))
505 SignalMarkStackOverflow_NonGCObject();
506 }
507
508 REALLY_INLINEinline __attribute__((always_inline)) void GC::Push_LargeObjectChunk(const void *p, uint32_t size, const void* baseptr)
509 {
510 GCAssert(p != NULL)do { } while (0);
511 GCAssert(IsPointerToGCPage(p))do { } while (0);
512 GCAssert(IsPointerToGCPage((char*)p + size - 1))do { } while (0);
513
514 if (!m_incrementalWork.Push_LargeObjectChunk(p, size, baseptr))
515 SignalMarkStackOverflow_NonGCObject();
516 }
517
518 REALLY_INLINEinline __attribute__((always_inline)) void GC::Push_LargeRootChunk(const void *p, uint32_t size, const void* baseptr)
519 {
520 GCAssert(p != NULL)do { } while (0);
521 GCAssert(!IsPointerToGCPage(p))do { } while (0);
522 GCAssert(!IsPointerToGCPage((char*)p + size - 1))do { } while (0);
523
524 if (!m_incrementalWork.Push_LargeRootChunk(p, size, baseptr))
525 SignalMarkStackOverflow_NonGCObject();
526 }
527
528 REALLY_INLINEinline __attribute__((always_inline)) void GC::Push_GCObject(const void *p)
529 {
530#ifdef DEBUG
531 WorkItemInvariants_GCObject(p);
532#endif
533 if (!m_incrementalWork.Push_GCObject(p))
534 SignalMarkStackOverflow_GCObject(p);
535 }
536
537 void GC::Push_GCObject_MayFail(const void *p)
538 {
539#ifdef DEBUG
540 WorkItemInvariants_GCObject(p);
541#endif
542 m_incrementalWork.Push_GCObject(p);
543 }
544
545#ifdef DEBUG
546 void GC::WorkItemInvariants_GCObject(const void *p)
547 {
548 GCAssert(p != NULL)do { } while (0);
549 GCAssert(IsPointerToGCObject(GetRealPointer(p)))do { } while (0);
550 GCAssert(ContainsPointers(p))do { } while (0);
551 GCAssert(!IsRCObject(p) || ((RCObject*)p)->composite != 0)do { } while (0);
552 GCAssert((GetGCBits(GetRealPointer(p)) & (kQueued|kMark)) == kQueued)do { } while (0);
553 }
554
555#endif
556
557 void GC::AbortInProgressMarking()
558 {
559 ClearMarkStack();
560 m_barrierWork.Clear();
561 ClearMarks();
562#ifdef MMGC_HEAP_GRAPH
563 markerGraph.clear();
564#endif
565 marking = false;
566 }
567
568#ifdef DEBUG
569 void GC::DRCValidationTrace(boolbool scanStack)
570 {
571 if(marking) {
572 AbortInProgressMarking();
573 }
574 performingDRCValidationTrace = truetrue;
575 MarkNonstackRoots();
576 MarkQueueAndStack(scanStack);
577#ifdef DEBUG
578 // If we're doing a mark to validate DRC then also pin
579 // RCObjects here.
580 VMPI_callWithRegistersSaved(ZCT::DoPinProgramStack, &zct);
581#endif
582 if(m_markStackOverflow) {
583 // oh well
584 AbortInProgressMarking();
585 validateDefRef = falsefalse;
586 GCLog("Warning: Mark stack overflow during DRC validation, disabling validation.");
587 }
588 performingDRCValidationTrace = falsefalse;
589 }
590#endif
591
592 GC::RCRootSegment::RCRootSegment(GC* gc, void* mem, size_t size)
593 : StackMemory(gc, mem, size)
594 , mem(mem)
595 , size(size)
596 , prev(NULL)
597 , next(NULL)
598 {}
599
600 void* GC::AllocRCRoot(size_t size)
601 {
602 const int hdr_size = (sizeof(void*) + 7) & ~7;
603
604 GCHeap::CheckForAllocSizeOverflow(size, hdr_size);
605
606 union {
607 char* block;
608 uintptr_t* block_u;
609 };
610 block = mmfx_new_array_opt(char, size + hdr_size, MMgc::kZero);
611 void* mem = (void*)(block + hdr_size);
612 RCRootSegment *segment = new RCRootSegment(this, mem, size);
613 *block_u = (uintptr_t)segment;
614 AddRCRootSegment(segment);
615 return mem;
616 }
617
618 void GC::FreeRCRoot(void* mem)
619 {
620 const int hdr_size = (sizeof(void*) + 7) & ~7;
621 union {
622 char* block;
623 RCRootSegment** segmentp;
624 };
625 block = (char*)mem - hdr_size;
626 RCRootSegment* segment = *segmentp;
627 RemoveRCRootSegment(segment);
628 delete segment;
629 mmfx_delete_array(block);
630 }
631
632 void GC::CollectionWork()
633 {
634 if (nogc)
635 return;
636
637 if (incremental) {
638 // If we're reaping don't do any work, this simplifies policy event timing and improves
639 // incrementality.
640 if (!collecting && !Reaping()) {
641 if (!marking) {
642 StartIncrementalMark();
643 }
644 else if (policy.queryEndOfCollectionCycle()) {
645 FinishIncrementalMark(true);
646 }
647 else {
648 IncrementalMark();
649 }
650 }
651 }
652 else {
653 // non-incremental collection
654 Collect();
655 }
656 }
657
658 // Note, the interaction with the policy manager in Alloc() should
659 // be the same as in AllocDouble(), which is defined in GC.h.
660
661 // In Alloc we have separate cases for large and small objects. We want
662 // small-object allocation to be as fast as possible. Hence the relative
663 // mess of #ifdefs below, and the following explanation.
664 //
665 // Normally we round up size to 8 bytes and add DebugSize and that sum is
666 // the size that determines whether we use the small-object allocator or
667 // the large-object one:
668 //
669 // requestSize = ((size + 7) & ~7) + DebugSize()
670 //
671 // Then we shift that three places and subtract 1 to obtain the allocator
672 // index:
673 //
674 // index = (requestSize >> 3) - 1
675 // = ((((size + 7) & ~7) + DebugSize()) >> 3) - 1
676 //
677 // We want to optimize this. Consider the case of a Release build where
678 // DebugSize() == 0:
679 //
680 // = (((size + 7) & ~7) >> 3) - 1
681 //
682 // Observe that the & ~7 is redundant because those bits will be lost,
683 // and that 1 = (8 >> 3)
684 //
685 // = ((size + 7) >> 3) - (8 >> 3)
686 // = (size + 7 - 8) >> 3
687 // index = (size - 1) >> 3
688 //
689 // In Release builds, the guard for small object allocation is
690 //
691 // guard = requestSize <= kLargestAlloc
692 // = ((size + 7) & ~7) <= kLargestAlloc
693 //
694 // Yet the /effective/ guard, given that not all the bits of size are used
695 // subsequently, is simply
696 //
697 // guard = size <= kLargestAlloc
698 //
699 // To see this, consider that if size < kLargestAlloc then requestSize <= kLargestAlloc
700 // and if size > kLargestAlloc then requestSize > kLargestAlloc, because requestSize
701 // is rounded up to an 8-byte boundary and kLargestAlloc is already rounded to an 8-byte
702 // boundary.
703 //
704 // So in the small-object case in Release builds we use the simpler guard, and defer
705 // the rounding and allocation overflow checking to the large-object case.
706
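The algebra above can be checked mechanically. A small standalone sketch (hypothetical, not MMgc code) that verifies the optimized Release-build index and guard against the straightforward forms, assuming DebugSize() == 0 and kLargestAlloc = 1968 (the largest size class in the table above):

    #include <cassert>
    #include <cstddef>

    int main()
    {
        const size_t kLargestAlloc = 1968;  // largest small-object size class above

        for (size_t size = 1; size <= kLargestAlloc + 64; size++) {
            const size_t requestSize = (size + 7) & ~size_t(7);   // round up to 8

            // index = (requestSize >> 3) - 1  collapses to  (size - 1) >> 3
            assert(((requestSize >> 3) - 1) == ((size - 1) >> 3));

            // the guard on requestSize is equivalent to the simpler guard on size
            assert((requestSize <= kLargestAlloc) == (size <= kLargestAlloc));
        }
        return 0;
    }
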
707#ifdef MMGC_MEMORY_INFO
708 #ifndef _DEBUG
709 #error "Really need MMGC_MEMORY_INFO to imply _DEBUG"
710 #endif
711#endif
712#ifdef MMGC_MEMORY_PROFILER
713 #ifndef MMGC_HOOKS
714 #error "Really need MMGC_MEMORY_PROFILER to imply MMGC_HOOKS"
715 #endif
716#endif
717
718 void *GC::Alloc(size_t size, int flags/*0*/)
719 {
720#ifdef _DEBUG
721 GCAssertMsg(size > 0, "cannot allocate a 0 sized block");
722 GCAssertMsg(onThread(), "GC called from different thread!");
723 GCAssertMsg(!Destroying(), "GC allocations during shutdown are illegal.");
724
725 if(!nogc && stackEnter == NULL) {
726 GCAssertMsg(false, "A MMGC_GCENTER macro must exist on the stack");
727 GCHeap::SignalInconsistentHeapState("MMGC_GCENTER missing");
728 /*NOTREACHED*/
729 return NULL;
730 }
731
732 // always be marking in pedantic mode
733 if(incrementalValidationPedantic) {
734 if(!marking) {
735 if (!Reaping()) {
736 StartIncrementalMark();
737 }
738 }
739 }
740#endif
741#ifdef AVMPLUS_SAMPLER
742 avmplus::AvmCore *core = (avmplus::AvmCore*)GetGCContextVariable(GCV_AVMCORE);
743 if(core)
744 core->sampleCheck();
745#endif
746
747#if defined _DEBUG || defined MMGC_MEMORY_PROFILER
748 const size_t askSize = size; // preserve this for statistics gathering and profiling
749#endif
750#if defined _DEBUG
751 GCHeap::CheckForAllocSizeOverflow(size, 7+DebugSize());
752 size = (size+7)&~7; // round up to multiple of 8
753 size += DebugSize(); // add in some (possibly zero) multiple of 8 bytes for debug information
754#endif
755
756 void *item; // the allocated object (a user pointer!), or NULL
757
758 // In Release builds the calls to the underlying Allocs should end up being compiled
759 // as tail calls with reasonable compilers. Try to keep it that way.
760
761 if (size <= kLargestAlloc)
762 {
763 // This is the fast lookup table implementation to find the right allocator.
764 // The table has been lifted into an instance variable because the Player is
765 // compiled with PIC and GCC generates somewhat gnarly code for that.
766#ifdef _DEBUG
767 const unsigned index = sizeClassIndex[(size>>3)-1];
768#else
769 const unsigned index = sizeClassIndex[(size-1)>>3];
770#endif
771
772 // The table avoids a three-way decision tree.
773 GCAlloc **allocs = allocsTable[flags & (kRCObject|kContainsPointers)];
774
775 // assert that I fit
776 GCAssert(size <= allocs[index]->GetItemSize());
777
778 // assert that I don't fit (makes sure we don't waste space)
779 GCAssert( (index==0) || (size > allocs[index-1]->GetItemSize()));
780
781#if defined _DEBUG || defined MMGC_MEMORY_PROFILER
782 item = allocs[index]->Alloc(askSize, flags);
783#else
784 item = allocs[index]->Alloc(flags);
785#endif
786 }
787 else
788 {
789#ifndef _DEBUG
790 GCHeap::CheckForAllocSizeOverflow(size, 7+DebugSize());
791 size = (size+7)&~7; // round up to multiple of 8
792 size += DebugSize();
793#endif
794#if defined _DEBUG || defined MMGC_MEMORY_PROFILER
795 item = largeAlloc->Alloc(askSize, size, flags);
796#else
797 item = largeAlloc->Alloc(size, flags);
798#endif
799 }
800
801 // Hooks are run by the lower-level allocators, which also signal
802 // allocation work and trigger GC.
803
804 GCAssert(item != NULL || (flags & kCanFail) != 0);
805
806#ifdef _DEBUG
807 // Note GetUserPointer(item) only required for DEBUG builds and for non-NULL pointers.
808
809 if(item != NULL) {
810 item = GetUserPointer(item);
811 bool shouldZero = (flags & kZero) || incrementalValidationPedantic;
812
813 // in debug mode memory is poisoned so we have to clear it here
814 // in release builds memory is zero'd to start and on free/sweep
815 // in pedantic mode uninitialized data can trip the write barrier
816 // detector, only do it for pedantic because otherwise we want the
817 // mutator to get the poisoned data so it crashes if it relies on
818 // uninitialized values
819 if(shouldZero) {
820 VMPI_memset(item, 0, Size(item));
821 }
822 }
823#endif
824
825 return item;
826 }
827
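A hedged usage sketch of the flag handling above (the caller is hypothetical, the exact scope of the flag constants kContainsPointers, kZero and kCanFail is assumed, and gc is a GC* obtained elsewhere):

    // Small, pointer-containing, zero-filled object: handled by the allocsTable /
    // sizeClassIndex fast path above (size 48 selects the 48-byte size class).
    void* scanned = gc->Alloc(48, GC::kContainsPointers | GC::kZero);

    // Larger than kLargestAlloc: takes the large-object path; with kCanFail the
    // result may be NULL instead of signaling an out-of-memory condition.
    void* maybe = gc->Alloc(16384, GC::kCanFail);
    if (maybe == NULL) {
        // caller-chosen fallback
    }
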
828 // Mmmm.... gcc -O3 inlines Alloc into this in Release builds :-)
829
830 void *GC::OutOfLineAllocExtra(size_t size, size_t extra, int flags)
831 {
832 return AllocExtra(size, extra, flags);
833 }
834
835 void GC::AbortFree(const void* item)
836 {
837 // Always clear the contents to avoid false retention of data: the
838 // object will henceforth be scanned conservatively, and it may be
839 // reachable from the stack or from other objects (dangling pointers
840 // caused by improper use of Free(), or non-pointers interpreted
841 // as pointers by the conservative scanner).
842
843 Zero(item);
844
845 // It would not be right to clear the finalized bit if we could get here other
846 // than via operator delete, but there's code in the GC interface to ensure
847 // that GC::Free and FreeNotNull are not invoked on finalized objects. Thus
848 // we're OK clearing the finalized bit here without worrying about whether the
849 // destructor needs to be run.
850 //
851 // Weakly held or exactly traced objects do not have similar correctness issues.
852 //
853 // See Bugzilla 589102 for more information.
854
855 ClearFinalized(item);
856 if(HasWeakRef(item))
857 ClearWeakRef(item);
858 }
859
860 void GC::Zero(const void* userptr)
861 {
862 if (IsFinalized(userptr)) {
863 GCFinalizedObject* obj = (GCFinalizedObject*)userptr;
864 VMPI_memset::memset((char*)obj + sizeof(GCFinalizedObject), 0, GC::Size(obj) - sizeof(GCFinalizedObject));
865 }
866 else if (IsExactlyTraced(userptr)) {
867 GCTraceableObject* obj = (GCTraceableObject*)userptr;
868 VMPI_memset::memset((char*)obj + sizeof(GCTraceableObject), 0, GC::Size(obj) - sizeof(GCTraceableObject));
869 }
870 else {
871 GCObject* obj = (GCObject*)userptr;
872 VMPI_memset::memset((char*)obj + sizeof(GCObject), 0, GC::Size(obj) - sizeof(GCObject));
873 }
874 }
875
876 // Observe that nbytes is never more than kBlockSize, and the budget applies only to
877 // objects tied up in quick lists, not in on-block free lists. So given that the
878 // total budget for the GC is at least kBlockSize, the loop is guaranteed to terminate.
879
880 void GC::ObtainQuickListBudget(size_t nbytes)
881 {
882 // Flush quick lists until we have enough for the requested budget. We
883 // pick victims in round-robin fashion: there's a major iterator that
884 // selects the allocator pool, and a minor iterator that selects an
885 // allocator in that pool.
886
887 while (remainingQuickListBudget <= nbytes) {
1. Loop condition is true. Entering loop body
888 GCAlloc** allocs = NULL;
889 switch (victimIterator % 3) {
2. 'Default' branch taken. Execution continues on line 894
890 case 0: allocs = containsPointersAllocs; break;
891 case 1: allocs = containsPointersRCAllocs; break;
892 case 2: allocs = noPointersAllocs; break;
893 }
894 allocs[victimIterator / 3]->CoalesceQuickList();
3. Array access (from variable 'allocs') results in a null pointer dereference
895 victimIterator = (victimIterator + 1) % (3*kNumSizeClasses);
896 }
897
898 remainingQuickListBudget -= nbytes;
899 }
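
Note that victimIterator % 3 can only be 0, 1 or 2, so every real path through the switch assigns allocs; the report reflects the implicit 'default' branch the analyzer still models. A hedged sketch of one way to make that explicit and silence the warning (illustrative only, not necessarily the fix the project would adopt):

    while (remainingQuickListBudget <= nbytes) {
        GCAlloc** allocs = NULL;
        switch (victimIterator % 3) {
        case 0: allocs = containsPointersAllocs;   break;
        case 1: allocs = containsPointersRCAllocs; break;
        case 2: allocs = noPointersAllocs;         break;
        default:
            GCAssert(!"victimIterator % 3 must be 0, 1 or 2");
            allocs = containsPointersAllocs;   // keep Release builds well-defined
            break;
        }
        allocs[victimIterator / 3]->CoalesceQuickList();
        victimIterator = (victimIterator + 1) % (3*kNumSizeClasses);
    }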
900
901 void GC::RelinquishQuickListBudget(size_t nbytes)
902 {
903 remainingQuickListBudget += nbytes;
904 }
905
906 // I can think of two alternative policies for handling dependent allocations.
907 //
908 // One is to treat all memory the same: external code calls SignalDependentAllocation
909 // and SignalDependentDeallocation to account for the volume of data that are
910 // dependent on but not allocated by the GC, and the GC incorporates these data
911 // into policy decisions, heap target computation, and so on.
912 //
913 // (Benefits: few surprises policy-wise.
914 //
915 // Problems: If the volume (in bytes) of dependent data swamps the normal data
916 // then the collection policy may not work all that well -- collection cost may be
917 // too high, and memory usage may also be too high because the budget is L times
918 // the live memory, which may include a lot of bitmap data.)
919 //
920 // The other is to treat dependent memory differently. SignalDependentAllocation
921 // and SignalDependentDeallocation will account for the external memory, and if
922 // the volume of dependent memory is high relative to the budget (which is
923 // computed without regard for the volume of dependent memory) then those two
924 // methods will drive the collector forward one step.
925 //
926 // (Benefits: not overly sensitive to the volume of bitmap data, especially in
927 // computing the budget, yet a sufficient number of allocations will force the gc
928 // cycle to complete.
929 //
930 // Problems: It is a bit ad-hoc and it may fail to reclaim a large volume of
931 // bitmap data quickly enough.)
932 //
933 // I'm guessing the former is better, for now.
934
935 void GC::SignalDependentAllocation(size_t nbytes)
936 {
937 policy.signalDependentAllocation(nbytes);
938 SignalAllocWork(nbytes);
939 }
940
941 void GC::SignalDependentDeallocation(size_t nbytes)
942 {
943 policy.signalDependentDeallocation(nbytes);
944 SignalFreeWork(nbytes);
945 }
946
947 void GC::ClearMarks()
948 {
949 // ClearMarks may sweep, although I'm far from sure that that's a good idea,
950 // as SignalImminentAbort calls ClearMarks.
951
952 EstablishSweepInvariants();
953
954 for (int i=0; i < kNumSizeClasses; i++) {
955 containsPointersRCAllocs[i]->ClearMarks();
956 containsPointersAllocs[i]->ClearMarks();
957 noPointersAllocs[i]->ClearMarks();
958 }
959 largeAlloc->ClearMarks();
960 m_markStackOverflow = falsefalse;
961 }
962
963 void GC::Finalize()
964 {
965 MarkOrClearWeakRefs();
966
967 for(int i=0; i < kNumSizeClasses; i++) {
968 containsPointersRCAllocs[i]->Finalize();
969 containsPointersAllocs[i]->Finalize();
970 noPointersAllocs[i]->Finalize();
971 }
972 largeAlloc->Finalize();
973 finalizedValue = !finalizedValue;
974
975
976 for(int i=0; i < kNumSizeClasses; i++) {
977 containsPointersRCAllocs[i]->m_finalized = falsefalse;
978 containsPointersAllocs[i]->m_finalized = falsefalse;
979 noPointersAllocs[i]->m_finalized = falsefalse;
980 }
981 }
982
983 void GC::SweepNeedsSweeping()
984 {
985 EstablishSweepInvariants();
986
987 // clean up any pages that need sweeping
988 for(int i=0; i < kNumSizeClasses; i++) {
989 containsPointersRCAllocs[i]->SweepNeedsSweeping();
990 containsPointersAllocs[i]->SweepNeedsSweeping();
991 noPointersAllocs[i]->SweepNeedsSweeping();
992 }
993 }
994
995 void GC::EstablishSweepInvariants()
996 {
997 for(int i=0; i < kNumSizeClasses; i++) {
998 containsPointersRCAllocs[i]->CoalesceQuickList();
999 containsPointersAllocs[i]->CoalesceQuickList();
1000 noPointersAllocs[i]->CoalesceQuickList();
1001 }
1002 }
1003
1004 void GC::ClearMarkStack()
1005 {
1006 // GCRoot have pointers into the mark stack, clear them.
1007 {
1008 MMGC_LOCK(m_rootListLock)MMgc::GCAcquireSpinlock _lock(&m_rootListLock);
1009 GCRoot *r = m_roots;
1010 while(r) {
1011 r->ClearMarkStackSentinelPointer();
1012 r = r->next;
1013 }
1014 }
1015 m_incrementalWork.Clear();
1016 }
1017
1018 /*static*/
1019 void GC::SetHasWeakRef(const void *userptr, boolbool flag)
1020 {
1021 const void *realptr = GetRealPointer(userptr)userptr;
1022 GCAssert(GetGC(realptr)->IsPointerToGCObject(realptr))do { } while (0);
1023 if (flag) {
1024 GetGCBits(realptr) |= kHasWeakRef;
1025 // Small-object allocators maintain an extra flag
1026 // about weak objects in a block, need to set that
1027 // flag here.
1028 if (!GCLargeAlloc::IsLargeBlock(realptr))
1029 GCAlloc::SetBlockHasWeakRef(userptr);
1030 }
1031 else
1032 GetGCBits(realptr) &= ~kHasWeakRef;
1033 }
1034
1035 /* The main wrinkle here is to keep a weakref alive with its object:
1036 * The invariant is that there is a weakref object for an object iff the
1037 * kHasWeakRef bit is set on the object. With weakrefs not being finalized,
1038 * a weakref could be GC'd without properly clearing the kHasWeakRef bit
1039 * in the object if the object is kept alive by a strong pointer, thus
1040 * violating the invariant. The invariant is upheld by keeping the weakref
1041 * alive. (I suppose it could also have been upheld by clearing the
1042 * kHasWeakRef bit on the object and letting the weakref be GC'd, but
1043 * that seemed trickier. Bugzilla 656942.)
1044 */
1045 void GC::MarkOrClearWeakRefs()
1046 {
1047 {
1048 GCHashtable::Iterator it(&weakRefs);
1049#ifdef MMGC_WEAKREF_PROFILER
1050 uint32_t count = weakRefs.count();
1051 uint32_t deleted = 0;
1052#endif
1053
1054 while (it.nextKey() != NULL__null) {
1055 GCWeakRef* w = (GCWeakRef*)it.value();
1056 GCObject* o = w->peek();
1057 if (o != NULL__null && !GC::GetMark(o)) {
1058#ifdef MMGC_WEAKREF_PROFILER
1059 deleted++;
1060#endif
1061 ClearWeakRef(o, falsefalse);
1062 }
1063 else if (!GC::GetMark(w))
1064 GC::SetMark(w);
1065 }
1066 }
1067 weakRefs.prune();
1068#ifdef MMGC_WEAKREF_PROFILER
1069 if (weaklings)
1070 weaklings->reportGCStats(count, deleted);
1071#endif
1072 }
1073
1074 void GC::ForceSweepAtShutdown()
1075 {
1076 // There are two preconditions for Sweep: the mark stacks must be empty, and
1077 // the queued bits must all be clear (except as part of kFreelist).
1078 //
1079 // We are doing this at shutdown, so don't bother with pushing the marking through
1080 // to the end; just clear the stacks, clear the mark bits, and sweep. The objects
1081 // will be finalized and deallocated and the blocks will be returned to the block
1082 // manager.
1083
1084 AbortInProgressMarking();
1085
1086 // System invariant: collecting == false || marking == true
1087 marking = truetrue;
1088 Sweep();
1089 marking = falsefalse;
1090 }
1091
1092 void GC::Sweep()
1093 {
1094 // Applications using -memstats for peak heap measurements need info printed before the sweep
1095
1096 if(heap->Config().gcstats) {
1097 gclog("[mem] sweep-start\n");
1098 }
1099
1100 // 'collecting' must be true because it indicates allocations should
1101 // start out marked and the write barrier should short-circuit (not
1102 // record any hits). We can't rely on write barriers to mark objects
1103 // during finalization below since presweep or finalization could write
1104 // a new GC object to a root.
1105
1106 GCAssert(m_incrementalWork.Count() == 0)do { } while (0);
1107 GCAssert(m_barrierWork.Count() == 0)do { } while (0);
1108
1109 // This clears the quick lists in the small-block allocators. It's crucial
1110 // that we do that before we set 'collecting' and call any callbacks, because
1111 // the cleared quick lists make sure that the slow allocation path that
1112 // checks m_gc->collecting is taken, and that is required in order to
1113 // allocate objects as marked under some circumstances.
1114
1115 EstablishSweepInvariants();
1116
1117 collecting = truetrue;
1118 zct.StartCollecting();
1119
1120 SAMPLE_FRAME("[sweep]", core());
1121 sweeps++;
1122
1123 size_t heapSize = heap->GetUsedHeapSize();
1124
1125#ifdef MMGC_MEMORY_PROFILER
1126 if(heap->Config().autoGCStats) {
1127 GCLog("Before sweep memory info:\n");
1128 DumpMemoryInfo();
1129 }
1130#endif
1131
1132 GCAssert(m_incrementalWork.Count() == 0)do { } while (0);
1133 GCAssert(m_barrierWork.Count() == 0)do { } while (0);
1134
1135 presweeping = truetrue;
1136 // invoke presweep on all callbacks
1137 DoPreSweepCallbacks();
1138 presweeping = falsefalse;
1139
1140 SAMPLE_CHECK();
1141
1142 // The presweep callbacks can't drive marking or trigger write barriers as the barrier is disabled,
1143 // but they can cause elements to be pushed onto the mark stack explicitly and it's necessary to call mark.
1144 // One example of callback that pushes work items is the Sampler::presweep(). Another is the read
1145 // barrier in GCWeakRef::get() - see Bugzilla 572331.
1146 do {
1147 if (m_markStackOverflow) {
1148 m_markStackOverflow = falsefalse;
1149 HandleMarkStackOverflow();
1150 }
1151 Mark();
1152 } while (m_markStackOverflow);
1153
1154 SAMPLE_CHECK();
1155
1156 GCAssert(m_incrementalWork.Count() == 0)do { } while (0);
1157 GCAssert(m_barrierWork.Count() == 0)do { } while (0);
1158
1159#ifdef MMGC_HEAP_GRAPH
1160 // if destroying, then marks are gone and thus pruning is meaningless
1161 if (!destroying)
1162 pruneBlacklist();
1163#endif
1164
1165 Finalize();
1166
1167 SAMPLE_CHECK();
1168
1169 GCAssert(m_incrementalWork.Count() == 0)do { } while (0);
1170 GCAssert(m_barrierWork.Count() == 0)do { } while (0);
1171
1172 int sweepResults = 0;
1173
1174 // ISSUE: this could be done lazily at the expense other GC's potentially expanding
1175 // unnecessarily, not sure its worth it as this should be pretty fast
1176 GCAlloc::GCBlock *b = smallEmptyPageList;
1177 while(b) {
1178 GCAlloc::GCBlock *next = GCAlloc::Next(b);
1179 GCAlloc* alloc = (GCAlloc*)b->alloc;
1180#ifdef _DEBUG
1181 alloc->SweepGuts(b);
1182#endif
1183 alloc->FreeChunk(b);
1184
1185 sweepResults++;
1186 b = next;
1187 }
1188 smallEmptyPageList = NULL__null;
1189
1190 SAMPLE_CHECK();
1191
1192 GCLargeAlloc::LargeBlock *lb = largeEmptyPageList;
1193 while(lb) {
1194 GCLargeAlloc::LargeBlock *next = GCLargeAlloc::Next(lb);
1195#ifdef MMGC_HOOKS
1196 if(heap->HooksEnabled())
1197 heap->FreeHook(GetUserPointer(lb->GetObject())lb->GetObject(), lb->size - DebugSize()0, uint8_t(GCHeap::GCSweptPoison));
1198#endif
1199 int numBlocks = lb->GetNumBlocks();
1200 sweepResults += numBlocks;
1201 VALGRIND_MEMPOOL_FREE(lb, lb){};
1202 VALGRIND_MEMPOOL_FREE(lb, lb->GetObject()){};
1203 VALGRIND_DESTROY_MEMPOOL(lb){};
1204 FreeBlock(lb, numBlocks);
1205 lb = next;
1206 }
1207 largeEmptyPageList = NULL__null;
1208
1209 if (heap->Config().eagerSweeping)
1210 SweepNeedsSweeping();
1211
1212 // we potentially freed a lot of memory, tell heap to regulate
1213 heap->Decommit();
1214
1215 SAMPLE_CHECK();
1216
1217#ifdef MMGC_MEMORY_PROFILER
1218 if(heap->Config().autoGCStats) {
1219 GCLog("After sweep memory info:\n");
1220 DumpMemoryInfo();
1221 }
1222#endif
1223
1224 // don't want postsweep to fire WB's
1225 collecting = falsefalse;
1226 marking = falsefalse;
1227 zct.EndCollecting();
1228
1229 // invoke postsweep callback
1230 DoPostSweepCallbacks();
1231
1232 SAMPLE_CHECK();
1233
1234 if(heap->Config().gcstats) {
1235 // include large pages given back
1236 sweepResults += int(heapSize - heap->GetUsedHeapSize());
1237 double millis = duration(sweepStart);
1238 gclog("[mem] sweep(%d) reclaimed %d whole pages (%d kb) in %.2f millis (%.4f s)\n",
1239 sweeps, sweepResults, sweepResults*GCHeap::kBlockSize / 1024, millis,
1240 duration(t0)/1000);
1241 }
1242
1243#ifdef MMGC_HEAP_GRAPH
1244 if (!destroying)
1245 printBlacklist();
1246#endif
1247
1248 }
1249
1250 void* GC::AllocBlock(int size, PageMap::PageType pageType, boolbool zero, boolbool canFail)
1251 {
1252 GCAssert(size > 0)do { } while (0);
1253
1254 void *item = heapAlloc(size, GCHeap::kExpand| (zero ? GCHeap::kZero : 0) | (canFail ? GCHeap::kCanFail : 0));
1255
1256 // mark GC pages in page map, small pages get marked one,
1257 // the first page of large pages is 3 and the rest are 2
1258 if(item) {
1259 MarkGCPages(item, 1, pageType);
1260 if(pageType == PageMap::kGCLargeAllocPageFirst) {
1261 MarkGCPages((char*)item+GCHeap::kBlockSize, size - 1, PageMap::kGCLargeAllocPageRest);
1262 }
1263 }
1264
1265 return item;
1266 }
1267
1268 void GC::FreeBlock(void *ptr, uint32_t size)
1269 {
1270 // Bugzilla 551833: Unmark first so that any OOM or other action triggered
1271 // by heapFree does not examine bits representing pages that are gone.
1272 UnmarkGCPages(ptr, size);
1273 heapFree(ptr, size, falsefalse);
1274 }
1275
1276 void *GC::FindBeginningGuarded(const void *gcItem, boolbool allowGarbage)
1277 {
1278 (void)allowGarbage;
1279 void *realItem = NULL__null;
1280 int bits = GetPageMapValueGuarded((uintptr_t)gcItem);
1281 switch(bits)
1282 {
1283 case PageMap::kGCAllocPage:
1284 {
1285 GCAlloc::GCBlock* b = GCAlloc::GetBlock(gcItem);
1286 if(gcItem < b->items)
1287 return NULL__null;
1288 realItem = GCAlloc::FindBeginning(gcItem);
1289 break;
1290 }
1291 case PageMap::kGCLargeAllocPageFirst:
1292 {
1293 realItem = GCLargeAlloc::GetLargeBlock(gcItem)->GetObject();
1294 if(gcItem < realItem)
1295 return NULL__null;
1296 break;
1297 }
1298 case PageMap::kGCLargeAllocPageRest:
1299 while(bits == PageMap::kGCLargeAllocPageRest)
1300 {
1301 gcItem = (void*) ((uintptr_t)gcItem - GCHeap::kBlockSize);
1302 bits = GetPageMapValue((uintptr_t)gcItem);
1303 }
1304 // can't use GCLargeAlloc::FindBeginning it asserts on header pointers
1305 realItem = GCLargeAlloc::GetLargeBlock(gcItem)->GetObject();
1306 break;
1307 default:
1308 GCAssertMsg(allowGarbage, "FindBeginningGuarded must not be called on non-managed addresses")do { } while (0);
1309 // The NULL return is a documented effect of this function, and is desired, despite
1310 // the assert above.
1311 return NULL__null;
1312 }
1313 return GetUserPointer(realItem)realItem;
1314 }
1315
1316 void GC::Mark()
1317 {
1318 markerActive++;
1319 while(m_incrementalWork.Count()) {
1320 const void* ptr;
1321 if ((ptr = m_incrementalWork.Pop_GCObject()) != NULL__null)
1322 MarkItem_GCObject(ptr);
1323 else
1324 MarkTopItem_NonGCObject();
1325 }
1326 markerActive--;
1327 }
1328
1329 void GC::Mark(uint32_t count)
1330 {
1331 // Caller's responsibility to make the marker active!
1332 GCAssert(markerActive > 0)do { } while (0);
1333 for(unsigned int i=0; i<count && !m_incrementalWork.IsEmpty() ; i++) {
1334 const void* ptr;
1335 if ((ptr = m_incrementalWork.Pop_GCObject()) != NULL__null)
1336 MarkItem_GCObject(ptr);
1337 else
1338 MarkTopItem_NonGCObject();
1339 }
1340 }
1341
1342 // This must not trigger OOM handling. It is /possible/ to restructure the
1343 // function to allow the use of heapAlloc() and heapFree() but the contortions
1344 // and resulting fragility really do not seem worth it. (Consider that
1345 // heapAlloc can trigger OOM which can invoke GC which can run finalizers
1346 // which can perform allocation that can cause MarkGCPages to be reentered.)
1347 //
1348 // The use of non-OOM functions may of course lead to worse OOM behavior, but
1349 // if the allocation of large page maps cause OOM events as a result of
1350 // increased fragmentation then the proper fix would be to restructure the page
1351 // map to be less monolithic.
1352 //
1353 // (See Bugzilla 551833.)
1354
1355 void GC::MarkGCPages(void *item, uint32_t numPages, PageMap::PageType to)
1356 {
1357 pageMap.ExpandSetAll(heap, item, numPages, to);
1358 }
1359
1360 void GC::UnmarkGCPages(void *item, uint32_t numpages)
1361 {
1362 pageMap.ClearAddrs(item, numpages);
1363 }
1364
1365 void GC::CleanStack(boolbool force)
1366 {
1367 if(!force && (stackCleaned || rememberedStackTop == 0))
1368 return;
1369 stackCleaned = truetrue;
1370 VMPI_callWithRegistersSaved(GC::DoCleanStack, this);
1371 }
1372
1373 /*static*/
1374 void GC::DoCleanStack(void* stackPointer, void* arg)
1375 {
1376 GC* gc = (GC*)arg;
1377 if( ((char*) stackPointer > (char*)gc->rememberedStackTop) && ((char *)gc->stackEnter > (char*)stackPointer)) {
1378 size_t amount = (char*)stackPointer - (char*)gc->rememberedStackTop;
1379 VMPI_cleanStack(amount);
1380 }
1381 }
1382
1383 void GC::MarkQueueAndStack(boolbool scanStack)
1384 {
1385 if(scanStack) {
1386 MarkStackRoots();
1387 VMPI_callWithRegistersSaved(GC::DoMarkFromStack, this);
1388 }
1389 else
1390 Mark();
1391 }
1392
1393 /*static*/
1394 void GC::DoMarkFromStack(void* stackPointer, void* arg)
1395 {
1396 GC* gc = (GC*)arg;
1397 char* stackBase = (char*)gc->GetStackTop();
1398
1399 // this is where we will clear to when CleanStack is called
1400 if(gc->rememberedStackTop < stackPointer)
1401 gc->rememberedStackTop = stackPointer;
1402
1403 // Push the stack onto the mark stack and then mark synchronously until
1404 // everything reachable from the stack has been marked.
1405
1406 gc->Push_StackMemory(stackPointer, uint32_t(stackBase - (char*)stackPointer), stackPointer);
1407 gc->Mark();
1408 }
1409
1410 struct CreateRootFromCurrentStackArgs
1411 {
1412 GC* gc;
1413 void (*fn)(void* arg);
1414 void *arg;
1415 };
1416
1417 static void DoCreateRootFromCurrentStack(void* stackPointer, void* arg)
1418 {
1419 CreateRootFromCurrentStackArgs* context = (CreateRootFromCurrentStackArgs*)arg;
1420 MMgc::GC::AutoRCRootSegment root(context->gc, stackPointer, uint32_t((char*)context->gc->GetStackTop() - (char*)stackPointer));
1421 MMgc::GCAutoEnterPause mmgc_enter_pause(context->gc);
1422 context->fn(context->arg); // Should not be a tail call as those stack variables have destructors
1423 }
1424
1425 void GC::CreateRootFromCurrentStack(void (*fn)(void* arg), void* arg)
1426 {
1427 CreateRootFromCurrentStackArgs arg2;
1428 arg2.gc = this;
1429 arg2.fn = fn;
1430 arg2.arg = arg;
1431 VMPI_callWithRegistersSaved(DoCreateRootFromCurrentStack, &arg2);
1432 }
1433
1434 void GCRoot::init(GC* _gc, const void *_object, size_t _size, boolbool _isStackMemory, boolbool _isExactlyTraced)
1435 {
1436 GCAssert(_object != NULL || _size == 0)do { } while (0);
1437 GCAssert(_size % 4 == 0)do { } while (0);
1438 GCAssert(uintptr_t(_object) % 4 == 0)do { } while (0);
1439
1440 gc = _gc;
1441 object = _object;
1442 size = (_size | (_isExactlyTraced ? kIsExactlyTraced : 0) | (_isStackMemory ? kIsStackMemory : 0));
1443 markStackSentinel = 0;
1444 gc->AddRoot(this);
1445 }
1446
1447 GCRoot::GCRoot(GC * _gc, const void * _object, size_t _size, boolbool _isStackMemory, boolbool _isExactlyTraced)
1448 {
1449 init(_gc, _object, _size, _isStackMemory, _isExactlyTraced);
1450 }
1451
1452 GCRoot::GCRoot(GC * _gc, const void * _object, size_t _size)
1453 {
1454 init(_gc, _object, _size, falsefalse, falsefalse);
1455 }
1456
1457 GCRoot::GCRoot(GC * _gc)
1458 {
1459 init(_gc, this, FixedMalloc::GetFixedMalloc()->Size(this), falsefalse, falsefalse);
1460 }
1461
1462 GCRoot::GCRoot(GC * _gc, GCExactFlag)
1463 {
1464 init(_gc, this, FixedMalloc::GetFixedMalloc()->Size(this), falsefalse, truetrue);
1465 }
1466
1467 GCRoot::~GCRoot()
1468 {
1469 Destroy();
1470 }
1471
1472 void GCRoot::PrivilegedSet(const void* _object, uint32_t _size)
1473 {
1474 GCAssert(_object != NULL || _size == 0)do { } while (0);
1475 GCAssert(_size % 4 == 0)do { } while (0);
1476 GCAssert(uintptr_t(_object) % 4 == 0)do { } while (0);
1477
1478 SetMarkStackSentinelPointer(0); // Set of 0 is *not* equiv of Clear
1479 object = _object;
1480 size = (_size | (size & kFlags)); // Flags are sticky
1481 }
1482
1483 void GCRoot::GetConservativeWorkItem(const void*& _object, uint32_t& _size, boolbool& _isStackMemory) const
1484 {
1485 _object = object;
1486 _size = (uint32_t)(size & ~kFlags);
1487 _isStackMemory = (boolbool)(size & kIsStackMemory);
1488 if (_isStackMemory)
1489 {
1490 uint32_t newSize = uint32_t(((StackMemory*)this)->Top());
1491 GCAssert(newSize <= _size)do { } while (0);
1492 _size = newSize;
1493 }
1494 }
1495
1496 uintptr_t GCRoot::GetMarkStackSentinelPointer()
1497 {
1498 return markStackSentinel;
1499 }
1500
1501 void GCRoot::SetMarkStackSentinelPointer(uintptr_t sentinel)
1502 {
1503 // If the sentinel is not zero then there is a protector item on the
1504 // stack, and above it (usually right above it) may be a fragment of
1505 // the root. Clear them both since we need to rescan the whole thing.
1506 if (markStackSentinel != 0)
1507 gc->m_incrementalWork.ClearRootProtectorAndChunkAbove(markStackSentinel, this);
1508
1509 markStackSentinel = sentinel;
1510 }
1511
1512 void GCRoot::ClearMarkStackSentinelPointer()
1513 {
1514 markStackSentinel = 0;
1515 }
1516
1517 void GCRoot::Destroy()
1518 {
1519 PrivilegedSet(NULL__null, 0);
1520 if(gc) {
1521 gc->RemoveRoot(this);
1522 }
1523 gc = NULL__null;
1524 }
1525
1526 boolbool GCRoot::gcTrace(GC*, size_t)
1527 {
1528 GCAssert(!"GCRoot::gcTrace should never be called.")do { } while (0);
1529 return falsefalse;
1530 }
1531
1532 StackMemory::StackMemory(GC * _gc, const void * _object, size_t _size, boolbool _isExact)
1533 : GCRoot(_gc, _object, _size, truetrue, _isExact)
1534 {
1535 }
1536
1537 size_t StackMemory::Top()
1538 {
1539 return Size();
1540 }
1541
1542 void StackMemory::Set(const void * _object, size_t _size)
1543 {
1544 PrivilegedSet(_object, uint32_t(_size));
1545 }
1546
1547 GCCallback::GCCallback(GC * _gc) : gc(_gc)
1548 {
1549 gc->AddCallback(this);
1550 }
1551
1552 GCCallback::~GCCallback()
1553 {
1554 if(gc) {
1555 gc->RemoveCallback(this);
1556 }
1557 }
1558
1559 void GCCallback::Destroy()
1560 {
1561 if(gc) {
1562 gc->RemoveCallback(this);
1563 }
1564 gc = NULL__null;
1565 }
1566
1567#ifdef _DEBUG
1568 boolbool GC::IsPointerIntoGCObject(const void *item)
1569 {
1570 int bits = GetPageMapValueGuarded((uintptr_t)item);
1571 switch(bits) {
1572 case PageMap::kGCAllocPage:
1573 return GCAlloc::IsPointerIntoGCObject(item);
1574 case PageMap::kGCLargeAllocPageFirst:
1575 return item >= GCLargeAlloc::FindBeginning(item);
1576 case PageMap::kGCLargeAllocPageRest:
1577 return truetrue;
1578 default:
1579 return falsefalse;
1580 }
1581 }
1582#endif
1583
1584 boolbool GC::IsRCObjectSafe(const void *userptr)
1585 {
1586 return userptr != NULL__null && IsPointerToGCObject(GetRealPointer(userptr)userptr) && GC::IsRCObject(userptr);
1587 }
1588
1589#if 0
1590 // this is a release ready tool for hunting down freelist corruption
1591 void GC::CheckFreelists()
1592 {
1593 GCAlloc **allocs = containsPointersAllocs;
1594 for (int l=0; l < GC::kNumSizeClasses; l++) {
1595 GCAlloc *a = allocs[l];
1596 GCAlloc::GCBlock *_b = a->m_firstBlock;
1597 while(_b) {
1598 void *fitem = _b->firstFree;
1599 void *prev = 0;
1600 while(fitem) {
1601 if((uintptr_t(fitem) & 7) != 0) {
1602 _asm int 3;
1603 break;
1604 }
1605 if((uintptr_t(fitem) & GCHeap::kBlockMask) != uintptr_t(_b))
1606 _asm int 3;
1607 prev = fitem;
1608 fitem = *(void**)fitem;
1609 }
1610 _b = _b->next;
1611 }
1612 }
1613 }
1614#endif
1615
1616#ifdef _DEBUG
1617
1618 void GC::RCObjectZeroCheck(RCObject *item)
1619 {
1620 size_t size = Size(item)/sizeof(uint32_t);
1621 uint32_t *p = (uint32_t*)item;
1622 uint32_t *limit = p+size;
1623 // skip vtable, first 4 bytes are cleared in Alloc
1624 p++;
1625#ifdef MMGC_64BIT
1626 p++; // vtable is 8-bytes
1627 size--;
1628#endif
1629 // in incrementalValidation mode manually deleted items
1630 // aren't put on the freelist so skip them
1631 if(incrementalValidation) {
1632 if(*p == uint32_t(GCHeap::FXFreedPoison))
1633 return;
1634 }
1635 for ( ; p < limit ; p++ )
1636 {
1637 if(*p)
1638 {
1639 PrintAllocStackTrace(item);
1640 GCAssertMsg(false, "RCObject didn't clean up itself.");
1641 // If you hit this assert, use the debugger to cast 'item' to the type indicated by the call stack
1642 // in the output (you'll have to qualify the scope - (coreplayer::View*) rather than (View*))
1643 // and check to see that all fields are 0 (even boolean members). This is a
1644 // performance optimization that lets the GC avoid zeroing the whole thing itself.
1645 }
1646 }
1647 }
1648
1649#endif
1650
1651 void GC::gclog(const char *format, ...)
1652 {
1653 char buf[4096];
1654 va_list argptr;
1655
1656 va_start(argptr, format);
1657 VMPI_vsnprintf(buf, ARRAY_SIZE(buf), format, argptr);
1658 va_end(argptr);
1659
1660 VMPI_log(buf);
1661
1662 // log gross stats any time anything interesting happens
1663 static bool g_in_gclog = false;
1664
1665 bool was_in_gclog;
1666 {
1667 MMGC_LOCK(heap->gclog_spinlock);
1668 was_in_gclog = g_in_gclog;
1669 g_in_gclog = true;
1670 }
1671
1672 if(!was_in_gclog && !destroying)
1673 heap->DumpMemoryInfo();
1674
1675 {
1676 MMGC_LOCK(heap->gclog_spinlock);
1677 g_in_gclog = was_in_gclog;
1678 }
1679 }
1680
1681 void GC::DumpAlloc(GCAlloc *a, size_t& internal_waste, size_t& overhead)
1682 {
1683 int inUse, maxAlloc;
1684 a->GetAllocStats(inUse, maxAlloc);
1685 inUse *= a->GetItemSize();
1686 maxAlloc *= a->GetItemSize();
1687
1688 overhead = maxAlloc-inUse;
1689 internal_waste = 0;
1690
1691 int efficiency = maxAlloc > 0 ? inUse * 100 / maxAlloc : 100;
1692 if(inUse) {
1693 const char *name = a->ContainsPointers() ? a->ContainsRCObjects() ? "rc" : "gc" : "opaque";
1694 if(heap->config.verbose)
1695 GCLog("[mem] gc[%d] %s allocator: %d%% efficiency %d bytes (%d kb) in use out of %d bytes (%d kb)\n", a->GetItemSize(), name, efficiency, inUse, inUse>>10, maxAlloc, maxAlloc>>10);
1696#ifdef MMGC_MEMORY_PROFILER
1697 if(heap->HooksEnabled())
1698 {
1699 size_t askSize = a->GetTotalAskSize();
1700 internal_waste = inUse - askSize;
1701 size_t internal_efficiency = askSize * 100 / inUse;
1702 if(heap->config.verbose)
1703 GCLog("\t\t\t\t %u%% internal efficiency %u bytes (%u kb) actually requested out of %d bytes(%d kb)\n", internal_efficiency, (uint32_t) askSize, (uint32_t)(askSize>>10), inUse, inUse>>10);
1704 }
1705#endif
1706 }
1707 }
1708
1709 void GC::DumpMemoryInfo()
1710 {
1711 size_t total = GetNumBlocks() * GCHeap::kBlockSize;
1712
1713 size_t ask;
1714 size_t allocated;
1715 GetUsageInfo(ask, allocated);
1716
1717 heap->log_percentage("[mem] \tmanaged overhead ", total-allocated, total);
1718#ifdef MMGC_MEMORY_PROFILER
1719 if(heap->HooksEnabled())
1720 heap->log_percentage("[mem] \tmanaged internal wastage", allocated-ask, allocated);
1721#endif
1722
1723 if(ticksToMillis(markTicks()) != 0 && bytesMarked() != 0) {
1724 uint32_t markRate = (uint32_t) (bytesMarked() / (1024 * ticksToMillis(markTicks()))); // kb/ms == mb/s
1725 GCLog("[mem] \tmark rate %u mb/s\n", markRate);
1726 }
1727 GCLog("[mem] \tmark increments %d\n", markIncrements());
1728 GCLog("[mem] \tsweeps %d \n", sweeps);
1729
1730 size_t total_overhead = 0;
1731 size_t total_internal_waste = 0;
1732 GCAlloc** allocators[] = {containsPointersRCAllocs, containsPointersAllocs, noPointersAllocs};
1733 for(int j = 0;j<3;j++)
1734 {
1735 GCAlloc** gc_alloc = allocators[j];
1736
1737 for(int i=0; i < kNumSizeClasses; i++)
1738 {
1739 size_t internal_waste;
1740 size_t overhead;
1741 DumpAlloc(gc_alloc[i], internal_waste, overhead);
1742 total_internal_waste += internal_waste;
1743 total_overhead += overhead;
1744 }
1745 }
1746 GCLog("Overhead %u bytes (%u kb)\n", (uint32_t)total_overhead, (uint32_t)(total_overhead>>10));
1747#ifdef MMGC_MEMORY_PROFILER
1748 if(heap->HooksEnabled())
1749 GCLog("Internal Wastage %u bytes (%u kb)\n", (uint32_t)total_internal_waste, (uint32_t)(total_internal_waste>>10));
1750#endif
1751 }
1752
1753#ifdef MMGC_MEMORY_PROFILER
1754 // It only makes sense to call this after an END_FinalizeAndSweep event and
1755 // before the next START_StartIncrementalMark event.
1756 void GC::DumpPauseInfo()
1757 {
1758 if (!nogc && incremental) {
1759 GCLog("[mem] \tpauses in GC, most recent (ms): startmark=%.2f incrementalmark=%.2f finalscan=%.2f finishmark=%.2f reap=%.2f\n",
1760 double(ticksToMicros(policy.timeMaxStartIncrementalMarkLastCollection)) / 1000.0,
1761 double(ticksToMicros(policy.timeMaxIncrementalMarkLastCollection)) / 1000.0,
1762 double(ticksToMicros(policy.timeMaxFinalRootAndStackScanLastCollection)) / 1000.0,
1763 double(ticksToMicros(policy.timeMaxFinalizeAndSweepLastCollection)) / 1000.0,
1764 double(ticksToMicros(policy.timeMaxReapZCTLastCollection)) / 1000.0);
1765 GCLog("[mem] \tpauses in GC, entire run (ms): startmark=%.2f incrementalmark=%.2f finalscan=%.2f finishmark=%.2f reap=%.2f\n",
1766 double(ticksToMicros(policy.timeMaxStartIncrementalMark)) / 1000.0,
1767 double(ticksToMicros(policy.timeMaxIncrementalMark)) / 1000.0,
1768 double(ticksToMicros(policy.timeMaxFinalRootAndStackScan)) / 1000.0,
1769 double(ticksToMicros(policy.timeMaxFinalizeAndSweep)) / 1000.0,
1770 double(ticksToMicros(policy.timeMaxReapZCT)) / 1000.0);
1771 GCLog("[mem] \tpause clustering in GC, most recent: gctime=%.2fms end-to-end=%.2fms; mutator efficacy: %.2f%%\n",
1772 double(ticksToMicros(policy.timeInLastCollection)) / 1000.0,
1773 double(ticksToMicros(policy.timeEndToEndLastCollection)) / 1000.0,
1774 policy.timeInLastCollection == 0 ? // Sometimes there are no collections
1775 100.0 :
1776 double(policy.timeEndToEndLastCollection - policy.timeInLastCollection) * 100.0 / double(policy.timeEndToEndLastCollection));
1777 }
1778 }
1779#endif // MMGC_MEMORY_PROFILER
1780
1781#ifdef _DEBUG
1782
1783 void GC::CheckFreelists()
1784 {
1785 for(int i=0; i < kNumSizeClasses; i++)
1786 {
1787 containsPointersAllocs[i]->CheckFreelist();
1788 containsPointersRCAllocs[i]->CheckFreelist();
1789 noPointersAllocs[i]->CheckFreelist();
1790 }
1791 }
1792
1793 void GC::UnmarkedScan(const void *mem, size_t size)
1794 {
1795 uintptr_t lowerBound = pageMap.MemStart();
1796 uintptr_t upperBound = pageMap.MemEnd();
1797
1798 uintptr_t *p = (uintptr_t *) mem;
1799 uintptr_t *end = p + (size / sizeof(void*));
1800
1801 while(p < end)
1802 {
1803 uintptr_t val = *p++;
1804
1805 if(val < lowerBound || val >= upperBound)
1806 continue;
1807
1808 // normalize and divide by 4K to get index
1809 int bits = GetPageMapValue(val);
1810 switch(bits)
1811 {
1812 case PageMap::kNonGC:
1813 continue;
1814 case PageMap::kGCAllocPage:
1815 GCAssert(GCAlloc::ConservativeGetMark((const void*) (val&~7), true));
1816 break;
1817 case PageMap::kGCLargeAllocPageFirst:
1818 GCAssert(GCLargeAlloc::ConservativeGetMark((const void*) (val&~7), true));
1819 break;
1820 default:
1821 GCAssertMsg(false, "Invalid pageMap value");
1822 break;
1823 }
1824 }
1825 }
1826
1827 // For every page in the address range known to this GC, scan the page conservatively
1828 // for pointers and assert that anything that looks like a pointer to an object
1829 // points to an object that's marked.
1830 //
1831 // FIXME: This looks like it could be prone to signaling false positives and crashes:
1832 // it scans memory that's marked kNonGC, and some of that memory could even be
1833 // unmapped at the VM level?
1834
1835 void GC::FindUnmarkedPointers()
1836 {
1837 if(findUnmarkedPointers)
1838 {
1839 uintptr_t m = pageMap.MemStart();
1840
1841 while(m < pageMap.MemEnd())
1842 {
1843 // divide by 4K to get index
1844 int bits = GetPageMapValue(m);
1845 if(bits == PageMap::kNonGC) {
1846 UnmarkedScan((const void*)m, GCHeap::kBlockSize);
1847 m += GCHeap::kBlockSize;
1848 } else if(bits == PageMap::kGCLargeAllocPageFirst) {
1849 GCLargeAlloc::LargeBlock *lb = (GCLargeAlloc::LargeBlock*)m;
1850 const void *item = GetUserPointer(lb->GetObject());
1851 if(GetMark(item) && lb->containsPointers) {
1852 UnmarkedScan(item, Size(item));
1853 }
1854 m += lb->GetNumBlocks() * GCHeap::kBlockSize;
1855 } else if(bits == PageMap::kGCAllocPage) {
1856 // go through all marked objects
1857 GCAlloc::GCBlock *b = (GCAlloc::GCBlock *) m;
1858 GCAlloc* alloc = (GCAlloc*)b->alloc;
1859 for (int i=0; i < alloc->m_itemsPerBlock; i++) {
1860 // If the mark is 0, delete it.
1861 void* item = (char*)b->items + alloc->m_itemSize*i;
1862 if (!GetMark(item) && b->containsPointers) {
1863 UnmarkedScan(GetUserPointer(item), alloc->m_itemSize - DebugSize());
1864 }
1865 }
1866
1867 m += GCHeap::kBlockSize;
1868 }
1869 }
1870 }
1871 }
1872
1873#endif // DEBUG
1874
1875 void GC::StartIncrementalMark()
1876 {
1877 policy.signal(GCPolicyManager::START_StartIncrementalMark); // garbage collection starts
1878
1879 GCAssert(!marking);
1880 GCAssert(!collecting);
1881 GCAssert(!Reaping()); // bugzilla 564800
1882
1883 lastStartMarkIncrementCount = markIncrements();
1884
1885 // set the stack cleaning trigger
1886 stackCleaned = false;
1887
1888 marking = true;
1889
1890 GCAssert(m_incrementalWork.Count() == 0);
1891 GCAssert(m_barrierWork.Count() == 0);
1892
1893 SweepNeedsSweeping();
1894
1895 // at this point every object should have no marks or be marked kFreelist
1896#ifdef _DEBUG
1897 for(int i=0; i < kNumSizeClasses; i++) {
1898 containsPointersRCAllocs[i]->CheckMarks();
1899 containsPointersAllocs[i]->CheckMarks();
1900 noPointersAllocs[i]->CheckMarks();
1901 }
1902#endif
1903
1904#ifdef MMGC_HEAP_GRAPH
1905 markerGraph.clear();
1906#endif
1907
1908 if (incremental)
1909 MarkNonstackRoots();
1910
1911 policy.signal(GCPolicyManager::END_StartIncrementalMark);
1912
1913 // FIXME (policy): arguably a bug to do this here if StartIncrementalMark has exhausted its quantum
1914 // doing eager sweeping.
1915
1916 if (incremental)
1917 IncrementalMark();
1918 }
1919
1920 // This is the default value for the 'interiorPtrs' flag for non-stack memory.
1921 // For stack memory the value is always 'true'.
1922
1923#ifdef MMGC_INTERIOR_PTRS
1924 #define MMGC_INTERIOR_PTRS_FLAG true
1925#else
1926 #define MMGC_INTERIOR_PTRS_FLAG false
1927#endif
1928
1929 // Note: The mark stack overflow logic depends on this calling MarkItem_* directly
1930 // for each of the roots.
1931 //
1932 // Note: Root splitting must be supported to support "stacks" (AS1 stack, alloca).
1933 // See https://bugzilla.mozilla.org/show_bug.cgi?id=634651 for more information.
1934 // If the API proposed there can land we can almost certainly stop splitting
1935 // objects here.
1936
1937 void GC::MarkNonstackRoots(bool deep)
1938 {
1939 MarkRoots(deep, false);
1940 }
1941
1942 void GC::MarkStackRoots()
1943 {
1944 MarkRoots(false, true);
1945 }
1946
1947 void GC::MarkRoots(bool deep, bool stackroots)
1948 {
1949 if (!stackroots) {
1950 // Mark objects owned by the GC as if they were rooted
1951 TraceLocation(&emptyWeakRef);
1952 TraceLocation(&lockedObjects);
1953 }
1954
1955 // Need to do this while holding the root lock so we don't end
1956 // up trying to scan a deleted item later, another reason to keep
1957 // the root set small.
1958
1959 MMGC_LOCK(m_rootListLock);
1960 markerActive++;
1961 GCRoot *r = m_roots;
1962 while(r) {
1963 if (r->IsExactlyTraced()) {
1964 bool result = r->gcTrace(this, 0);
1965 (void)result;
1966 GCAssertMsg(result == false, "A GCRoot tracer must never return true.");
1967 }
1968 else {
1969 const void* object;
1970 uint32_t size;
1971 bool isStackMemory;
1972 r->GetConservativeWorkItem(object, size, isStackMemory);
1973 if(object != NULL && isStackMemory == stackroots) {
1974 GCAssert(!IsPointerToGCPage(object));
1975 GCAssert(!IsPointerToGCPage((char*)object + size - 1));
1976 // If this root will be split push a sentinel item and store
1977 // a pointer to it in the root. This will allow us to clean
1978 // the stack if the root is deleted. See GCRoot::Destroy and
1979 // GC::HandleLargeItem
1980 if(size > kMarkItemSplitThreshold) {
1981 // Push a sentinel item for protection, and if it succeeded then
1982 // register the location of the sentinel item on the mark stack
1983 // with the root.
1984 if (Push_RootProtector(r))
1985 r->SetMarkStackSentinelPointer(m_incrementalWork.Top());
1986 }
1987 // Note: here we could push a kStackMemory instead of a kGCRoot, if stackroots==true.
1988 // That would mean that interior pointers from StackMemory roots would be recognized,
1989 // which may or may not be right - it's not something we've done so far.
1990 MarkItem_ConservativeOrNonGCObject(object, size, GCMarkStack::kLargeRootChunk, r, MMGC_INTERIOR_PTRS_FLAG);
1991 }
1992 }
1993 if (deep)
1994 Mark();
1995 r = r->next;
1996 }
1997 markerActive--;
1998 }
1999
2000 // Recover from a mark stack overflow.
2001 //
2002 // Mark stack overflow occurs when an item cannot be pushed onto the mark stack because
2003 // the top mark stack segment is full and a new segment can't be allocated. In
2004 // practice, any call to the GC::Push_* methods (or their callers, such as GC::Mark,
2005 // GC::MarkItem_*, GC::MarkNonStackRoots, GC::MarkStackRoots, GC::MarkQueueAndStack, and not
2006 // least the write barrier GC::TrapWrite) can cause a mark stack overflow.
2007 //
2008 // Since garbage collection cannot be allowed to abort the program as a result of
2009 // the allocation failure, but must run to completion, overflow is handled specially.
2010 // The mark stack Push method returns an error code, which is detected in GC::Push_*,
2011 // which in turn calls GC::SignalMarkStackOverflow to handle the overflow. Overflow
2012 // is handled by discarding some elements and setting the global m_markStackOverflow flag.
2013 //
2014 // Any code that needs to drive marking to completion - FinishIncrementalMark and Sweep
2015 // do, as they depend on the heap having been marked before running finalizers and
2016 // clearing out empty blocks - must test m_markStackOverflow: if it is set then it must
2017 // be cleared and the present method, GC::HandleMarkStackOverflow, is called to mop up.
2018 // The caller must perform this test repeatedly until the flag is no longer set.
2019 //
2020 // The job of HandleMarkStackOverflow is to find marked heap objects that point to
2021 // unmarked objects, and to mark those objects. Effectively it uses the heap as a
2022 // queue of candidate objects, thus minimizing the use of the mark stack. Since marking
2023 // uses the mark stack the marking performed by HandleMarkStackOverflow may also
2024 // overflow the mark stack, but once a marked object has been visited by
2025 // HandleMarkStackOverflow it will not point to any unmarked objects. There are object
2026 // graphs containing pointers from objects visited later to objects skipped earlier
2027 // that may require recovery to be run multiple times, if marking overflows the mark
2028 // stack, but each recovery pass will mark all objects where the reference is from
2029 // an earlier object to a later object, and all non-overflowing subgraphs of those
2030 // objects in either direction.
2031 //
2032 // Performance might be an issue as the restart may happen several times and the
2033 // scanning performed by HandleMarkStackOverflow covers the entire heap every time. It could
2034 // be more efficient to interleave restarting and marking (eg, never fill the mark stack
2035 // during restarting, but mark after filling it partly, and remember the place in the heap
2036 // where scanning left off, and then iterate), but we should cross that bridge if and when
2037 // restarting turns out to be a problem in practice. (If it does, caching more mark stack
2038 // segments may be a better first fix, too.)
2039
2040 void GC::HandleMarkStackOverflow()
2041 {
2042 // Crucial for completion that we do not push marked items. MarkItem_* handles this
2043 // for us: it pushes referenced objects that are not marked. (MarkNonstackRoots calls
2044 // MarkItem_* on each root.)
2045
2046 MarkNonstackRoots(true);
2047
2048 // For all iterator types, GetNextMarkedObject returns true if 'item' has been
2049 // updated to reference a marked, non-free object to mark, false if the allocator
2050 // instance has been exhausted.
2051
2052 void* ptr;
2053
2054 markerActive++;
2055
2056 for(int i=0; i < kNumSizeClasses; i++) {
2057 GCAllocIterator iter1(containsPointersRCAllocs[i]);
2058 while (iter1.GetNextMarkedObject(ptr)) {
2059 MarkItem_GCObject(ptr);
2060 Mark();
2061 }
2062 GCAllocIterator iter2(containsPointersAllocs[i]);
2063 while (iter2.GetNextMarkedObject(ptr)) {
2064 MarkItem_GCObject(ptr);
2065 Mark();
2066 }
2067 }
2068
2069 GCLargeAllocIterator iter3(largeAlloc);
2070 while (iter3.GetNextMarkedObject(ptr)) {
2071 MarkItem_GCObject(ptr);
2072 Mark();
2073 }
2074
2075 markerActive--;
2076 }
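// [Editor's sketch - not part of GC.cpp] The caller-side contract stated in the
// comment block above: whoever must drive marking to completion has to clear
// m_markStackOverflow, run the recovery, and repeat until the flag stays clear,
// because recovery itself may overflow the mark stack again. This restates the
// loop that FinishIncrementalMark() uses later in this file; it is written as a
// hypothetical GC member only so the private members are in scope.

void GC::ExampleDrainUntilMarkingComplete()   // hypothetical helper, not in MMgc
{
    while (m_markStackOverflow) {
        m_markStackOverflow = false;
        HandleMarkStackOverflow();   // may set m_markStackOverflow again ...
        FlushBarrierWork();          // ... and so may this ...
        Mark();                      // ... and this; hence the loop
    }
}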
2077
2078 // Signal that attempting to push 'item' onto 'stack' overflowed 'stack'.
2079 //
2080 // Either 'item' must be pushed onto the stack, replacing some other item there,
2081 // or any state information in the item that indicates that it is stacked must
2082 // be cleared, since it will not be pushed onto the stack. What to do?
2083 //
2084 // The literature (Jones & Lins) does not provide a lot of guidance. Intuitively it
2085 // seems that we want to maximize the number of items that remain on the stack so
2086 // that the marker will finish processing the maximal number of objects before we
2087 // enter the stack overflow recovery code (in HandleMarkStackOverflow, above). Ergo
2088 // we drop 'item' on the floor.
2089
2090 void GC::SignalMarkStackOverflow_GCObject(const void* p)
2091 {
2092 GCAssert(p != NULL);
2093 ClearQueued(p);
2094 m_markStackOverflow = true;
2095 }
2096
2097 void GC::SignalMarkStackOverflow_NonGCObject()
2098 {
2099 m_markStackOverflow = true;
2100 }
2101
2102 // HandleLargeItem handles work items that are too big to be
2103 // marked atomically. It does so by splitting the work into chunks;
2104 // the incremental marker then gets an opportunity to stop marking
2105 // after processing each chunk.
2106 //
2107 // An object is "too large" if its size is larger than kLargestAlloc.
2108 // There is an older technical reason for that having to do with
2109 // additional state space only being available in large objects,
2110 // as detailed in the following.
2111 //
2112 // Conceptually we split a large work item into two chunks: a
2113 // small head (which we mark now) and a large tail (which we push
2114 // onto the stack). THE HEAD MUST BE MARKED FIRST because this
2115 // ensures that the mark on the object is set immediately; that
2116 // is necessary for write barrier correctness.
2117 //
2118 // Why use kLargestAlloc as the split value?
2119 //
2120 // Unfortunately for correctness THE HEAD MUST ALSO BE MARKED LAST,
2121 // because the queued bit is traditionally used to prevent the object
2122 // from being deleted explicitly by GC::Free while the object is
2123 // on the mark stack. We can't have it both ways, so split objects
2124 // are protected against being freed by carrying another bit that
2125 // prevents deletion when it is set. We only have this extra bit on
2126 // large objects (where there's plenty of space). Thus the cutoff for
2127 // splitting is exactly kLargestObject: only large objects that can
2128 // carry this bit are split. (That reason predates the expansion
2129 // of per-object bits from four to eight. Now that we have more header
2130 // bits we could carry the bit everywhere and choose a different
2131 // threshold.)
2132 //
2133 // In addition, the queued flag still prevents the object from
2134 // being deleted.
2135 //
2136 // The free-protection flags are reset by a special GCWorkItem sentinel
2137 // that is pushed on the stack below the two parts of the object that
2138 // is being split; when the sentinel is later popped, it resets the flag
2139 // and returns.
2140 //
2141 // The complexity with the free-protection flags may go away if we
2142 // move to a write barrier that does not depend on the queued bit,
2143 // for example, a card-marking barrier.
2144 //
2145 // If a mark stack overflow occurs the large tail may be popped
2146 // and discarded. This is not a problem: the object as a whole
2147 // is marked, but points to unmarked storage, and the latter
2148 // objects will be picked up as per normal. Discarding the
2149 // tail is entirely equivalent to discarding the work items that
2150 // would result from scanning the tail.
2151 //
2152 // Large GCRoots are also split here. MarkNonstackRoots pushes a
2153 // sentinel item on the stack that will be right below the GCRoot.
2154 // If the GCRoot is deleted it uses the sentinel pointer to clear
2155 // the tail work item from GCRoot::Destroy along with the sentinel
2156 // itself.
2157 //
2158 // Conservatively marked objects are split so that the tail is a
2159 // non-gcobject regardless of whether the head is a gcobject.
2160 //
2161 // Exactly marked objects are not split per se, but state is kept on
2162 // the mark stack to allow the gcTrace method to be called repeatedly.
2163 //
2164 // When a large object is first encountered it is marked as visited, and
2165 // that's necessary for the write barrier. However, it remains on the queue
2166 // without being marked as queued (since marked+queued == free) and anyway
2167 // that would prevent it from being added to the write barrier's queue.
2168 // Thus it is possible for an object, or part of it, to be on both
2169 // queues at the same time. That's not incorrect, and it's not even
2170 // particularly inefficient, as the best we could do would be to pop
2171 // the object off the mark stack when it is pushed onto the barrier queue.
2172 //
2173 // Given partial marking of large objects it's not obvious that marking
2174 // will terminate (an object could be moved off the barrier stack by
2175 // IncrementalMark, then copied back onto the barrier stack by the write
2176 // barrier), but it does: mark work is driven by allocation, and when
2177 // allocation has reached a limit the final phase of GC is run which marks
2178 // whatever remains to be marked nonincrementally.
2179
2180 void GC::SplitItem_ConservativeOrNonGCObject(const void *userptr, uint32_t& size, GCMarkStack::TypeTag type, const void* baseptr)
2181 {
2182 if (type == GCMarkStack::kGCObject)
2183 {
2184 // An ounce of prevention...
2185 GCAssert(ContainsPointers(userptr));
2186
2187 // Exactly traced GCItems should have been handled inside MarkItem_GCObject
2188 GCAssert(!(GetGCBits(GetRealPointer(userptr)) & kVirtualGCTrace));
2189
2190 // Need to protect it against 'Free': push a magic item representing this object
2191 // that will prevent this split item from being freed, see comment block above.
2192
2193 GCLargeAlloc::ProtectAgainstFree(userptr);
2194 Push_LargeObjectProtector(userptr);
2195 }
2196
2197 const void *chunkptr = (const void*)((uintptr_t)userptr + kLargestAlloc);
2198 uint32_t chunksize = uint32_t(size - kLargestAlloc);
2199 switch (type) {
2200 case GCMarkStack::kStackMemory:
2201 Push_StackMemory(chunkptr, chunksize, baseptr);
2202 break;
2203
2204 case GCMarkStack::kLargeRootChunk:
2205 Push_LargeRootChunk(chunkptr, chunksize, baseptr);
2206 break;
2207
2208 case GCMarkStack::kGCObject:
2209 case GCMarkStack::kLargeObjectChunk:
2210 Push_LargeObjectChunk(chunkptr, chunksize, baseptr);
2211 break;
2212
2213 default:
2214 GCAssert(!"Unexpected item passed to SplitItem_ConservativeOrNonGCObject");
2215 }
2216 size = kMarkItemSplitThreshold;
2217 }
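// [Editor's sketch - not part of GC.cpp] The net effect of the splitting above on a
// single oversized conservative item: each pass scans a head of kMarkItemSplitThreshold
// bytes (== kLargestAlloc per the HandleLargeItem comments) and pushes the remainder
// back as a chunk, so an item of S bytes is consumed in roughly ceiling(S / threshold)
// passes. The threshold is a parameter here because its value is not shown in this report.

static unsigned example_scan_passes(unsigned itemBytes, unsigned splitThreshold)
{
    unsigned passes = 0;
    while (itemBytes > splitThreshold) {   // SplitItem_* pushes the tail ...
        itemBytes -= splitThreshold;       // ... and clamps the head that is scanned now
        ++passes;
    }
    return passes + 1;                     // the final, no-longer-oversized piece
}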
2218
2219 // The mark has been set on the object, and the mark work for the entire object
2220 // has been (or is about to be) signaled to the policy manager. The object's
2221 // gcTrace method has been called with cursor==0, and it returned 'true' indicating
2222 // that more marking is required.
2223 //
2224 // We keep the object marked though it remains on the queue, this means it may also
2225 // end up on the barrier stack while still on the mark stack but that's fine, and
2226 // inevitable.
2227 //
2228 // Here we set up for further marking by creating a mark state for a large object.
2229 // (If the object is not large we mark it synchronously in-line and just return.)
2230 //
2231 // The state consists of three entries: an item that protects the object against
2232 // deletion (because the object state is marked-not-queued it's not enough to look
2233 // at the queued bit in GC::Free), and two items that contain the cursor into the object.
2234 //
2235 // The large-object deletion protection item needs to be further down the stack than
2236 // the other two.
2237 //
2238 // In principle the entire state should go below the items that were pushed by the first
2239 // invocation of gcTrace() on the object, but that's only required to control mark stack
2240 // growth. It's OK to push them on top of the items already pushed; doing so
2241 // means the mark stack may be up to twice as large as it would have been had the
2242 // state been pushed below, but that's it. It is possible to push the state underneath,
2243 // but (a) it adds some code on the fast path of MarkItem_GCObject and (b) it
2244 // introduces some complexity in the code below, as the stack has to be rearranged to
2245 // allow elements to be inserted like that.
2246
2247 void GC::SplitExactGCObject(const void *userptr)
2248 {
2249 GCAssert((GetGCBits(GetRealPointer(userptr)) & (kVirtualGCTrace|kMark)) == (kVirtualGCTrace|kMark));
2250
2251 // If it's not a large object then mark it until we're done.
2252 // extensions/ST_mmgc_exact.st has a test that hits this case.
2253
2254 if (!GCLargeAlloc::IsLargeBlock(userptr))
2255 {
2256 size_t cursor = 1;
2257 while (((GCTraceableBase*)userptr)->gcTrace(this, cursor++))
2258 ;
2259 return;
2260 }
2261
2262 // We have a bona fide large object.
2263
2264 // Need to protect it against 'Free': push a magic item representing this object
2265 // that will prevent this split item from being freed, see comment block above.
2266
2267 GCLargeAlloc::ProtectAgainstFree(userptr);
2268 Push_LargeObjectProtector(userptr);
2269
2270 // Create a mark state for the large, exactly traced object.
2271 //
2272 // We have two words of information to store: the pointer to the object
2273 // (which we need in order to get the vtable) and the cursor value. The
2274 // cursor value is not really very well bounded, and we don't want the
2275 // sentinel value to have too many insignificant low-order bits, so we
2276 // need to use two items here.
2277 //
2278 // It's important that the top item triggers action in the marker and that
2279 // the bottom item is inert and is just discarded if it appears by itself,
2280 // which is possible in the presence of mark stack overflows.
2281
2282 // Save the state: initial cursor.
2283 Push_LargeExactObjectTail(userptr, 1);
2284 }
2285
2286 // Pop an item and handle it; GC objects should be handled in the caller.
2287
2288 void GC::MarkTopItem_NonGCObject()
2289 {
2290 switch (m_incrementalWork.PeekTypetag()) {
2291 default:
2292 GCAssert(!"Unhandled mark item tag");
2293 break;
2294
2295 case GCMarkStack::kLargeExactObjectTail: {
2296 const void* ptr;
2297 size_t cursor;
2298 m_incrementalWork.Pop_LargeExactObjectTail(ptr, cursor);
2299 MarkItem_ExactObjectTail(ptr, cursor);
2300 break;
2301 }
2302
2303 case GCMarkStack::kStackMemory: {
2304 const void* ptr;
2305 const void* baseptr;
2306 uint32_t size;
2307 m_incrementalWork.Pop_StackMemory(ptr, size, baseptr);
2308 MarkItem_ConservativeOrNonGCObject(ptr, size, GCMarkStack::kStackMemory, baseptr, true);
2309 break;
2310 }
2311
2312 case GCMarkStack::kLargeObjectChunk: {
2313 const void* ptr;
2314 const void* baseptr;
2315 uint32_t size;
2316 m_incrementalWork.Pop_LargeObjectChunk(ptr, size, baseptr);
2317 MarkItem_ConservativeOrNonGCObject(ptr, size, GCMarkStack::kLargeObjectChunk, baseptr, MMGC_INTERIOR_PTRS_FLAG);
2318 break;
2319 }
2320
2321 case GCMarkStack::kLargeRootChunk: {
2322 const void* ptr;
2323 const void* baseptr;
2324 uint32_t size;
2325 m_incrementalWork.Pop_LargeRootChunk(ptr, size, baseptr);
2326 MarkItem_ConservativeOrNonGCObject(ptr, size, GCMarkStack::kLargeRootChunk, baseptr, MMGC_INTERIOR_PTRS_FLAG);
2327 break;
2328 }
2329
2330 case GCMarkStack::kRootProtector: {
2331 const void* ptr;
2332 m_incrementalWork.Pop_RootProtector(ptr);
2333 GCRoot *sentinelRoot = (GCRoot*)ptr;
2334 // The GCRoot is no longer on the stack, clear the pointers into the stack.
2335 sentinelRoot->ClearMarkStackSentinelPointer();
2336 break;
2337 }
2338
2339 case GCMarkStack::kLargeObjectProtector: {
2340 const void* ptr;
2341 m_incrementalWork.Pop_LargeObjectProtector(ptr);
2342 // Unprotect an item that was protected earlier, see comment block above.
2343 GCLargeAlloc::UnprotectAgainstFree(ptr);
2344 break;
2345 }
2346 }
2347 }
2348
2349 // This will mark the item whether the item was previously marked or not.
2350 // The mark stack overflow logic depends on that.
2351 //
2352 // The common case is a small and exactly traced gc-item, and the marker
2353 // biases in favor of that, as well as large exactly traced gc-items.
2354 //
2355 // Large gc-items are recognizable by a header bit designating the object
2356 // as large, the bit is set in the large-object allocator.
2357
2358 void GC::MarkItem_GCObject(const void *userptr)
2359 {
2360 GCAssert(markerActive);
2361
2362 // Can't assert the general WorkItemInvariants here - some objects on the mark
2363 // stack may have been cleared out through explicit deletion or such objects
2364 // may have been transfered from the barrier stack, and there's nothing we
2365 // can do about it now. (Explicit deletion needs to be removed from the API.)
2366 // The exact marker still works because the mark-exactly bit is cleared in
2367 // AbortFree. However, explicitly deleted objects that are queued
2368 // will be marked conservatively and will sometimes show up as false positive
2369 // in the profiling of conservatively marked objects.
2370
2371 // The common case is an exactly traced gc item (and an important subcase is
2372 // an item that can be traced exactly by a single call to gcTrace).
2373
2374 uint32_t size = uint32_t(GC::Size(userptr));
2375 const void *realptr = GetRealPointer(userptr);
2376 GCAssert(GetGC(realptr) == this);
2377 GCAssert(IsPointerToGCObject(realptr));
2378 (void)realptr;
2379
2380#if defined VMCFG_EXACT_TRACING || defined VMCFG_SELECTABLE_EXACT_TRACING
2381 gcbits_t& bits = GetGCBits(realptr);
2382
2383 if (bits & kVirtualGCTrace)
2384 {
2385 // Inlined and merged SetMark, since we have the bits anyway.
2386 bits = (bits & ~kQueued) | kMark;
2387
2388 if (((GCTraceableBase*)userptr)->gcTrace(this, 0))
2389 SplitExactGCObject(userptr);
2390
2391 // EXACTGC INVESTIGATEME - Not clear if this could be done before marking.
2392 // EXACTGC INVESTIGATEME - We're signaling the mark work for the entire
2393 // object here, even if that object is being split. Can that go wrong somehow,
2394 // eg, will it upset the computation of the mark rate?
2395
2396 policy.signalExactMarkWork(size);
2397 return;
2398 }
2399#endif
2400
2401 MarkItem_ConservativeOrNonGCObject(userptr, size, GCMarkStack::kGCObject, userptr, MMGC_INTERIOR_PTRS_FLAG);
2402 }
2403
2404 void GC::MarkItem_ExactObjectTail(const void* userptr, size_t cursor)
2405 {
2406 // Set up for some more tracing.
2407 GCTraceableBase* exactlyTraced = reinterpret_cast<GCTraceableBase*>(const_cast<void*>(userptr));
2408
2409 // Save the new state.
2410 Push_LargeExactObjectTail(userptr, cursor+1);
2411 uintptr_t e1 = m_incrementalWork.Top();
2412
2413 if (!exactlyTraced->gcTrace(this, cursor))
2414 {
2415 // No more mark work, so clean up the mark state.
2416 m_incrementalWork.ClearItemAt(e1);
2417 }
2418 }
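// [Editor's sketch - not part of GC.cpp] What the cursor protocol above asks of a
// large, exactly traced object: gcTrace(gc, 0) runs first; returning true makes the
// marker call back with cursor 1, 2, ... (via the kLargeExactObjectTail state) until
// false is returned. The class, its members, the slice size and the base class are
// assumptions of this sketch; TracePointer is the entry point defined further down
// in this file, shown for a build without MMGC_HEAP_GRAPH.

class ExampleLargeArray : public MMgc::GCTraceableBase   // hypothetical type
{
public:
    size_t            m_length;    // number of entries
    MMgc::GCObject**  m_entries;   // the pointers to trace

    virtual bool gcTrace(MMgc::GC* gc, size_t cursor)
    {
        const size_t kSlice = 256;                 // entries traced per call (assumed)
        size_t start = cursor * kSlice;
        size_t end = start + kSlice;
        if (end > m_length) end = m_length;
        for (size_t i = start; i < end; i++)
            gc->TracePointer(m_entries[i]);        // queue or mark each referenced object
        return end < m_length;                     // true => marker calls again with cursor+1
    }
};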
2419
2420 void GC::MarkItem_ConservativeOrNonGCObject(const void *userptr, uint32_t size, GCMarkStack::TypeTag type, const void* baseptr, bool interiorPtrs)
2421 {
2422 // Conservative tracing.
2423
2424 // First we consider whether to split the object into multiple pieces.
2425 //
2426 // Ideally we'd choose a limit that isn't "too small" because then we'll
2427 // split too many objects, and isn't "too large" because then the mark
2428 // stack growth won't be throttled properly.
2429 //
2430 // See bugzilla #495049 for a discussion of this problem.
2431 //
2432 // See comments above HandleLargeItem for the mechanics of how this
2433 // is done and why kMarkItemSplitThreshold == kLargestAlloc.
2434
2435 if (size > kMarkItemSplitThreshold)
2436 SplitItem_ConservativeOrNonGCObject(userptr, size, type, baseptr);
2437
2438 if (type == GCMarkStack::kGCObject)
2439 SetMark(userptr);
2440
2441 policy.signalConservativeMarkWork(size);
2442#ifdef MMGC_CONSERVATIVE_PROFILER
2443 if (demos != NULL)
2444 {
2445 switch (type) {
2446 case GCMarkStack::kStackMemory:
2447 demos->accountForStack(size);
2448 break;
2449 case GCMarkStack::kLargeRootChunk:
2450 demos->accountForRoot(size);
2451 break;
2452 case GCMarkStack::kGCObject:
2453 GCAssert(!(GetGCBits(GetRealPointer(baseptr)) & kVirtualGCTrace));
2454 demos->accountForObject(baseptr);
2455 break;
2456 case GCMarkStack::kLargeObjectChunk:
2457 // Skip it - it's a chunk of a large object that's been split, it has already
2458 // been accounted for because the accounting for the header piece (kGCObject)
2459 // will account for the entire object
2460 break;
2461 default:
2462 GCAssert(!"Unknown tag in MarkItem_ConservativeOrNonGCObject");
2463 break;
2464 }
2465 }
2466#endif
2467
2468 uintptr_t *p = (uintptr_t*) userptr;
2469 uintptr_t *end = p + (size / sizeof(void*));
2470 while(p < end)
2471 {
2472 TraceConservativePointer(uintptr_t(*p), interiorPtrs HEAP_GRAPH_ARG(p));
2473 p++;
2474 }
2475 }
2476
2477 void GC::TraceConservativePointer(uintptr_t val, bool handleInteriorPtrs HEAP_GRAPH_ARG(uintptr_t* loc))
2478 {
2479#ifdef MMGC_POINTINESS_PROFILING
2480 uint32_t could_be_pointer = 0;
2481 uint32_t actually_is_pointer = 0;
2482#endif
2483 int bits;
2484#ifdef MMGC_VALGRIND
2485 if (handleInteriorPtrs) {
2486 VALGRIND_MAKE_MEM_DEFINED(&val, sizeof(val));
2487 }
2488#endif // MMGC_VALGRIND
2489
2490 if(val < pageMap.MemStart() || val >= pageMap.MemEnd())
2491 goto end;
2492
2493#ifdef MMGC_POINTINESS_PROFILING
2494 could_be_pointer++;
2495#endif
2496
2497 // normalize and divide by 4K to get index
2498 bits = GetPageMapValue(val);
2499
2500 if (bits == PageMap::kGCAllocPage)
2501 {
2502 const void *item;
2503 int itemNum;
2504 GCAlloc::GCBlock *block = (GCAlloc::GCBlock*) (val & GCHeap::kBlockMask);
2505
2506 if (handleInteriorPtrs)
2507 {
2508 item = (void*) val;
2509
2510 // guard against bogus pointers to the block header
2511 if(item < block->items)
2512 goto end;
2513
2514 itemNum = GCAlloc::GetObjectIndex(block, item);
2515
2516 // adjust |item| to the beginning of the allocation
2517 item = block->items + itemNum * block->size;
2518 }
2519 else
2520 {
2521 // back up to real beginning
2522 item = GetRealPointer((const void*) (val & ~7));
2523
2524 // guard against bogus pointers to the block header
2525 if(item < block->items)
2526 goto end;
2527
2528 itemNum = GCAlloc::GetObjectIndex(block, item);
2529
2530 // if |item| doesn't point to the beginning of an allocation,
2531 // it's not considered a pointer.
2532 if (block->items + itemNum * block->size != item)
2533 {
2534#ifdef MMGC_64BIT
2535 // Doubly-inherited classes have two vtables so are offset 8 more bytes than normal.
2536 // Handle that here (shows up with PlayerScriptBufferImpl object in the Flash player)
2537 if ((block->items + itemNum * block->size + sizeof(void *)) == item)
2538 item = block->items + itemNum * block->size;
2539 else
2540#endif // MMGC_64BIT
2541 goto end;
2542 }
2543 }
2544
2545#ifdef MMGC_POINTINESS_PROFILING
2546 actually_is_pointer++;
2547#endif
2548
2549 gcbits_t& bits2 = block->bits[GCAlloc::GetBitsIndex(block, item)];
2550 if ((bits2 & (kMark|kQueued)) == 0)
2551 {
2552 uint32_t itemSize = block->size - (uint32_t)DebugSize();
2553 if(block->containsPointers)
2554 {
2555 const void *realItem = GetUserPointer(item);
2556 // EXACTGC INVESTIGATEME - is recursive marking still a good idea? Note that we
2557 // expect the bulk of the marking to be exact, and exact marking is not recursive,
2558 // it always uses the mark stack. (On the other hand it's type-aware and can
2559 // avoid pushing leaf objects onto the mark stack.) If conservative marking is
2560 // merely a last-ditch mechanism then there's little reason to assume that
2561 // recursive marking will buy us much here.
2562 uintptr_t thisPage = val & GCHeap::kBlockMask;
2563 if(((uintptr_t)realItem & GCHeap::kBlockMask) != thisPage || mark_item_recursion_control == 0)
2564 {
2565 bits2 |= kQueued;
2566 Push_GCObject(realItem);
2567 }
2568 else
2569 {
2570 mark_item_recursion_control--;
2571 MarkItem_GCObject(realItem);
2572 mark_item_recursion_control++;
2573 }
2574 }
2575 else
2576 {
2577 bits2 |= kMark;
2578 policy.signalPointerfreeMarkWork(itemSize);
2579 }
2580#ifdef MMGC_HEAP_GRAPH
2581 markerGraph.edge(loc, GetUserPointer(item));
2582#endif
2583 }
2584 }
2585 else if (bits == PageMap::kGCLargeAllocPageFirst || (handleInteriorPtrs && bits == PageMap::kGCLargeAllocPageRest))
2586 {
2587 const void* item;
2588
2589 if (handleInteriorPtrs)
2590 {
2591 if (bits == PageMap::kGCLargeAllocPageFirst)
2592 {
2593 // guard against bogus pointers to the block header
2594 if ((val & GCHeap::kOffsetMask) < sizeof(GCLargeAlloc::LargeBlock))
2595 goto end;
2596
2597 item = (void *) ((val & GCHeap::kBlockMask) | sizeof(GCLargeAlloc::LargeBlock));
2598 }
2599 else
2600 {
2601 item = GetRealPointer(FindBeginning((void *) val));
2602 }
2603 }
2604 else
2605 {
2606 // back up to real beginning
2607 item = GetRealPointer((const void*) (val & ~7));
2608
2609 // If |item| doesn't point to the start of the page, it's not
2610 // really a pointer.
2611 if(((uintptr_t) item & GCHeap::kOffsetMask) != sizeof(GCLargeAlloc::LargeBlock))
2612 goto end;
2613 }
2614
2615#ifdef MMGC_POINTINESS_PROFILING
2616 actually_is_pointer++;
2617#endif
2618
2619 GCLargeAlloc::LargeBlock *b = GCLargeAlloc::GetLargeBlock(item);
2620 if((b->flags[0] & (kQueued|kMark)) == 0)
2621 {
2622 uint32_t itemSize = b->size - (uint32_t)DebugSize();
2623 if(b->containsPointers)
2624 {
2625 b->flags[0] |= kQueued;
2626 Push_GCObject(GetUserPointer(item));
2627 }
2628 else
2629 {
2630 // doesn't need marking go right to black
2631 b->flags[0] |= kMark;
2632 policy.signalPointerfreeMarkWork(itemSize);
2633 }
2634#ifdef MMGC_HEAP_GRAPH
2635 markerGraph.edge(loc, GetUserPointer(item));
2636#endif
2637 }
2638 }
2639 end: ;
2640#ifdef MMGC_POINTINESS_PROFILING
2641 policy.signalDemographics(size/sizeof(void*), could_be_pointer, actually_is_pointer);
2642#endif
2643 }
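// [Editor's sketch - not part of GC.cpp] The non-interior normalization above, written
// out as plain arithmetic: strip the 3 tag bits, reject values that land in the block
// header, derive the object index, and accept the value only if it points at the start
// of an allocation. Blocks are 4096 bytes per the size-class comment at the top of the
// file; the index math mirroring GCAlloc::GetObjectIndex is an assumption of this sketch.

static const void* example_normalize_candidate(uintptr_t val, uintptr_t blockItems, size_t itemSize)
{
    uintptr_t candidate = val & ~uintptr_t(7);                  // drop the atom tag bits
    if (candidate < blockItems)                                 // points into the block header
        return 0;
    size_t itemNum = (candidate - blockItems) / itemSize;       // GetObjectIndex equivalent
    uintptr_t itemStart = blockItems + itemNum * itemSize;      // snap to the allocation start
    return (itemStart == candidate) ? (const void*)itemStart : 0;
}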
2644
2645 void GC::TracePointer(void* obj /* user pointer */ HEAP_GRAPH_ARG(const uintptr_t *loc))
2646 {
2647 GCAssert(((uintptr_t)obj & 7) == 0);
2648
2649 if (obj == NULL)
2650 return;
2651
2652 gcbits_t& bits2 = GetGCBits(GetRealPointer(obj));
2653
2654 // Explicit deletion of objects can create dangling pointers which will hit
2655 // this assert.
2656 // More information here: https://bugzilla.mozilla.org/show_bug.cgi?id=626684
2657 // If you hit this assert, report with detailed information on the above bug
2658 GCAssertMsg((bits2 & (kMark|kQueued)) != (kMark|kQueued), "Dangling pointers hit during exact tracing, please report with the details on Bugzilla bug# 626684");
2659
2660 GCAssert(ContainsPointers(obj) || (bits2 & kQueued) == 0);
2661
2662 // EXACTGC OPTIMIZEME: Here we can fold the ContainsPointers test into
2663 // the marked test if there's a bit in the header indicating pointerfulness.
2664
2665 if ((bits2 & (kMark|kQueued)) == 0)
2666 {
2667 if (ContainsPointers(obj)) {
2668 bits2 |= kQueued;
2669 Push_GCObject(obj);
2670 }
2671 else {
2672 bits2 |= kMark;
2673 policy.signalPointerfreeMarkWork(Size(obj));
2674 }
2675#ifdef MMGC_HEAP_GRAPH
2676 markerGraph.edge(loc, obj);
2677#endif
2678 }
2679 }
2680
2681 void GC::TraceAtomValue(avmplus::Atom a HEAP_GRAPH_ARG(avmplus::Atom *loc))
2682 {
2683 switch (a & 7)
2684 {
2685#ifdef DEBUG
2686 case avmplus::AtomConstants::kUnusedAtomTag:
2687 // Tracing is not necessary here. Generic GCObjects should have been laundered
2688 // through the genericObjectToAtom API and will be tagged kDoubleType (see comments
2689 // below for why that is a botch). Anything tagged with kUnusedAtomTag is non-GC
2690 // storage.
2691 break;
2692#endif
2693 case avmplus::AtomConstants::kObjectType:
2694 case avmplus::AtomConstants::kStringType:
2695 case avmplus::AtomConstants::kNamespaceType:
2696 TracePointer((GCTraceableBase*)(a & ~7) HEAP_GRAPH_ARG((uintptr_t*)loc));
2697 break;
2698 case avmplus::AtomConstants::kDoubleType:
2699 // Bugzilla 594870: Must trace semi-conservatively because of the genericObjectToAtom API:
2700 // the object we're looking at may or may not contain pointers, and we don't know.
2701 TracePointer((void*)(a & ~7) HEAP_GRAPH_ARG((uintptr_t*)loc));
2702 break;
2703 }
2704 }
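// [Editor's sketch - not part of GC.cpp] How the two entry points above are typically
// reached: an exactly traced object's gcTrace reports each GC pointer field and each
// tagged Atom slot to the collector. The class and its fields are hypothetical, and a
// build without MMGC_HEAP_GRAPH is assumed so the extra location argument disappears.

class ExampleExactObject : public MMgc::GCTraceableBase   // hypothetical type
{
public:
    MMgc::GCObject* m_child;   // ordinary GC object reference
    avmplus::Atom   m_value;   // tagged value slot

    virtual bool gcTrace(MMgc::GC* gc, size_t /*cursor*/)
    {
        gc->TracePointer(m_child);       // marks or queues the referenced object
        gc->TraceAtomValue(m_value);     // dispatches on the atom's tag, as shown above
        return false;                    // small object: traced completely in one call
    }
};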
2705
2706 void GC::IncrementalMark()
2707 {
2708 GCAssert(!markerActive);
2709
2710 uint32_t time = incrementalValidation ? 1 : policy.incrementalMarkMilliseconds();
2711#ifdef _DEBUG
2712 time = 1;
2713#endif
2714
2715 SAMPLE_FRAME("[mark]", core());
2716
2717 // Don't force collections to finish if the mark stack is empty;
2718 // let the allocation budget be used up.
2719
2720 if(m_incrementalWork.Count() == 0) {
2721 CheckBarrierWork();
2722 if (m_incrementalWork.Count() == 0) {
2723 // Policy triggers off these signals, so we still need to send them.
2724 policy.signal(GCPolicyManager::START_IncrementalMark);
2725 policy.signal(GCPolicyManager::END_IncrementalMark);
2726 return;
2727 }
2728 }
2729
2730 markerActive++;
2731
2732 policy.signal(GCPolicyManager::START_IncrementalMark);
2733
2734 // FIXME: tune this so that getPerformanceCounter() overhead is noise
2735 //
2736 // *** NOTE ON THREAD SAFETY ***
2737 //
2738 // If anyone feels like writing code that updates checkTimeIncrements (it
2739 // used to be 'static' instead of 'const'), beware that using a static var
2740 // here requires a lock. It may also be wrong to have one shared global for
2741 // the value, and in any case it may belong in the policy manager.
2742
2743 const unsigned int checkTimeIncrements = 100;
2744 uint64_t start = VMPI_getPerformanceCounter();
2745
2746 uint64_t numObjects=policy.objectsMarked();
2747 uint64_t objSize=policy.bytesMarked();
2748
2749 uint64_t ticks = start + time * VMPI_getPerformanceFrequency() / 1000;
2750 do {
2751 // EXACTGC OPTIMIZEME: Count can overestimate the amount of work on the stack
2752 // because exactly traced large split items occupy two slots on the
2753 // mark stack. Thus the loop must also test for whether the stack
2754 // is empty.
2755 unsigned int count = m_incrementalWork.Count();
2756 if (count == 0) {
2757 CheckBarrierWork();
2758 count = m_incrementalWork.Count();
2759 if (count == 0)
2760 break;
2761 }
2762 if (count > checkTimeIncrements) {
2763 count = checkTimeIncrements;
2764 }
2765 Mark(count);
2766 SAMPLE_CHECK();
2767 } while(VMPI_getPerformanceCounter() < ticks);
2768
2769 policy.signal(GCPolicyManager::END_IncrementalMark);
2770
2771 markerActive--;
2772
2773 if(heap->Config().gcstats) {
2774 numObjects = policy.objectsMarked() - numObjects;
2775 objSize = policy.bytesMarked() - objSize;
2776 double millis = duration(start);
2777 size_t kb = size_t(objSize)>>10;
2778 gclog("[mem] mark(%d) %d objects (%d kb %d mb/s) in %.2f millis (%.4f s)\n",
2779 markIncrements() - lastStartMarkIncrementCount, int(numObjects), kb,
2780 uint32_t(double(kb)/millis), millis, duration(t0)/1000);
2781 }
2782 }
2783
2784 void GC::FinishIncrementalMark(bool scanStack, bool okToShrinkHeapTarget)
2785 {
2786 // Don't finish an incremental mark (i.e., sweep) if we
2787 // are in the midst of a ZCT reap.
2788 if (Reaping())
2789 {
2790 return;
2791 }
2792
2793 // It is possible, probably common, to enter FinishIncrementalMark without the
2794 // mark queue being empty. Clear out the queue synchronously here, we don't
2795 // want anything pending when we start marking roots: multiple active root protectors
2796 // for the same root is a mess.
2797
2798 Mark();
2799
2800 // Force repeated restarts and marking until we're done. For discussion
2801 // of completion, see the comments above HandleMarkStackOverflow.
2802
2803 while (m_markStackOverflow) {
2804 m_markStackOverflow = false;
2805 HandleMarkStackOverflow(); // may set
2806 FlushBarrierWork(); // m_markStackOverflow
2807 Mark(); // to true again
2808 }
2809
2810 // finished in Sweep
2811 sweepStart = VMPI_getPerformanceCounter();
2812
2813 // mark roots again, could have changed (alternative is to put WB's on the roots
2814 // which we may need to do if we find FinishIncrementalMark taking too long)
2815
2816 policy.signal(GCPolicyManager::START_FinalRootAndStackScan);
2817
2818 GCAssert(!m_markStackOverflow);
2819
2820 FlushBarrierWork();
2821 MarkNonstackRoots();
2822 MarkQueueAndStack(scanStack);
2823
2824 // Force repeated restarts and marking until we're done. For discussion
2825 // of completion, see the comments above HandleMarkStackOverflow. Note
2826 // that we must use MarkQueueAndStack rather than plain Mark because there
2827 // is no guarantee that the stack was actually pushed onto the mark stack.
2828 // If we iterate several times there may be a performance penalty as a
2829 // consequence of that, but it's not likely that that will in fact happen,
2830 // and it's not obvious how to fix it.
2831
2832 while (m_markStackOverflow) {
2833 m_markStackOverflow = false;
2834 HandleMarkStackOverflow(); // may set
2835 FlushBarrierWork(); // m_markStackOverflow
2836 MarkQueueAndStack(scanStack); // to true again
2837 }
2838 ClearMarkStack(); // Frees any cached resources
2839 m_barrierWork.Clear();
2840 zct.Prune(); // Frees unused memory
2841
2842 policy.signal(GCPolicyManager::END_FinalRootAndStackScan);
2843
2844#ifdef _DEBUG
2845 // need to traverse all marked objects and make sure they don't contain
2846 // pointers to unmarked objects
2847 FindMissingWriteBarriers();
2848#endif
2849
2850 policy.signal(GCPolicyManager::START_FinalizeAndSweep);
2851 GCAssert(!collecting);
2852
2853 // Sweep is responsible for setting and clearing GC::collecting.
2854 // It also clears GC::marking.
2855
2856 GCAssert(m_incrementalWork.Count() == 0);
2857 GCAssert(m_barrierWork.Count() == 0);
2858 Sweep();
2859 GCAssert(m_incrementalWork.Count() == 0);
2860 GCAssert(m_barrierWork.Count() == 0);
2861
2862 policy.signal(okToShrinkHeapTarget ? GCPolicyManager::END_FinalizeAndSweep : GCPolicyManager::END_FinalizeAndSweepNoShrink); // garbage collection is finished
2863#ifdef MMGC_MEMORY_PROFILER
2864 if(heap->Config().autoGCStats)
2865 DumpPauseInfo();
2866#endif
2867 }
2868
2869#ifdef _DEBUG
2870 bool GC::IsWhite(const void *item)
2871 {
2872 // back up to real beginning
2873 item = GetRealPointer((const void*) item);
2874
2875 int bits = GetPageMapValueGuarded((uintptr_t)item);
2876 switch(bits) {
2877 case PageMap::kGCAllocPage:
2878 return GCAlloc::IsWhite(item);
2879 case PageMap::kGCLargeAllocPageFirst:
2880 return GCLargeAlloc::IsWhite(item);
2881 }
2882 return false;
2883 }
2884#endif // _DEBUG
2885
2886 // Here the purpose is to shift some work from the barrier mark stack to the regular
2887 // mark stack /provided/ the barrier mark stack seems very full. The regular mark stack
2888 // is normally empty.
2889 //
2890 // To avoid copying data, we will only move entire (full) segments, because they can
2891 // be moved by changing some pointers and counters. We will never move a non-full
2892 // segment.
2893
2894 void GC::CheckBarrierWork()
2895 {
2896 if (m_barrierWork.InactiveSegments() < 9) // 9 is pretty arbitrary
2897 return;
2898 m_incrementalWork.TransferOneInactiveSegmentFrom(m_barrierWork);
2899 }
2900
2901 // Here the purpose is to shift all the work from the barrier mark stack to the regular
2902 // mark stack, unconditionally. This may mean copying mark work from one stack to
2903 // the other (for work that's in a non-full segment), but full segments can just be
2904 // moved by changing some pointers and counters.
2905 //
2906 // Note it's possible for this to cause a mark stack overflow, and the caller must
2907 // deal with that.
2908
2909 void GC::FlushBarrierWork()
2910 {
2911#ifdef _DEBUG
2912 uint32_t bwork_count_old = m_barrierWork.Count();
2913#endif
2914
2915 if (!m_incrementalWork.TransferEverythingFrom(m_barrierWork)) {
2916 SignalMarkStackOverflow_NonGCObject();
2917 m_incrementalWork.TransferSomethingFrom(m_barrierWork);
2918 }
2919
2920 // Bugzilla 642792, 654727: must ensure progress can be made
2921 // in mark stack overflow loops, or else we will infinite loop.
2922 GCAssert(bwork_count_old == 0 || m_incrementalWork.Count() > 0);
2923 }
2924
2925 void GC::WriteBarrierTrap(const void *container)
2926 {
2927 if (BarrierActive())
2928 InlineWriteBarrierTrap(container);
2929 }
2930
2931 void GC::privateWriteBarrier(const void *container, const void *address, const void *value)
2932 {
2933 privateInlineWriteBarrier(container, address, value);
2934 }
2935
2936 void GC::privateWriteBarrierRC(const void *container, const void *address, const void *value)
2937 {
2938 privateInlineWriteBarrierRC(container, address, value);
2939 }
2940
2941 /* static */
2942 void GC::WriteBarrierRC(const void *address, const void *value)
2943 {
2944 InlineWriteBarrierRC(address, value);
2945 }
2946
2947 /* static */
2948 void GC::WriteBarrierRC_ctor(const void *address, const void *value)
2949 {
2950 InlineWriteBarrierRC_ctor(address, value);
2951 }
2952
2953 /* static */
2954 void GC::WriteBarrierRC_dtor(const void *address)
2955 {
2956 InlineWriteBarrierRC_dtor(address);
2957 }
2958
2959 /* static */
2960 void GC::WriteBarrier(const void *address, const void *value)
2961 {
2962 InlineWriteBarrier(address, value);
2963 }
2964
2965 // It might appear that this can be optimized easily, but not so - there's a
2966 // lot of logic hiding here, and little to gain from hand-inlining.
2967
2968 void GC::privateConservativeWriteBarrierNoSubstitute(const void *address)
2969 {
2970 GCAssert(BarrierActive());
2971 if(IsPointerToGCPage(address))
2972 InlineWriteBarrierTrap(FindBeginningFast(address));
2973 }
2974
2975 void GC::WriteBarrierNoSubstitute(const void *container, const void *value)
2976 {
2977 (void)value; // Can't get rid of this parameter now; part of an existing API
2978
2979 GCAssert(container != NULL);
2980 GCAssert(IsPointerToGCPage(container));
2981 GCAssert(FindBeginningGuarded(container) == container);
2982 if (BarrierActive())
2983 InlineWriteBarrierTrap(container);
2984 }
2985
2986 // Add 'container' to the remembered set.
2987 //
2988 // IncrementalMark may move a segment off the remembered set;
2989 // FinishIncrementalMark will take care of what remains.
2990 //
2991 // Observe that if adding the item to the remembered set (a secondary mark stack)
2992 // fails, then the item is just pushed onto the regular mark stack; if that too
2993 // fails then normal stack overflow handling takes over. That is what we want,
2994 // as it guarantees that the item will be traced again.
2995
2996 void GC::WriteBarrierHit(const void* container)
2997 {
2998 GCAssert(IsPointerToGCObject(GetRealPointer(container)));
2999 if (collecting)
3000 {
3001 // It's the job of the allocators to make sure the rhs is marked if
3002 // it is allocated on a page that's not yet swept. In particular,
3003 // the barrier is not needed to make sure that that object is kept
3004 // alive.
3005 //
3006 // Ergo all we need to do here is revert the lhs to marked and return.
3007 SetMark(container);
3008 return;
3009 }
3010 // Note, pushing directly here works right now because Push_GCObject never
3011 // performs any processing (breaking up a large object into shorter
3012 // segments, for example).
3013#ifdef _DEBUG
3014 // If this fails it's because the write barrier hits on an already-deleted
3015 // object - which is a bug to be sure, but we don't know if we may have to
3016 // support it anyway.
3017 WorkItemInvariants_GCObject(container);
3018#endif
3019 if (!m_barrierWork.Push_GCObject(container))
3020 Push_GCObject(container);
3021 }
3022
3023 void GC::movePointers(void* dstObject, void **dstArray, uint32_t dstOffset, const void **srcArray, uint32_t srcOffset, size_t numPointers)
3024 {
3025 if(BarrierActive() && GetMark(dstObject) && ContainsPointers(dstObject) &&
3026 // don't push small items that are moving pointers inside the same array
3027 (dstArray != srcArray || Size(dstObject) > kMarkItemSplitThreshold)) {
3028 // this could be optimized to just re-scan the dirty region
3029 InlineWriteBarrierTrap(dstObject);
3030 }
3031 VMPI_memmove(dstArray + dstOffset, srcArray + srcOffset, numPointers * sizeof(void*));
3032 }
3033
3034 void GC::movePointersWithinBlock(void** array, uint32_t dstOffsetInBytes, uint32_t srcOffsetInBytes, size_t numPointers, bool zeroEmptied)
3035 {
3036 if (srcOffsetInBytes == dstOffsetInBytes || numPointers == 0)
3037 return;
3038
3039 if (BarrierActive() &&
3040 GetMark(array) &&
3041 ContainsPointers(array) &&
3042 // don't push small items that are moving pointers inside the same array
3043 Size(array) > kMarkItemSplitThreshold)
3044 {
3045 // this could be optimized to just re-scan the dirty region
3046 InlineWriteBarrierTrap(array);
3047 }
3048
3049 size_t const bytesToMove = numPointers * sizeof(void*);
3050 VMPI_memmove((char*)array + dstOffsetInBytes, (char*)array + srcOffsetInBytes, bytesToMove);
3051
3052 if (zeroEmptied)
3053 {
3054 size_t zeroOffsetInBytes, bytesToZero;
3055 if (srcOffsetInBytes > dstOffsetInBytes)
3056 {
3057 // moving down, zero the end
3058 bytesToZero = srcOffsetInBytes - dstOffsetInBytes;
3059 zeroOffsetInBytes = dstOffsetInBytes + bytesToMove;
3060 }
3061 else
3062 {
3063 // moving up, zero the start
3064 bytesToZero = dstOffsetInBytes - srcOffsetInBytes;
3065 zeroOffsetInBytes = srcOffsetInBytes;
3066 }
3067 VMPI_memset((char*)array + zeroOffsetInBytes, 0, bytesToZero);
3068 }
3069 }
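// [Editor's note - worked example, not part of GC.cpp] The zeroEmptied arithmetic above
// for a concrete case, assuming 8-byte pointers: moving numPointers = 4 entries from
// srcOffsetInBytes = 32 down to dstOffsetInBytes = 8 within the same block gives
//
//     bytesToMove       = 4 * 8  = 32
//     srcOffset > dstOffset               -> "moving down, zero the end"
//     bytesToZero       = 32 - 8 = 24
//     zeroOffsetInBytes = 8 + 32 = 40
//
// so bytes [8, 40) now hold the moved pointers and bytes [40, 64) - the slots the move
// vacated - are cleared to zero.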
3070
3071 void GC::reversePointersWithinBlock(void* mem, size_t offsetInBytes, size_t numPointers)
3072 {
3073 if (mem == NULL || numPointers <= 1)
3074 return;
3075
3076 if (BarrierActive() &&
3077 GetMark(mem) &&
3078 ContainsPointers(mem) &&
3079 // don't push small items that are moving pointers inside the same mem
3080 Size(mem) > kMarkItemSplitThreshold)
3081 {
3082 // this could be optimized to just re-scan the dirty region
3083 InlineWriteBarrierTrap(mem);
3084 }
3085
3086 void** array = (void**)((char*)mem + offsetInBytes);
3087 for (size_t i = 0, n = numPointers/2; i < n; i++)
3088 {
3089 size_t const i2 = numPointers - i - 1;
3090 void* const v = array[i];
3091 void* const v2 = array[i2];
3092 array[i] = v2;
3093 array[i2] = v;
3094 }
3095 }
3096
3097 uint32_t *GC::AllocBits(int numBytes, int sizeClass)
3098 {
3099 uint32_t *bits;
3100
3101 GCAssert(numBytes % 4 == 0);
3102
3103 #ifdef MMGC_64BIT // we use first 8-byte slot for the free list
3104 if (numBytes == 4)
3105 numBytes = 8;
3106 #endif
3107
3108 // hit freelists first
3109 if(m_bitsFreelists[sizeClass]) {
3110 bits = m_bitsFreelists[sizeClass];
3111 m_bitsFreelists[sizeClass] = *(uint32_t**)bits;
3112 VMPI_memset(bits, 0, sizeof(uint32_t*));
3113 return bits;
3114 }
3115
3116 if(!m_bitsNext) {
3117 // Bugzilla 551833: It's OK to use heapAlloc here (as opposed to
3118 // heap->AllocNoOOM, say) because the caller knows AllocBits()
3119 // can trigger OOM.
3120 m_bitsNext = (uint32_t*)heapAlloc(1);
3121 }
3122
3123 int leftOver = GCHeap::kBlockSize - ((uintptr_t)m_bitsNext & GCHeap::kOffsetMask);
3124 if(leftOver >= numBytes) {
3125 bits = m_bitsNext;
3126 if(leftOver == numBytes)
3127 m_bitsNext = 0;
3128 else
3129 m_bitsNext += numBytes/sizeof(uint32_t);
3130 } else {
3131 if(leftOver>=int(sizeof(void*))) {
3132 // put waste in freelist
3133 for(int i=0, n=kNumSizeClasses; i<n; i++) {
3134 GCAlloc *a = noPointersAllocs[i];
3135 if(!a->m_bitsInPage && a->m_numBitmapBytes <= leftOver) {
3136 FreeBits(m_bitsNext, a->m_sizeClassIndex);
3137 break;
3138 }
3139 }
3140 }
3141 m_bitsNext = 0;
3142 // recurse rather than duplicating code
3143 return AllocBits(numBytes, sizeClass);
3144 }
3145 return bits;
3146 }
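
The allocation strategy above (serve from a per-size-class freelist, otherwise carve from the tail of the current 4K block, starting a new block when the tail is too small) is easier to see in isolation. The following standalone sketch reproduces the pattern with plain calloc; it is not MMgc code and omits the 64-bit first-slot rule, the recycling of an unusable block tail, and OOM handling.

    #include <cstdlib>
    #include <cstring>

    // Standalone illustration of the bump-plus-freelist pattern used by AllocBits:
    // hand out small fixed-size chunks from a 4K block, keeping freed chunks on a
    // per-size-class freelist threaded through the chunks themselves.
    struct BitsArena {
        static const size_t kBlockSize = 4096;
        void*  freelist[16];   // one freelist head per size class (16 is arbitrary here)
        char*  next;           // bump pointer into the current block
        size_t left;           // bytes left in the current block

        BitsArena() : next(0), left(0) { memset(freelist, 0, sizeof(freelist)); }

        void* alloc(size_t bytes, int sizeClass) {
            if (freelist[sizeClass]) {               // 1. serve from the freelist first
                void* p = freelist[sizeClass];
                freelist[sizeClass] = *(void**)p;    //    unlink the head
                memset(p, 0, bytes);                 //    hand back zeroed memory
                return p;
            }
            if (left < bytes) {                      // 2. otherwise start a new block
                next = (char*)calloc(1, kBlockSize); //    (AllocBits recycles the old
                left = kBlockSize;                   //     tail instead of dropping it)
            }
            void* p = next;                          // 3. carve from the block tail
            next += bytes;
            left -= bytes;
            return p;
        }

        void release(void* p, int sizeClass) {       // push a chunk back on its freelist
            *(void**)p = freelist[sizeClass];
            freelist[sizeClass] = p;
        }
    };
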
3147
3148 void GC::AddRoot(GCRoot *root)
3149 {
3150 MMGC_LOCK(m_rootListLock);
3151 root->prev = NULL;
3152 root->next = m_roots;
3153 if(m_roots)
3154 m_roots->prev = root;
3155 m_roots = root;
3156 }
3157
3158 void GC::RemoveRoot(GCRoot *root)
3159 {
3160 MMGC_LOCK(m_rootListLock);
3161 if( m_roots == root )
3162 m_roots = root->next;
3163 else
3164 root->prev->next = root->next;
3165
3166 if(root->next)
3167 root->next->prev = root->prev;
3168 }
3169
3170 void GC::AddCallback(GCCallback *cb)
3171 {
3172 cb->prevCB = NULL;
3173 cb->nextCB = m_callbacks;
3174 if(m_callbacks)
3175 m_callbacks->prevCB = cb;
3176 m_callbacks = cb;
3177 }
3178
3179 void GC::RemoveCallback(GCCallback *cb)
3180 {
3181 if( m_callbacks == cb )
3182 m_callbacks = cb->nextCB;
3183 else
3184 cb->prevCB->nextCB = cb->nextCB;
3185
3186 if(cb->nextCB)
3187 cb->nextCB->prevCB = cb->prevCB;
3188 }
3189
3190 GCObject *GCWeakRef::get()
3191 {
3192 // Bugzilla 572331.
3193 // It is possible for presweep handlers to extract pointers to unmarked objects
3194 // from weakrefs and store them into marked objects. Since the write barrier is
3195 // disabled during collecting we must ensure that the unmarked object becomes
3196 // marked by some other means in that situation, and we do that here by introducing
3197 // a local read barrier that resurrects those objects.
3198 //
3199 // The read barrier just pushes the to-be-resurrected object onto the mark
3200 // stack; the marker will be run in Sweep() after the presweep calls are done.
3201
3202 if (m_obj != 0) {
3203 GC *gc = GC::GetGC(m_obj);
3204 if (gc->Collecting() && !GC::GetMark(m_obj)) {
3205 // If this assertion fails then we have a finalizer (C++ destructor on a
3206 // GCFinalizableObject) resurrecting an object. That should not happen,
3207 // because we clear all GCWeakRefs pointing to unmarked objects before
3208 // running finalizers.
3209
3210 // Bugzilla 575631 (explanation below)
3211 // GCAssert(gc->Presweeping());
3212
3213 // Bugzilla 575631: GCAlloc::Finalize's interleaving
3214 // of mark-bit resets and finalizer invocations means
3215 // that the conditions above don't imply we're in
3216 // Presweep. Nonetheless, marking an object with
3217 // cleared mark bits isn't legal in GCAlloc::Finalize,
3218 // so instead of asserting that we are presweeping, we
3219 // use that condition as a guard.
3220 if (gc->Presweeping()) {
3221 // Bugzilla 615511:
3222 // Push_GCObject does not set the queued bit, but every object on
3223 // the mark stack must have the queued bit set.
3224 GC::GetGCBits(GetRealPointer(m_obj)) |= kQueued;
3225 gc->Push_GCObject(m_obj);
3226 }
3227 }
3228 }
3229
3230 return (GCObject*)m_obj;
3231 }
3232
3233 /*static*/
3234 GCWeakRef* GC::GetWeakRef(const void *userptr)
3235 {
3236 GC *gc = GetGC(userptr);
3237 GCWeakRef *ref = (GCWeakRef*) gc->weakRefs.get(userptr);
3238
3239 GCAssert(gc->IsPointerToGCPage(userptr));
3240 GCAssert(gc->IsPointerToGCObject(GetRealPointer(userptr)));
3241 GCAssert((gc->GetGCBits(GetRealPointer(userptr)) & GCAlloc::kFreelist) != GCAlloc::kFreelist);
3242
3243 if(ref == NULL)
3244 {
3245 if (gc->Collecting())
3246 {
3247 // Do not allow weak references to be created to objects that are about
3248 // to be freed; always return gc->emptyWeakRef. Basically this should not
3249 // happen - the program is not sound, it's trying to resurrect a dead object
3250 // - so we assert. The operation used to be legal but in an attempt to
3251 // improve the performance of GC::Finalize we have made it illegal. BTW,
3252 // it has always been illegal to store a pointer to a dead object into a
3253 // live object, thus resurrecting the dead object - though the conservative
3254 // collector would deal with that. The exact collector does not.
3255 if (GC::GetMark(userptr) == 0)
3256 {
3257 GCAssertMsg(false, "Do not attempt to resurrect a dying object by creating a weak reference to it");
3258 return gc->emptyWeakRef;
3259 }
3260 }
3261 ref = new (gc) GCWeakRef(userptr);
3262 gc->weakRefs.put(userptr, ref);
3263 SetHasWeakRef(userptr, true);
3264#ifdef MMGC_WEAKREF_PROFILER
3265 if (gc->weaklings != NULL) {
3266 gc->weaklings->accountForObject(ref);
3267 gc->weaklings->reportPopulation(gc->weakRefs.count());
3268 }
3269#endif
3270 } else {
3271 GCAssert(ref->get() == userptr);
3272 }
3273 return ref;
3274 }
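
A typical client pattern for the API above (hypothetical code, not from this file): ask the GC for a weak reference to an object and later re-check it via get(), which returns NULL once the target has been collected and also acts as the read barrier described in GCWeakRef::get.

    // Hypothetical usage sketch of the weak-reference API shown above.
    // 'RememberWeakly', 'LookupWeakly' and 'cache' are illustrative names.
    MMgc::GCWeakRef* RememberWeakly(MMgc::GCObject* obj)
    {
        return MMgc::GC::GetWeakRef(obj);       // cached per object; marks it kHasWeakRef
    }

    MMgc::GCObject* LookupWeakly(MMgc::GCWeakRef* cache)
    {
        MMgc::GCObject* obj = cache->get();     // NULL once the target was collected
        if (obj == NULL)
            return NULL;                        // target is gone; re-create it as needed
        return obj;                             // safe to use; get() resurrects during presweep
    }
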
3275
3276 void GC::ClearWeakRef(const void *item, bool allowRehash)
3277 {
3278 GCWeakRef *ref = (GCWeakRef*) weakRefs.remove(item, allowRehash);
3279 GCAssert(weakRefs.get(item) == NULL);
3280 GCAssert(ref != NULL || heap->GetStatus() == kMemAbort);
3281 if(ref) {
3282 GCAssert(ref->isNull() || ref->peek() == item);
3283 ref->m_obj = NULL;
3284 SetHasWeakRef(item, false);
3285 }
3286 }
3287
3288#ifdef _DEBUG
3289 void GC::WhitePointerScan(const void *mem, size_t size)
3290 {
3291 uintptr_t *p = (uintptr_t *) mem;
3292 // the minus 8 skips the GCEndOfObjectPoison and back pointer
3293 uintptr_t *end = p + ((size) / sizeof(void*));
3294
3295 while(p < end)
3296 {
3297 uintptr_t val = *p;
3298 if(val == GCHeap::GCEndOfObjectPoison)
3299 break;
3300 if(IsWhite((const void*) (val&~7)) &&
3301 *(((int32_t*)(val&~7))+1) != int32_t(GCHeap::GCFreedPoison) &&
3302 *(((int32_t*)(val&~7))+1) != int32_t(GCHeap::GCSweptPoison))
3303 {
3304 GCDebugMsg(false, "Object %p allocated here:\n", mem);
3305 PrintAllocStackTrace(mem);
3306 GCDebugMsg(false, "Didn't mark pointer at %p, object %p allocated here:\n", p, (void*)val);
3307 PrintAllocStackTrace((const void*)(val&~7));
3308 GCAssert(false);
3309 }
3310 p++;
3311 }
3312 }
3313
3314 void GC::FindMissingWriteBarriers()
3315 {
3316 if(!incrementalValidation)
3317 return;
3318
3319 uintptr_t m = pageMap.MemStart();
3320 while(m < pageMap.MemEnd())
3321 {
3322 // divide by 4K to get index
3323 int bits = GetPageMapValue(m);
3324 switch(bits)
3325 {
3326 case PageMap::kNonGC:
3327 m += GCHeap::kBlockSize;
3328 break;
3329 case PageMap::kGCLargeAllocPageFirst:
3330 {
3331 GCLargeAlloc::LargeBlock *lb = (GCLargeAlloc::LargeBlock*)m;
3332 const void *item = GetUserPointer(lb->GetObject());
3333 if(GetMark(item) && lb->containsPointers) {
3334 WhitePointerScan(item, lb->size - DebugSize());
3335 }
3336 m += lb->GetNumBlocks() * GCHeap::kBlockSize;
3337 }
3338 break;
3339 case PageMap::kGCAllocPage:
3340 {
3341 // go through all marked objects in this page
3342 GCAlloc::GCBlock *b = (GCAlloc::GCBlock *) m;
3343 GCAlloc* alloc = (GCAlloc*)b->alloc;
3344 if(alloc->ContainsPointers()) {
3345 for (int i=0; i< alloc->m_itemsPerBlock; i++) {
3346
3347 // find all marked (but not kFreelist) objects and search them
3348 void *item = (char*)b->items + alloc->m_itemSize*i;
3349 if(!GetMark(GetUserPointer(item)) || GetQueued(GetUserPointer(item)))
3350 continue;
3351
3352 WhitePointerScan(GetUserPointer(item), alloc->m_itemSize - DebugSize());
3353 }
3354 }
3355 m += GCHeap::kBlockSize;
3356 }
3357 break;
3358 default:
3359 GCAssert(false);
3360 break;
3361 }
3362 }
3363 }
3364#endif //_DEBUG
3365
3366 void *GC::heapAlloc(size_t siz, int flags)
3367 {
3368 void *ptr = heap->Alloc((int)siz, flags);
3369 if(ptr)
3370 policy.signalBlockAllocation(siz);
3371 return ptr;
3372 }
3373
3374 void GC::heapFree(void *ptr, size_t siz, bool profile)
3375 {
3376 if(!siz)
3377 siz = heap->Size(ptr);
3378 policy.signalBlockDeallocation(siz);
3379 heap->FreeInternal(ptr, profile, true);
3380 }
3381
3382 size_t GC::GetBytesInUse()
3383 {
3384 size_t ask;
3385 size_t allocated;
3386 GetUsageInfo(ask, allocated);
3387 (void)ask;
3388 return allocated;
3389 }
3390
3391 void GC::GetUsageInfo(size_t& totalAskSize, size_t& totalAllocated)
3392 {
3393 totalAskSize = 0;
3394 totalAllocated = 0;
3395
3396 size_t ask;
3397 size_t allocated;
3398
3399 GCAlloc** allocators[] = {containsPointersRCAllocs, containsPointersAllocs, noPointersAllocs};
3400 for(int j = 0;j<3;j++)
3401 {
3402 GCAlloc** gc_alloc = allocators[j];
3403
3404 for(int i=0; i < kNumSizeClasses; i++)
3405 {
3406 gc_alloc[i]->GetUsageInfo(ask, allocated);
3407 totalAskSize += ask;
3408 totalAllocated += allocated;
3409 }
3410 }
3411
3412 largeAlloc->GetUsageInfo(ask, allocated);
3413 totalAskSize += ask;
3414 totalAllocated += allocated;
3415 }
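
A small reporting helper built on the two accessors above (hypothetical code; it only assumes GetUsageInfo, GetBytesInUse and GCLog as they appear in this file).

    // Hypothetical sketch: log how much of the memory handed out by this GC
    // is still live, using the accessors defined above.
    static void LogGCUsage(MMgc::GC* gc)
    {
        size_t askSize = 0, allocated = 0;
        gc->GetUsageInfo(askSize, allocated);   // sums all size classes plus large objects
        MMgc::GCLog("GC usage: requested=%lu allocated=%lu in-use=%lu\n",
                    (unsigned long)askSize,
                    (unsigned long)allocated,
                    (unsigned long)gc->GetBytesInUse());
    }
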
3416
3417 void GC::allocaInit()
3418 {
3419 top_segment = NULL;
3420 stacktop = NULL;
3421 #ifdef _DEBUG
3422 stackdepth = 0;
3423 #endif
3424 pushAllocaSegment(AVMPLUS_PARAM_ALLOCA_DEFSIZE);
3425 }
3426
3427 void GC::allocaShutdown()
3428 {
3429 while (top_segment != NULL)
3430 popAllocaSegment();
3431 top_segment = NULL;
3432 stacktop = NULL;
3433 }
3434
3435 void GC::allocaUnwind()
3436 {
3437 allocaShutdown();
3438 allocaInit();
3439 }
3440
3441 void* GC::allocaPush(size_t nbytes, AllocaAutoPtr& x)
3442 {
3443 GCAssert(x.unwindPtr == NULL);
3444 x.gc = this;
3445 x.unwindPtr = stacktop;
3446 nbytes = GCHeap::CheckForAllocSizeOverflow(nbytes, 7) & ~7;
3447 if ((char*)stacktop + nbytes <= top_segment->limit) {
3448 stacktop = (char*)stacktop + nbytes;
3449 return x.unwindPtr;
3450 }
3451 return allocaPushSlow(nbytes);
3452 }
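
A usage sketch for the alloca-style stack above (hypothetical caller, assuming AllocaAutoPtr lives in the MMgc namespace and restores the previous stack top recorded in unwindPtr when it goes out of scope).

    // Hypothetical sketch: grab 'n' temporary pointer slots from the GC's alloca stack.
    void UseScratchSpace(MMgc::GC* gc, size_t n)
    {
        MMgc::AllocaAutoPtr scratch;                              // must outlive the use of 'p'
        void** p = (void**)gc->allocaPush(n * sizeof(void*), scratch);
        for (size_t i = 0; i < n; i++)
            p[i] = NULL;                                          // use the scratch memory
    }   // 'scratch' is destroyed here and the space is released back to the segment stack
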
3453
3454 void GC::allocaPopToSlow(void* top)
3455 {
3456 GCAssert(top_segment != NULL);
3457 GCAssert(!(top >= top_segment->start && top <= top_segment->limit));
3458 while (!(top >= top_segment->start && top <= top_segment->limit))
3459 popAllocaSegment();
3460 GCAssert(top_segment != NULL);
3461 }
3462
3463 void* GC::allocaPushSlow(size_t nbytes)
3464 {
3465 size_t alloc_nbytes = nbytes;
3466 if (alloc_nbytes < AVMPLUS_PARAM_ALLOCA_DEFSIZE)
3467 alloc_nbytes = AVMPLUS_PARAM_ALLOCA_DEFSIZE;
3468 pushAllocaSegment(alloc_nbytes);
3469 void *p = stacktop;
3470 stacktop = (char*)stacktop + nbytes;
3471 return p;
3472 }
3473
3474 void GC::pushAllocaSegment(size_t nbytes)
3475 {
3476 GCAssert(nbytes % 8 == 0);
3477 #ifdef _DEBUG
3478 stackdepth += nbytes;
3479 #endif
3480 void* memory = AllocRCRoot(nbytes);
3481 AllocaStackSegment* seg = mmfx_new(AllocaStackSegment);
3482 seg->start = memory;
3483 seg->limit = (void*)((char*)memory + nbytes);
3484 seg->top = NULL;
3485 seg->prev = top_segment;
3486 if (top_segment != NULL)
3487 top_segment->top = stacktop;
3488 top_segment = seg;
3489 stacktop = memory;
3490 }
3491
3492 void GC::popAllocaSegment()
3493 {
3494 #ifdef _DEBUG
3495 stackdepth -= (char*)top_segment->limit - (char*)top_segment->start;
3496 #endif
3497 FreeRCRoot(top_segment->start);
3498 AllocaStackSegment* seg = top_segment;
3499 top_segment = top_segment->prev;
3500 if (top_segment != NULL)
3501 stacktop = top_segment->top;
3502 mmfx_delete(seg);
3503 }
3504
3505 GCAutoEnter::GCAutoEnter(GC *gc, EnterType type) : m_gc(NULL), m_prevgc(NULL)
3506 {
3507 if(gc) {
3508 GC *prevGC = gc->heap->GetEnterFrame()->GetActiveGC();
3509 if(prevGC != gc) {
3510 // must enter first as it acquires the gc lock
3511 if(gc->ThreadEnter(this, /*doCollectionWork=*/true, type == kTryEnter)) {
3512 m_gc = gc;
3513 m_prevgc = prevGC;
3514 }
3515 }
3516 }
3517 }
3518
3519 GCAutoEnter::~GCAutoEnter()
3520 {
3521 Destroy(true);
3522 }
3523
3524 /*virtual*/
3525 void GCAutoEnter::Unwind()
3526 {
3527 if(m_gc) {
3528 GC *gc = m_gc;
3529 gc->SignalImminentAbort();
3530 }
3531 }
3532
3533 void GCAutoEnter::Destroy(boolbool doCollectionWork)
3534 {
3535 if(m_gc) {
3536 m_gc->ThreadLeave(doCollectionWork, m_prevgc);
3537 m_gc = m_prevgc = NULL__null;
3538 }
3539 }
3540
3541 GCAutoEnterPause::GCAutoEnterPause(GC *gc) : gc(gc), enterSave(gc->GetAutoEnter())
3542 {
3543 GCAssertMsg(gc->GetStackEnter() != 0, "Invalid MMGC_GC_ROOT_THREAD usage, GC not already entered, random crashes will ensue");
3544 gc->ThreadLeave(/*doCollectionWork=*/false, gc);
3545 }
3546
3547 GCAutoEnterPause::~GCAutoEnterPause()
3548 {
3549 GCAssertMsg(gc->GetStackEnter() == 0, "Invalid MMGC_GC_ROOT_THREAD usage, GC not exited properly, random crashes will ensue");
3550 gc->ThreadEnter(enterSave, false, false);
3551 }
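
The two RAII helpers above pair as follows in a hypothetical caller: GCAutoEnter establishes this GC as the active one for the current thread, and GCAutoEnterPause temporarily leaves it around a callout. The sketch assumes the EnterType parameter has a non-try default.

    // Hypothetical sketch of the enter/pause protocol defined above.
    void DoWorkOnGC(MMgc::GC* gc)
    {
        MMgc::GCAutoEnter enter(gc);          // acquires the GC lock / sets the active GC
        // ... allocate and touch GC objects belonging to 'gc' ...
        {
            MMgc::GCAutoEnterPause pause(gc); // temporarily leave 'gc'
            // ... call out to code that must not run with 'gc' entered ...
        }                                     // re-enters 'gc' here
    }                                         // leaves 'gc' and releases the lock here
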
3552
3553 bool GC::ThreadEnter(GCAutoEnter *enter, bool doCollectionWork, bool tryEnter)
3554 {
3555 if(!VMPI_lockTestAndAcquire(&m_gcLock)) {
3556 if(tryEnter)
3557 return false;
3558 if(m_gcThread != VMPI_currentThread())
3559 VMPI_lockAcquire(&m_gcLock);
3560 }
3561
3562 heap->SetActiveGC(this);
3563
3564 if(enterCount++ == 0) {
3565 heap->GetEnterFrame()->AddAbortUnwindObject(enter);
3566 stackEnter = enter;
3567 m_gcThread = VMPI_currentThread();
3568 if(doCollectionWork) {
3569 ThreadEdgeWork();
3570 }
3571 }
3572 return true;
3573 }
3574
3575 void GC::ThreadLeave( bool doCollectionWork, GC *prevGC)
3576 {
3577
3578 if(enterCount-1 == 0) {
3579 if(doCollectionWork) {
3580 ThreadEdgeWork();
3581 }
3582 heap->GetEnterFrame()->RemoveAbortUnwindObject(stackEnter);
3583 }
3584
3585 // We always pop the active GC but have to do so before releasing the lock
3586#ifdef DEBUG
3587 GC* curgc =
3588#endif
3589 heap->SetActiveGC(prevGC);
3590 GCAssert(curgc == this);
3591
3592 if(--enterCount == 0) {
3593 stackEnter = NULL;
3594 // cleared so we remain thread ambivalent
3595 rememberedStackTop = NULL;
3596 m_gcThread = NULL;
3597 VMPI_lockRelease(&m_gcLock);
3598 }
3599 }
3600
3601 void GC::ThreadEdgeWork()
3602 {
3603 if(destroying)
3604 return;
3605
3606 if(policy.queryFullCollectionQueued())
3607 Collect(false);
3608 else
3609 ReapZCT(false);
3610
3611 if(!stackCleaned)
3612 CleanStack();
3613 }
3614
3615 void GC::memoryStatusChange(MemoryStatus, MemoryStatus to)
3616 {
3617 // ZCT blockage: what if we get here from zct growth?
3618
3619 // Mark/sweep blockage: what about mark stack,
3620 // presweep,post-sweep,finalize allocations?
3621
3622 // if ZCT or GC can't complete we can't free memory! currently
3623 // we do nothing, which means we rely on reserve or other
3624 // listeners to free memory or head straight to abort
3625
3626 if(to == kFreeMemoryIfPossible) {
3627 if(onThread()) {
3628 Collect();
3629 } else {
3630 // If we're not already in the middle of collecting from another thread's GC, then try to...
3631 GCAutoEnter enter(this, GCAutoEnter::kTryEnter);
3632 if(enter.Entered()) {
3633 Collect(false);
3634 }
3635 // else nothing can be done
3636 }
3637 }
3638 }
3639
3640#ifdef DEBUG
3641 void GC::ShouldBeInactive()
3642 {
3643 GCAssert(m_gcThread == 0);
3644 GCAssert(stackEnter == NULL);
3645 GCAssert(top_segment != NULL && top_segment->prev == NULL && top_segment->start == stacktop);
3646 GCAssert(VMPI_lockTestAndAcquire(&m_gcLock) && VMPI_lockRelease(&m_gcLock));
3647 }
3648#endif
3649
3650 void GC::SignalImminentAbort()
3651 {
3652 policy.SignalImminentAbort();
3653 zct.SignalImminentAbort();
3654
3655 if (collecting || marking || BarrierActive())
3656 {
3657 ClearMarkStack();
3658 m_barrierWork.Clear();
3659 ClearMarks();
3660 m_markStackOverflow = false;
3661 collecting = false;
3662 marking = false;
3663 markerActive = 0;
3664 }
3665
3666 if(GetAutoEnter() != NULL) {
3667 GetAutoEnter()->Destroy(false);
3668 }
3669 }
3670
3671 GC::AutoRCRootSegment::AutoRCRootSegment(GC* gc, void* mem, size_t size)
3672 : RCRootSegment(gc, mem, size)
3673 {
3674 gc->AddRCRootSegment(this);
3675 }
3676
3677 GC::AutoRCRootSegment::~AutoRCRootSegment()
3678 {
3679 GetGC()->RemoveRCRootSegment(this);
3680 }
3681
3682#ifdef MMGC_HEAP_GRAPH
3683
3684 void GC::addToBlacklist(const void *gcptr)
3685 {
3686 blacklist.add(gcptr, gcptr);
3687 }
3688
3689 void GC::removeFromBlacklist(const void *gcptr)
3690 {
3691 blacklist.remove(gcptr);
3692 }
3693
3694 const void *GC::findGCGraphBeginning(const void *addr, bool &wasDeletedGCRoot)
3695 {
3696 /* These are all the possibilities
3697 1) GC small object
3698 2) GC large object
3699 3) GCRoot
3700 4) OS stack
3701 */
3702 if(!IsPointerToGCPage(addr)) {
3703 GCRoot *r = m_roots;
3704 while(r) {
3705 if(addr >= r->Get() && addr < r->End())
3706 return r->Get();
3707 r = r->next;
3708 }
3709
3710 // could be a deleted GCRoot
3711 GCHeap::HeapBlock *b = heap->AddrToBlock(addr);
3712 if(b) {
3713 wasDeletedGCRoot = true;
3714 if(b->inUse()) {
3715 if(b->size == 1) {
3716 return FixedAlloc::FindBeginning(addr);
3717 } else {
3718 return GetUserPointer(b->baseAddr);
3719 }
3720 }
3721 }
3722 return NULL;
3723 }
3724 return FindBeginningGuarded(addr, true); // will return NULL for OS stack
3725 }
3726
3727 void GC::DumpBackPointerChain(const void *p)
3728 {
3729 dumpBackPointerChain(p, markerGraph);
3730 }
3731
3732 void GC::dumpBackPointerChain(const void *p, HeapGraph& g)
3733 {
3734 GCLog("Dumping back pointer chain for %p\n", p);
3735 PrintAllocStackTrace(p);
3736 dumpBackPointerChainHelper(p, g);
3737 GCLog("End back pointer chain for %p\n", p);
3738 }
3739
3740 void GC::dumpBackPointerChainHelper(const void *p, HeapGraph& g)
3741 {
3742 GCHashtable *pointers = g.getPointers(p);
3743 if(pointers) {
3744 GCHashtable::Iterator iter(pointers);
3745 const void *addr = iter.nextKey();
3746 if(addr) {
3747 bool wasDeletedGCRoot=false;
3748 const void *container = findGCGraphBeginning(addr, wasDeletedGCRoot);
3749 const void *displayContainer = container ? container : addr;
3750 uint32_t offset = container ? uint32_t((char*)addr - (char*)container) : 0;
3751 const char *containerDescription = IsPointerToGCPage(container) ? "gc" : (container ? "gcroot" : "stack");
3752 if(wasDeletedGCRoot)
3753 containerDescription = "gcroot-deleted";
3754 GCLog("%p(%u)(%s) -> %p\n", displayContainer, offset, containerDescription, p);
3755 if(container) {
3756 PrintAllocStackTrace(container);
3757 dumpBackPointerChainHelper(container, g);
3758 }
3759 }
3760 }
3761 }
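
When MMGC_HEAP_GRAPH is enabled, the dumper above is typically invoked from a debugger or an assert handler on a single suspect object; a hypothetical call looks like this.

    // Hypothetical debugging snippet (requires MMGC_HEAP_GRAPH): print the chain of
    // containers recorded by the marker that leads to 'suspect'.
    void ExplainRetention(MMgc::GC* gc, const void* suspect)
    {
        gc->DumpBackPointerChain(suspect);   // walks markerGraph back edges, logging each hop
    }
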
3762
3763 void HeapGraph::edge(const void *addr, const void *newValue)
3764 {
3765 const void *oldValue = GC::Pointer(*(void**)addr);
3766 newValue = GC::Pointer(newValue);
3767 GCHashtable *addresses;
3768 if(oldValue) {
3769 addresses = (GCHashtable*)backEdges.get(oldValue);
3770 if(addresses) {
3771 addresses->remove(addr);
3772 }
3773 }
3774 if(newValue) {
3775 addresses = (GCHashtable*)backEdges.get(newValue);
3776 if(!addresses) {
3777 // Use system memory to avoid profiling diagnostic
3778 // data, having memory profiling and heap graph on
3779 // takes forever to complete otherwise on some tests.
3780 addresses = system_new(GCHashtable());
3781 backEdges.put(newValue, addresses);
3782 }
3783 addresses->add(addr, addr);
3784 }
3785 }
3786
3787 void HeapGraph::clear()
3788 {
3789 GCHashtable_VMPI::Iterator iter(&backEdges);
3790 const void *key;
3791 while((key = iter.nextKey()) != NULL) {
3792 GCHashtable *addresses = (GCHashtable*)iter.value();
3793 system_delete(addresses);
3794 }
3795 backEdges.clear();
3796 }
3797
3798 GCHashtable *HeapGraph::getPointers(const void *obj)
3799 {
3800 return (GCHashtable*)backEdges.get(obj);
3801 }
3802
3803 void GC::pruneBlacklist()
3804 {
3805 if(blacklist.count() > 0) {
3806 {
3807 GCHashtable::Iterator iter(&blacklist);
3808 const void *p;
3809 while((p = iter.nextKey()) != NULL) {
3810 if(!GetMark(p)) {
3811 blacklist.remove(p, /*allowrehash=*/false);
3812 }
3813 }
3814 }
3815 blacklist.prune();
3816 }
3817 }
3818
3819 void GC::printBlacklist()
3820 {
3821 if(blacklist.count() > 0) {
3822 GCHashtable::Iterator iter(&blacklist);
3823 const void *p;
3824 while((p = iter.nextKey()) != NULL) {
3825 GCLog("Blacklist item 0x%p %s found in marker graph\n", p, markerGraph.getPointers(p) ? "was" : "wasn't");
3826 GCLog("Blacklist item 0x%p %s found in mutator graph\n", p, mutatorGraph.getPointers(p) ? "was" : "wasn't");
3827 dumpBackPointerChain(p, markerGraph);
3828 }
3829 }
3830 }
3831#endif
3832
3833#ifdef MMGC_DELETION_PROFILER
3834 void GC::ProfileExplicitDeletion(const void* item)
3835 {
3836 if (deletos != NULL)
3837 deletos->accountForObject(item);
3838 }
3839#endif
3840
3841 // Object locks are implemented as garbage-collectable objects for simplicity
3842 // but are never eligible for garbage collection while locked. In principle
3843 // they can be deleted explicitly in UnlockObject because it's illegal for the
3844 // mutator to use the lock object after it has been unlocked.
3845 //
3846 // The user of the protocol does not know that lock objects are GC objects.
3847 // The lock object pointer need not be stored in a reachable location and even
3848 // if it is no write barrier should be used.
3849
3850 class GCObjectLock : public GCTraceableObject
3851 {
3852 public:
3853 GCObjectLock(const void* object)
3854 : object(object)
3855 , prev(NULL)
3856 , next(NULL)
3857 {
3858 }
3859
3860 virtual bool gcTrace(GC* gc, size_t)
3861 {
3862 gc->TraceLocation(&object);
3863 gc->TraceLocation(&next);
3864 gc->TraceLocation(&prev);
3865 return false;
3866 }
3867
3868 const void* object;
3869 GCMember<GCObjectLock> prev;
3870 GCMember<GCObjectLock> next;
3871 };
3872
3873 GCObjectLock* GC::LockObject(const void* userptr)
3874 {
3875 GCAssertMsg(userptr != NULL, "LockObject should never be called on a NULL pointer.");
3876 GCObjectLock* lock = new (this, kExact) GCObjectLock(userptr);
3877 if (userptr != NULL && IsRCObject(userptr))
3878 ((RCObject*)userptr)->IncrementRef();
3879 if (lockedObjects != NULL)
3880 lockedObjects->prev = lock;
3881 lock->next = lockedObjects;
3882 lockedObjects = lock;
3883 return lock;
3884 }
3885
3886 const void* GC::GetLockedObject(GCObjectLock* lock)
3887 {
3888 GCAssertMsg(lock->object != NULL, "GetLockedObject should never be called on an unlocked lock.");
3889 return lock->object;
3890 }
3891
3892 void GC::UnlockObject(GCObjectLock* lock)
3893 {
3894 if (lock->object == NULL) {
3895 GCAssert(!"UnlockObject should never be called on an unlocked lock.");
3896 return;
3897 }
3898 if (IsRCObject(lock->object))
3899 ((RCObject*)(lock->object))->DecrementRef();
3900 if (lock->prev != NULL)
3901 lock->prev->next = lock->next;
3902 else
3903 lockedObjects = lock->next;
3904 if (lock->next != NULL)
3905 lock->next->prev = lock->prev;
3906 lock->next = NULL;
3907 lock->prev = NULL;
3908 lock->object = NULL;
3909 }
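
A hypothetical end-to-end use of the lock protocol above: pin an object across a region where no GC-visible reference to it is kept, then release it. The lock handle itself is opaque to the caller, as the comment before GCObjectLock explains.

    // Hypothetical sketch of the object-lock protocol defined above.
    void PinAcrossCallout(MMgc::GC* gc, const void* obj)
    {
        MMgc::GCObjectLock* lock = gc->LockObject(obj);   // keeps 'obj' (and its RC) alive
        // ... hand 'obj' to code that stores it somewhere the GC cannot see ...
        const void* same = gc->GetLockedObject(lock);     // retrieve it later if needed
        (void)same;
        gc->UnlockObject(lock);                           // 'obj' is collectable again;
                                                          // 'lock' must not be used after this
    }
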
3910
3911#ifdef DEBUG
3912 void GC::TracePointerCheck(const void *derivedPointer)
3913 {
3914 GC *gc = GetActiveGC();
3915 void *userptr = gc->FindBeginningFast(derivedPointer);
3916 uint32_t offset = uint32_t(uintptr_t(derivedPointer) - uintptr_t(userptr));
3917 if(GC::GetGCBits(GetRealPointer(userptr)) & kVirtualGCTrace)
3918 {
3919 GCAssertMsg(((GCTraceableBase*)userptr)->gcTraceOffsetIsTraced(offset) != kOffsetNotFound, "Missing exact tracing annotation!");
3920 }
3921 }
3922
3923 /*static*/
3924 GCTracerCheckResult GC::CheckOffsetIsInList(uint32_t offset, const uint32_t offsets[],size_t len)
3925 {
3926 for(size_t i=0;i<len;i++)
3927 {
3928 if(offsets[i]==offset)
3929 return kOffsetFound;
3930 }
3931 return kOffsetNotFound;
3932 }
3933
3934#ifdef MMGC_HEAP_GRAPH
3935 bool GC::BackPointerChainStillValid(const void *obj)
3936 {
3937 bool valid = false;
3938 GCHashtable *pointers = markerGraph.getPointers(obj);
3939 if(pointers) {
3940 GCHashtable::Iterator iter(pointers);
3941 void **addr = (void**)iter.nextKey();
3942 if(addr) {
3943 bool wasDeletedGCRoot=false;
3944 const void *container = findGCGraphBeginning(addr, wasDeletedGCRoot);
3945 if(Pointer(*addr) == obj)
3946 return BackPointerChainStillValid(container);
3947 else
3948 return false;
3949 }
3950 }
3951 return valid;
3952 }
3953#endif
3954
3955 void GC::DefRefValidate(RCObject* obj)
3956 {
3957 if(!GetMark(obj))
3958 return;
3959
3960#ifdef MMGC_HEAP_GRAPH
3961 // It's possible that the path through which this thing got marked
3962 // is now gone, i.e. maybe an RCObject dtor deleted it, so it's
3963 // really not reachable. Avoid asserting on those by checking
3964 // the back pointer linkage.
3965 if(!BackPointerChainStillValid(obj))
3966 return;
3967
3968 GCLog("Allocation trace:");
3969 PrintAllocStackTrace(obj);
3970
3971#ifdef MMGC_RC_HISTORY
3972 obj->DumpHistory();
3973#else
3974 GCLog("To see the ref count history, ensure MMGC_RC_HISTORY and MMGC_MEMORY_PROFILER are enabled and set the MMGC_PROFILE env var to 1.");
3975#endif
3976
3977 DumpBackPointerChain(obj);
3978 GCAssertMsg(false, "Zero count object reachable from GCRoot, this may indicate a missing write barrier");
3979
3980#ifndef MMGC_MEMORY_PROFILER
3981 GCLog("Make sure MMGC_MEMORY_PROFILER is enabled for more useful diagnostic info.");
3982#else
3983 if(heap->GetProfiler() == NULL)
3984 GCLog("Set the MMGC_PROFILE env var to 1 for better diagnostic information.");
3985#endif
3986
3987#else // MMGC_HEAP_GRAPH
3988
3989 // Can only warn if we don't have a heap graph since its too
3990 // noisy without the BackPointerChainStillValid check.
3991 GCLog("Warning: possible ref count problem, turn on AVMTWEAK_HEAP_GRAPH to enable drc validation asserts.");
3992
3993#endif // !MMGC_HEAP_GRAPH
3994 }
3995
3996#endif // DEBUG
3997
3998}