Bug Summary

File: platform/mac/avmshell/../../../MMgc/GCAlloc.cpp
Location: line 327, column 17
Description: Array access (from variable 'pbits') results in a null pointer dereference
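
What the path below shows: at line 226 the analyzer assumes m_bitsInPage is true, so 'bits' is left NULL; at line 256 it assumes the opposite and takes the branch that assigns 'b->bits = bits', and the bitmap loop at lines 326-327 then dereferences that NULL pointer. The two assumptions are inconsistent for a member that cannot change during the call, so this is most likely a false positive, but reading the flag into a local makes the value visibly single-valued and an assertion documents the invariant. A minimal sketch of such a change (the local variable and the assertion are editorial suggestions, not code from the file):

    const bool bitsInPage = m_bitsInPage;  // read the member exactly once
    gcbits_t* bits = bitsInPage ? NULL : (gcbits_t*)m_gc->AllocBits(m_numBitmapBytes, m_sizeClassIndex);
    ...
    if (bitsInPage)
        b->bits = (gcbits_t*)b + sizeof(GCBlock);  // bitmap lives inside the block's page
    else
        b->bits = bits;                            // bitmap was allocated above
    GCAssert(b->bits != NULL);                     // one of the two branches always supplies bits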

Annotated Source Code

1 /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
2 /* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
3 /* ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5 *
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
10 *
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
14 * License.
15 *
16 * The Original Code is [Open Source Virtual Machine.].
17 *
18 * The Initial Developer of the Original Code is
19 * Adobe System Incorporated.
20 * Portions created by the Initial Developer are Copyright (C) 2004-2006
21 * the Initial Developer. All Rights Reserved.
22 *
23 * Contributor(s):
24 * Adobe AS3 Team
25 *
26 * Alternatively, the contents of this file may be used under the terms of
27 * either the GNU General Public License Version 2 or later (the "GPL"), or
28 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
37 *
38 * ***** END LICENSE BLOCK ***** */
39
40
41 #include "MMgc.h"
42
43 /* Notes on the quick list.
44 *
45 * Some programs have a high churn rate where objects are allocated and freed
46 * rapidly, and often freed explicitly - the AS2 interpreter is an example
47 * of this, another is AS3 reference counting. These programs benefit from
48 * an allocator that performs limited book-keeping during times of high
49 * allocation and deallocation and only occasionally reconciles information
50 * about allocated and freed objects into a convenient state. The quick list in
51 * GCAlloc provides such a facility within the current (Dec 2009) MMgc API.
52 *
53 * (The old allocator essentially performed eager coalescing of free objects:
54 * it would check whether a block was full, whether it needed sweeping,
55 * and so on. The old allocator also had reluctant allocation: objects
56 * would be preferred off a block's free list, and the tail end of a block
57 * would not be split into individual objects until the free objects had
58 * been exhausted: this added more tests, and made optimization harder.
59 * The quick list effectively implements deferred coalescing and eager
60 * allocation, and adds a few other optimizations.)
61 *
62 * The fast path for Alloc picks an object off the free list, sets up the
63 * header bits, and returns the object. The fast path for Free places the
64 * object on the quick list. Slow paths are triggered when the quick list
65 * is empty (in Alloc) or there's something about the object's block that
66 * requires special action (in Free). The amount of testing on the fast
67 * paths is kept to an absolute minimum. For example, when the collector
68 * starts sweeping it clears all the quick lists, and only the slow allocation
69 * path needs to test whether the object needs to be allocated pre-marked.
70 * Similarly, blocks that may contain objects that are weakly held or which must
71 * be swept are marked as such using a single, common flag, so the Free code only
72 * has one test on the fast path; only the slow path needs to test whether
73 * an object's weak reference must be cleared /or/ the block must actually
74 * be swept.
75 *
76 * Central to this optimization is that all free storage in a block is on
77 * the free list, and CreateChunk now explodes the block onto the free list
78 * in a tight loop. This gets rid of a test and branch in Alloc and lost
79 * optimizations resulting from that test.
80 *
81 * Generally we also try to open up for tail-call optimization throughout.
82 *
83 * Code that relies on the GC for storage reclamation benefits from these
84 * optimizations as well, as Alloc is substantially faster than before.
85 * The cost of sweeping is however about the same cost as before the quick
86 * list.
87 *
88 * So where's the cost?
89 *
90 * Primarily, each quick list will need to be coalesced into the free lists
91 * in the blocks whose free objects are on the quick list before sweeping
92 * (i.e., at the start and end of each GC cycle) and whenever there is an excess
93 * of free storage on the quick lists globally. Since the quick list
94 * coalescing has economy of scale where the old immediate coalescing did
95 * not, we do it quicker than before, and in fast-allocating programs we will
96 * usually do it for vastly fewer objects, but it's not free.
97 *
98 * We also risk losing some incrementality during coalescing if the quick lists
99 * become very long. There's one quick list per allocator; for some very popular
100 * object types that are freed explicitly there's a chance that the quick lists
101 * could become long. We already have bad incrementality at most points where
102 * the quick lists must be coalesced, however, so it's doubtful that coalescing
103 * will make much of a difference.
104 *
105 * Another minor cost is a slight difference in accounting, since all objects
106 * in a block are on a free list - in the past, accounting did not count the
107 * free space at the end of a block, but now there's no such memory. In practice
108 * this is not expected to be significant, as there should be very little such
109 * free space globally in the system except in minuscule heaps.
110 *
111 * The final cost is that we must ensure that a large population of free objects
112 * does not build up and thus increase fragmentation system-wide. To counter
113 * this, the GC has a global quick list budget which is a constant number of
114 * bytes that it will allow on quick lists for all its allocators. Each allocator
115 * has a budget - initially one block's worth of data - and will ask to increase
116 * it when its quick list grows longer; that request will cause the GC to flush
117 * some quick lists if the requested increase is not available. Thus the total
118 * amount of fragmentation in the system resulting from the quick lists is always
119 * bounded by a constant (which is better than the amount of fragmentation
120 * resulting from the ordinary free lists - it is not bounded). The cost of
121 * this accounting is a per-allocator counter that is incremented/decremented
122 * at each allocation/deallocation and compared with zero. Regrettably this
123 * accounting is on the hot path and I've not yet found a way to coalesce it
124 * with the ordinary alloc/free accounting.
125 *
126 *
127 * Notes.
128 *
129 * An optimization that was tried but which is not yet in this code is propagating
130 * the value of the object index from Free/CreateChunk/Sweep to Alloc: Free must
131 * compute it, and both CreateChunk and Sweep can compute the index much more cheaply
132 * than Alloc can, so it makes sense to do so. Two facts stand in the way. One,
133 * on 64-bit systems the object may only be one word long. Two, there's a system-wide
134 * invariant that free RC objects have a 'composite' field (the second word) whose
135 * value is zero (or some poison value, in DEBUG builds), and reference counting
136 * operations test the value of that field to ignore operations on objects that are
137 * in the process of being deleted. This protocol makes finalization work in the
138 * current system. See comments about this invariant in GCObject.h.
139 */
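
The fast paths described above lean on an intrusive singly-linked free list: each free object's first word holds the link to the next free object. A minimal sketch of the idea, modeled on the FLPush/FLPopAndZero/FLNext helpers used in this file (their exact MMgc signatures and zeroing protocol are assumptions here, not quoted code):

    // Push: the object itself becomes the new list head.
    static inline void FLPush(void*& head, void* item)
    {
        *(void**)item = head;   // the object's first word stores the link
        head = item;
    }

    // Pop: unlink the head and zero its link word, so the object is handed
    // out with a cleared first word.
    static inline void* FLPopAndZero(void*& head)
    {
        void* item = head;
        head = *(void**)item;
        *(void**)item = NULL;
        return item;
    }

    // Next: follow the link word without changing the list.
    static inline void* FLNext(void* item)
    {
        return *(void**)item;
    }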
140
141 namespace MMgc
142 {
143 #ifdef MMGC_FASTBITS
144 static uint32_t log2(uint32_t n)
145 {
146 uint32_t result = 0;
147 while (n > 1) {
148 result += 1;
149 n >>= 1;
150 }
151 return result;
152 }
153 #endif
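    // Worked example (floor semantics): log2(8) == 3, log2(12) == 3, log2(16) == 4.
    // Item sizes need not be powers of two, so the floor is what guarantees that
    // kBlockSize / (1 << m_bitsShift) in the constructor below is at least as
    // large as the number of items in a block.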
154
155 /*virtual*/
156 GCAllocBase::~GCAllocBase()
157 {
158 }
159
160 GCAlloc::GCAlloc(GC* _gc, int _itemSize, bool _containsPointers, bool _isRC, int _sizeClassIndex) :
161 m_firstBlock(NULL),
162 m_lastBlock(NULL),
163 m_firstFree(NULL),
164 m_needsSweeping(NULL),
165 m_qList(NULL),
166 m_qBudget(0),
167 m_qBudgetObtained(0),
168 m_itemSize((_itemSize+7)&~7), // Round itemSize up to the next multiple of 8
169 m_itemsPerBlock((kBlockSize - sizeof(GCBlock)) / m_itemSize),
170 #ifdef MMGC_FASTBITS
171 m_bitsShift(log2(m_itemSize)),
172 m_numBitmapBytes(kBlockSize / (1 << m_bitsShift)),
173 #else
174 m_numBitmapBytes(((m_itemsPerBlock * sizeof(gcbits_t))+3)&~3), // round up to 4 bytes so we can go through the bits several items at a time
175 #endif
176 m_sizeClassIndex(_sizeClassIndex),
177 #ifdef MMGC_MEMORY_PROFILER
178 m_totalAskSize(0),
179 #endif
180 m_bitsInPage(_containsPointers && kBlockSize - int(m_itemsPerBlock * m_itemSize + sizeof(GCBlock)) >= m_numBitmapBytes),
181 multiple(ComputeMultiply((uint16_t)m_itemSize)),
182 shift(ComputeShift((uint16_t)m_itemSize)),
183 containsPointers(_containsPointers),
184 containsRCObjects(_isRC),
185 m_finalized(false),
186 m_gc(_gc)
187 {
188 #ifdef DEBUG
189 int usedSpace = m_itemsPerBlock * m_itemSize + sizeof(GCBlock);
190 #endif
191 GCAssert((unsigned)kBlockSize == GCHeap::kBlockSize);
192 GCAssert(usedSpace <= kBlockSize);
193 GCAssert(kBlockSize - usedSpace < (int)m_itemSize);
194 GCAssert(m_itemSize < GCHeap::kBlockSize);
195 m_gc->ObtainQuickListBudget(m_itemSize*m_itemsPerBlock);
196 m_qBudget = m_qBudgetObtained = m_itemsPerBlock;
197 }
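    // Worked example (assuming GCHeap::kBlockSize == 4096): a request with
    // _itemSize == 12 rounds up to m_itemSize == 16, since (12+7)&~7 == 16,
    // and m_itemsPerBlock == (4096 - sizeof(GCBlock)) / 16.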
198
199 GCAlloc::~GCAlloc()
200 {
201 CoalesceQuickList();
202
203 // Free all of the blocks
204 GCAssertMsg(GetNumAlloc() == 0, "You have leaks");
205
206 while (m_firstBlock) {
207 #ifdef MMGC_MEMORY_INFO
208 // check whether any item within this block was written to after being poisoned
209 VerifyFreeBlockIntegrity(m_firstBlock->firstFree, m_firstBlock->size);
210 #endif //MMGC_MEMORY_INFO
211
212 GCBlock *b = m_firstBlock;
213 UnlinkChunk(b);
214 FreeChunk(b);
215 }
216 }
217
218 GCAlloc::GCBlock* GCAlloc::CreateChunk(int flags)
219 {
220 // Too many definitions of kBlockSize, make sure they're at least in sync.
221
222 GCAssert(uint32_t(kBlockSize) == GCHeap::kBlockSize);
223
224 // Get bitmap space; this may trigger OOM handling.
225
226 gcbits_t* bits = m_bitsInPage ? NULL : (gcbits_t*)m_gc->AllocBits(m_numBitmapBytes, m_sizeClassIndex);
        [1] '?' condition is true
227
228 // Allocate a new block; this may trigger OOM handling (though that
229 // won't affect the bitmap space, which is not GC'd individually).
230
231 GCBlock* b = (GCBlock*) m_gc->AllocBlock(1, PageMap::kGCAllocPage, /*zero*/true, (flags&GC::kCanFail) != 0);
232
233 if (b)
        [2] Taking true branch
234 {
235 VALGRIND_CREATE_MEMPOOL(b, 0/*redZoneSize*/, 1/*zeroed*/);
236
237 // treat block header as a separate allocation
238 VALGRIND_MEMPOOL_ALLOC(b, b, sizeof(GCBlock));
239
240
241 b->gc = m_gc;
242 b->alloc = this;
243 b->size = m_itemSize;
244 b->slowFlags = 0;
245 if(m_gc->collecting && m_finalized)
        [3] Taking false branch
246 b->finalizeState = m_gc->finalizedValue;
247 else
248 b->finalizeState = !m_gc->finalizedValue;
249
250 #ifdef MMGC_FASTBITS
251 b->bitsShift = (uint8_t) m_bitsShift;
252 #endif
253 b->containsPointers = ContainsPointers();
254 b->rcobject = ContainsRCObjects();
255
256 if (m_bitsInPage)
        [4] Taking false branch
257 b->bits = (gcbits_t*)b + sizeof(GCBlock);
258 else
259 b->bits = bits;
260
261 // ditto for in page bits
262 if (m_bitsInPage) {
        [5] Taking false branch
263 VALGRIND_MEMPOOL_ALLOC(b, b->bits, m_numBitmapBytes);
264 }
265
266 // Link the block at the end of the list
267 b->prev = m_lastBlock;
268 b->next = 0;
269
270 if (m_lastBlock) {
        [6] Taking false branch
271 m_lastBlock->next = b;
272 }
273 if (!m_firstBlock) {
        [7] Taking false branch
274 m_firstBlock = b;
275 }
276 m_lastBlock = b;
277
278 // Add our new ChunkBlock to the firstFree list (which should be empty)
279 if (m_firstFree)
        [8] Taking false branch
280 {
281 GCAssert(m_firstFree->prevFree == 0);
282 m_firstFree->prevFree = b;
283 }
284 b->nextFree = m_firstFree;
285 b->prevFree = 0;
286 m_firstFree = b;
287
288 // calculate back from end (better alignment, no dead space at end)
289 b->items = (char*)b+GCHeap::kBlockSize - m_itemsPerBlock * m_itemSize;
290 b->numFree = (short)m_itemsPerBlock;
291
292 // explode the new block onto its free list
293 //
294 // We must make the object look free, which means poisoning it properly and setting
295 // the mark bits correctly.
296
297 b->firstFree = b->items;
298 void** p = (void**)(void*)b->items;
299 int limit = m_itemsPerBlock-1;
300 #ifdef MMGC_HOOKS
301 GCHeap* heap = GCHeap::GetGCHeap();
302 #endif
303 for ( int i=0 ; i < limit ; i++ ) {
        [9] Loop condition is false. Execution continues on line 317
304 #ifdef MMGC_HOOKS
305 #ifdef MMGC_MEMORY_INFO // DebugSize is 0 if MEMORY_INFO is off, so we get an "obviously true" warning from GCC.
306 GCAssert(m_itemSize >= DebugSize());
307 #endif
308 if(heap->HooksEnabled())
309 heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
310 #endif
311 p = FLSeed(p, (char*)p + m_itemSize);
312 }
313 #ifdef MMGC_HOOKS
314 if(heap->HooksEnabled())
315 heap->PseudoFreeHook(GetUserPointer(p), m_itemSize - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
316 #endif
317 p[0] = NULL;
318
319 // Set all the mark bits to 'free'
320
321 GCAssert(sizeof(gcbits_t) == 1);
322 GCAssert(kFreelist == 3);
323 GCAssert(m_numBitmapBytes % 4 == 0);
324
325 uint32_t *pbits = (uint32_t*)(void *)b->bits;
326 for(int i=0, n=m_numBitmapBytes>>2; i < n; i++)
        [10] Loop condition is true. Entering loop body
327 pbits[i] = 0x03030303;
        [11] Array access (from variable 'pbits') results in a null pointer dereference
328
329 #ifdef MMGC_MEMORY_INFO
330 VerifyFreeBlockIntegrity(b->firstFree, m_itemSize);
331 #endif
332 }
333 else {
334 if (bits)
335 m_gc->FreeBits((uint32_t*)(void *)bits, m_sizeClassIndex);
336 }
337
338 return b;
339 }
340
341 void GCAlloc::UnlinkChunk(GCBlock *b)
342 {
343 GCAssert(!b->needsSweeping());
344 if ( ((b->prevFree && (b->prevFree->nextFree!=b))) ||
345 ((b->nextFree && (b->nextFree->prevFree!=b))) )
346 VMPI_abort();
347
348 // Unlink the block from the list
349 if (b == m_firstBlock) {
350 m_firstBlock = Next(b);
351 } else {
352 b->prev->next = Next(b);
353 }
354
355 if (b == m_lastBlock) {
356 m_lastBlock = b->prev;
357 } else {
358 Next(b)->prev = b->prev;
359 }
360
361 if(b->nextFree || b->prevFree || b == m_firstFree) {
362 RemoveFromFreeList(b);
363 }
364 #ifdef _DEBUG
365 b->next = b->prev = NULL;
366 b->nextFree = b->prevFree = NULL;
367 #endif
368 }
369
370 void GCAlloc::FreeChunk(GCBlock* b)
371 {
372 GCAssert(b->numFree == m_itemsPerBlock);
373 if(!m_bitsInPage) {
374 VMPI_memset(b->bits, 0, m_numBitmapBytes);
375 m_gc->FreeBits((uint32_t*)(void *)b->bits, m_sizeClassIndex);
376 b->bits = NULL;
377 } else {
378 // Only free bits if they were in page, see CreateChunk.
379 VALGRIND_MEMPOOL_FREE(b, b->bits);
380 }
381
382 VALGRIND_MEMPOOL_FREE(b, b);
383
384 // Free the memory
385 m_gc->FreeBlock(b, 1);
386
387 VALGRIND_DESTROY_MEMPOOL(b);
388 }
389
390 #if defined DEBUG || defined MMGC_MEMORY_PROFILER
391 void* GCAlloc::Alloc(size_t askSize, int flags)
392 #else
393 void* GCAlloc::Alloc(int flags)
394 #endif
395 {
396 #ifdef DEBUG
397 m_gc->heap->CheckForOOMAbortAllocation();
398 #endif
399
400 // Allocation must be signalled before we allocate because no GC work must be allowed to
401 // come between an allocation and an initialization - if it does, we may crash, as
402 // GCFinalizedObject subclasses may not have a valid vtable, but the GC depends on them
403 // having it. In principle we could signal allocation late but only set the object
404 // flags after signaling, but we might still cause trouble for the profiler, which also
405 // depends on non-interruptibility.
406
407 m_gc->SignalAllocWork(m_itemSize);
408
409 GCAssertMsg(((size_t)m_itemSize >= askSize), "allocator itemsize too small");
410 GCAssert(!m_gc->collecting || m_qList == NULL);
411
412 #ifdef MMGC_MEMORY_INFO
413 if (m_qList != NULL) {
414 // check for writes on deleted memory
415 // the quick list can be long so limit the check to the first 20 to
416 // avoid slowing down debug builds too much.
417 VerifyFreeBlockIntegrity(m_qList, m_itemSize, 20);
418 }
419 #endif
420
421 if (m_qList == NULL) {
422 #if defined DEBUG || defined MMGC_MEMORY_PROFILER
423 return AllocSlow(askSize, flags);
424 #else
425 return AllocSlow(flags);
426 #endif
427 }
428
429 #if defined DEBUG || defined MMGC_MEMORY_PROFILER
430 return AllocFromQuickList(askSize, flags);
431 #else
432 return AllocFromQuickList(flags);
433 #endif
434 }
435
436 #if defined DEBUG || defined MMGC_MEMORY_PROFILER
437 REALLY_INLINE void* GCAlloc::AllocFromQuickList(size_t askSize, int flags)
438 #else
439 REALLY_INLINE void* GCAlloc::AllocFromQuickList(int flags)
440 #endif
441 {
442 // This is absurd.
443 #if defined DEBUG || defined MMGC_MEMORY_PROFILER
444 (void)askSize;
445 #endif
446 void *item = FLPopAndZero(m_qList);
447 GCBlock* b = GetBlock(item);
448
449 GCAssert(!b->needsSweeping());
450
451 // Code below uses these optimizations
452 GCAssert((unsigned long)GC::kFinalize == (unsigned long)kFinalizable);
453 GCAssert((unsigned long)GC::kInternalExact == (unsigned long)kVirtualGCTrace);
454
455 #if defined VMCFG_EXACT_TRACING
456 b->bits[GetBitsIndex(b, item)] = (flags & (GC::kFinalize|GC::kInternalExact));
457 #elif defined VMCFG_SELECTABLE_EXACT_TRACING
458 b->bits[GetBitsIndex(b, item)] = (flags & (GC::kFinalize|m_gc->runtimeSelectableExactnessFlag)); // 0 or GC::kInternalExact
459 #else
460 b->bits[GetBitsIndex(b, item)] = (flags & GC::kFinalize);
461 #endif
462
463 #ifdef MMGC_HOOKS
464 GCHeap* heap = GCHeap::GetGCHeap();
465 if(heap->HooksEnabled()) {
466 size_t userSize = m_itemSize - DebugSize();
467 #ifdef MMGC_MEMORY_PROFILER
468 // Only add if the profiler is installed since we can only
469 // subtract if the profiler is installed.
470 if(heap->GetProfiler())
471 m_totalAskSize += askSize;
472 heap->AllocHook(GetUserPointer(item), askSize, userSize, /*managed=*/true);
473 #else
474 heap->AllocHook(GetUserPointer(item), 0, userSize, /*managed=*/true);
475 #endif
476 }
477 #endif // MMGC_HOOKS
478
479 m_qBudget++;
480
481 VALGRIND_MEMPOOL_ALLOC(b, item, m_itemSize);
482
483 return item;
484 }
485
486 #if defined DEBUG || defined MMGC_MEMORY_PROFILER
487 void *GCAlloc::AllocSlow(size_t askSize, int flags)
488 #else
489 void *GCAlloc::AllocSlow(int flags)
490 #endif
491 {
492 GCBlock* b = m_firstFree;
493
494 while (b == NULL) {
495 if (m_needsSweeping && !m_gc->collecting) {
496 Sweep(m_needsSweeping);
497 b = m_firstFree;
498 if (b != NULL)
499 break;
500 }
501 else {
502 bool canFail = (flags & GC::kCanFail) != 0;
503 CreateChunk(canFail);
504 b = m_firstFree;
505 if (b != NULL)
506 break;
507 GCAssert(canFail);
508 return NULL;
509 }
510 }
511
512 GCAssert(!b->needsSweeping());
513 GCAssert(b == m_firstFree);
514 GCAssert(b->firstFree != NULL);
515
516 if (!m_gc->collecting && !m_gc->greedy)
517 {
518 // Fast path: fill the quick list from b, then tail-call AllocFromQuickList()
519 FillQuickList(b);
520 GCAssert(m_qList != NULL);
521 #if defined DEBUG || defined MMGC_MEMORY_PROFILER
522 return AllocFromQuickList(askSize, flags);
523 #else
524 return AllocFromQuickList(flags);
525 #endif
526 }
527 else
528 {
529 // Slow path: We basically don't care how expensive this is, so
530 // punt: fill the quick list, alloc, set the bits, then coalesce
531 // to clear the list and establish the situation that triggers
532 // slow-path allocation.
533
534 FillQuickList(b);
535 GCAssert(m_qList != NULL);
536 #if defined DEBUG || defined MMGC_MEMORY_PROFILER
537 void *item = AllocFromQuickList(askSize, flags);
538 #else
539 void *item = AllocFromQuickList(flags);
540 #endif
541 if(m_gc->collecting && b->finalizeState != m_gc->finalizedValue) {
542 b->bits[GetBitsIndex(b, item)] |= kMark;
543 }
544 CoalesceQuickList();
545 GCAssert(m_qList == NULL);
546
547 return item;
548 }
549 }
550
551 void GCAlloc::FillQuickList(GCBlock* b)
552 {
553 GCAssert(m_qList == NULL);
554
555 // Check the budget first to avoid freeing the quick list if this allocator
556 // is the next victim.
557
558 if (m_qBudget < b->numFree) {
559 m_gc->ObtainQuickListBudget(m_itemsPerBlock*m_itemSize);
560 m_qBudget += m_itemsPerBlock;
561 m_qBudgetObtained += m_itemsPerBlock;
562 }
563
564 m_qList = (void**)b->firstFree;
565 m_qBudget -= b->numFree;
566 b->numFree = 0;
567 b->firstFree = NULL;
568 RemoveFromFreeList(b);
569 }
570
571 /* virtual */
572 void GCAlloc::Free(const void *item) // item is realPtr
573 {
574 GCBlock *b = GetBlock(item);
575 int bitsindex = GetBitsIndex(b, item);
576
577 // We can't allow free'ing something during sweeping - it messes up
578 // the per-block statistics - or anything that's on a mark queue.
579
580 GCAssert(m_gc->collecting == false || m_gc->marking == true);
581 if (m_gc->marking && (m_gc->collecting || b->bits[bitsindex] & kQueued)) {
582 m_gc->AbortFree(GetUserPointer(item));
583 return;
584 }
585
586 #ifdef _DEBUG
587 VerifyNotFree(b, item);
588
589 // RCObjects have a contract that they must clean themselves; since they
590 // have to scan themselves to decrement other RCObjects anyway, they might
591 // as well clean themselves too - better than suffering a memset later
592 if(b->rcobject)
593 m_gc->RCObjectZeroCheck((RCObject*)GetUserPointer(item));
594 #endif
595
596 #ifdef MMGC_HOOKS
597 GCHeap* heap = GCHeap::GetGCHeap();
598 if(heap->HooksEnabled())
599 {
600 const void* p = GetUserPointer(item);
601 size_t userSize = GC::Size(p);
602 #ifdef MMGC_MEMORY_PROFILER
603 if(heap->GetProfiler())
604 m_totalAskSize -= heap->GetProfiler()->GetAskSize(p);
605 #endif
606 heap->FinalizeHook(p, userSize);
607 heap->FreeHook(p, userSize, uint8_t(GCHeap::GCFreedPoison));
608 }
609 #endif
610
611 // We must set the kFreelist bit here.
612 //
613 // Sweeping and finalization depend on kFreelist being set on free objects:
614 // both skip items on the free list. For sweeping we could set it in the already
615 // expensive must-be-swept logic. For finalization we could move the setting
616 // into CoalesceQuickList, where it would be set only for objects flushed to
617 // the regular free lists. It would be a savings overall if the quick list is
618 // busy, as we expect it to be.
619 //
620 // However, when an object is freed explicitly, or freed by the reaper, there
621 // may still be pointers to that object in the GC'd heap (these could be
622 // uncleared pointers, untracked pointers, or misidentified pointers). If
623 // the object is not marked as free, the dead object may be marked, which
624 // would mean that it may end up on both a free list and on the mark stack, and
625 // that would be bad; it could also mean that objects reachable from the
626 // dead object would be marked in turn and would be retained for a GC cycle.
627
628 b->bits[bitsindex] |= kFreelist; // Don't clear the weak ref bit, FreeSlow may inspect it
629
630 if (b->slowFlags) // needs sweeping, or may have weak refs
631 {
632 FreeSlow(b, bitsindex, item);
633 return;
634 }
635
636 GCAssert(!b->needsSweeping());
637 GCAssert(!(b->bits[bitsindex] & kHasWeakRef));
638
639 #ifndef _DEBUG
640 ClearNonRCObject((void*)item, b->size);
641 #endif
642
643 FLPush(m_qList, item);
644
645 VALGRIND_MEMPOOL_FREE(b, item);
646
647 m_gc->SignalFreeWork(m_itemSize);
648 if (--m_qBudget <= 0)
649 QuickListBudgetExhausted();
650 }
651
652 void GCAlloc::FreeSlow(GCBlock* b, int bitsindex, const void* item)
653 {
654 if(b->bits[bitsindex] & kHasWeakRef)
655 b->gc->ClearWeakRef(GetUserPointer(item));
656
657 #ifndef _DEBUG
658 ClearNonRCObject((void*)item, b->size);
659 #endif
660 bool blockSwept = false;
661
662 if (b->needsSweeping()) {
663 // See comment in GCAlloc::Free
664 //b->bits[bitsindex] = kFreelist;
665
666 // We know that the quick list does not have any items from the block b,
667 // so we can push the object onto the block's free list and sweep the block.
668 // Make the quick list NULL while we do that.
669 void* qList = m_qList;
670 m_qList = NULL;
671
672 FLPush(b->firstFree, item);
673 b->numFree++;
674
675 blockSwept = Sweep(b);
676
677 m_qList = qList;
678 }
679 else {
680 *(void**)item = m_qList;
681 m_qList = (void**)item;
682
683 if (--m_qBudget <= 0)
684 QuickListBudgetExhausted();
685 }
686 if (!blockSwept)
687 VALGRIND_MEMPOOL_FREE(b, item);
688 }
689
690 REALLY_INLINE void GCAlloc::ClearNonRCObject(void* item, size_t size)
691 {
692 // memset rest of item not including free list pointer, in _DEBUG
693 // we poison the memory (and clear in Alloc)
694 //
695 // BTW, experiments show that clearing on alloc instead of on free
696 // benefits microbenchmarks that do massive amounts of double-boxing,
697 // but nothing else enough to worry about it. (The trick is that
698 // no clearing on alloc is needed when carving objects off the end
699 // of a block, whereas every object is cleared on free even if the
700 // page is subsequently emptied out and returned to the block manager.
701 // Massively boxing programs have alloc/free patterns that are biased
702 // toward non-RC objects carved off the ends of blocks.)
703 //
704 // As it is, we 'clear' in CreateChunk so it's good for all objects
705 // on the free list to be cleared uniformly, even if it adds an
706 // additional test to the hot path in Free(). We could in principle
707 // merge that test with b->slowFlags if we think RCObjects are likely
708 // to predominate on the path for GCAlloc::Free - not obvious, but
709 // credible.
710 if(!ContainsRCObjects())
711 VMPI_memset((char*)item, 0, size);
712 }
713
714 // True about objects on the quick list:
715 //
716 // - The numFree counter in their blocks does not account for the free objects
717 // on the quick list
718 // - They never belong to a block that must be swept - Free() checks whether
719 // a page needs sweeping
720 // - They all have kFreelist set
721
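A debug-style sketch of checking those invariants (illustrative only - no such helper exists in this file; m_qList, FLNext, GetBlock and GetBitsIndex are used as they appear above):

    void GCAlloc::CheckQuickListInvariants()
    {
        for (void* item = m_qList; item != NULL; item = FLNext(item)) {
            GCBlock* b = GetBlock(item);
            GCAssert(!b->needsSweeping());                // never on a must-sweep block
            GCAssert((b->bits[GetBitsIndex(b, item)] & kFreelist) == kFreelist);
        }
    }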
722 void GCAlloc::CoalesceQuickList()
723 {
724 void* item = m_qList;
725
726 m_qList = NULL;
727
728 while (item != NULL) {
729 void *next = FLNext(item);
730 GCBlock *b = GetBlock(item);
731
732 GCAssert(!b->needsSweeping());
733 if (b->numFree == 0)
734 AddToFreeList(b);
735
736 b->numFree++;
737 // See comment in GCAlloc::Free
738 //int bitsindex = GetBitsIndex(b, item);
739 //b->bits[bitsindex] = kFreelist;
740
741 // The object was cleared in Free() or will be cleared in Alloc(), but
742 // we need to link it onto the block's free list.
743
744 #ifdef _DEBUG
745 VerifyNotFree(b, item);
746 #endif
747
748 FLPush(b->firstFree, item);
749 item = next;
750 }
751
752 if (m_qBudgetObtained > m_itemsPerBlock) {
753 m_gc->RelinquishQuickListBudget((m_qBudgetObtained - m_itemsPerBlock)*m_itemSize);
754 m_qBudgetObtained = m_itemsPerBlock;
755 }
756 m_qBudget = m_qBudgetObtained;
757
758 // Faster to do this check once per block than once per object.
759
760 GCBlock *b = m_firstFree;
761 while (b != NULL) {
762 GCBlock* next = Next(b);
763 if(b->numFree == m_itemsPerBlock && !b->needsSweeping()) {
764 UnlinkChunk(b);
765 FreeChunk(b);
766 }
767 b = next;
768 }
769 }
770
771 void GCAlloc::QuickListBudgetExhausted()
772 {
773 m_gc->ObtainQuickListBudget(m_itemsPerBlock*m_itemSize);
774 m_qBudgetObtained += m_itemsPerBlock;
775 m_qBudget += m_itemsPerBlock;
776 }
777
778 #ifdef _DEBUG
779 void GCAlloc::VerifyNotFree(GCBlock* b, const void* item)
780 {
781 for ( void *free = m_qList ; free != NULL ; free = FLNext(free) )
782 GCAssert(free != item);
783
784 for ( void *free = b->firstFree ; free != NULL ; free = FLNext(free) )
785 GCAssert(free != item);
786 }
787 #endif
788
789 void GCAlloc::Finalize()
790 {
791 m_finalized = true;
792 // Go through every item of every block. Look for items
793 // that are in use but not marked as reachable, and delete
794 // them.
795
796 GCBlock *next = NULL;
797 for (GCBlock* b = m_firstBlock; b != NULL; b = next)
798 {
799 // we can unlink the block below
800 next = Next(b);
801
802 GCAssert(!b->needsSweeping());
803
804 // remove from the freelist to avoid mutator destructor allocations
805 // from using this block
806 bool putOnFreeList = false;
807 if(m_firstFree == b || b->prevFree != NULL || b->nextFree != NULL) {
808 putOnFreeList = true;
809 RemoveFromFreeList(b);
810 }
811
812 GCAssert(kMark == 0x1 && kFinalizable == 0x4 && kHasWeakRef == 0x8);
813
814 int numMarkedItems = 0;
815
816 gcbits_t* blockbits = b->bits;
817 for ( char *item = b->items, *limit = b->items + m_itemSize * b->GetCount() ; item < limit ; item += m_itemSize )
818 {
819 gcbits_t& marks = blockbits[GetBitsIndex(b,item)];
820 int mq = marks & kFreelist;
821 if(mq == kFreelist)
822 continue;
823
824 if(mq == kMark) {
825 numMarkedItems++;
826 continue;
827 }
828
829 GCAssertMsg(mq != kQueued, "No queued objects should exist when finalizing");
830
831 // GC::Finalize calls GC::MarkOrClearWeakRefs before calling GCAlloc::Finalize,
832 // ergo there should be no unmarked objects with weak refs.
833
834 GCAssertMsg(!(marks & kHasWeakRef), "No unmarked object should have a weak ref at this point");
835
836 #ifdef MMGC_HOOKS
837 if(m_gc->heap->HooksEnabled())
838 {
839 #ifdef MMGC_MEMORY_PROFILER
840 if(m_gc->heap->GetProfiler())
841 m_totalAskSize -= m_gc->heap->GetProfiler()->GetAskSize(GetUserPointer(item));
842 #endif
843
844 m_gc->heap->FinalizeHook(GetUserPointer(item), m_itemSize - DebugSize());
845 }
846 #endif
847
848 if (marks & kFinalizable)
849 {
850 GCFinalizedObject *obj = (GCFinalizedObject*)(void *)GetUserPointer(item);
851 marks &= ~kFinalizable; // Clear bits first so we won't get second finalization if finalizer longjmps out
852
853 /* See https://bugzilla.mozilla.org/show_bug.cgi?id=573737 for the case where the object might remain in
854 * an uninitialized state and thus a crash occurs while calling the dtor below.
855 */
856 if(*(intptr_t*)obj != 0)
857 obj->~GCFinalizedObject();
858
859 #if defined(_DEBUG)
860 if(((GCAlloc*)b->alloc)->ContainsRCObjects()) {
861 m_gc->RCObjectZeroCheck((RCObject*)obj);
862 }
863 #endif
864 }
865
866 // GC::GetWeakRef will not allow a weak reference to be created to an object that
867 // is ready for destruction.
868
869 GCAssertMsg(!(marks & kHasWeakRef), "No unmarked object should have a weak ref at this point");
870 }
871
872 // 3 outcomes:
873 // 1) empty, put on the list of empty pages
874 // 2) no freed items, partially empty or full, return to the free list if partially empty
875 // 3) some freed items, add to the to-be-swept list
876 if(numMarkedItems == 0) {
877 // add to the list of blocks to be returned to the Heap after finalization;
878 // we don't do this during finalization b/c we want finalizers to be able
879 // to reference the memory of other objects being finalized
880 UnlinkChunk(b);
881 b->gc->AddToSmallEmptyBlockList(b);
882 putOnFreeList = false;
883 } else if(numMarkedItems == (m_itemsPerBlock - b->numFree)) {
884 // nothing changed on this page, clear marks
885 // note there will be at least one free item on the page (otherwise it
886 // would not have been scanned) so the page just stays on the freelist
887 ClearMarks(b);
888 } else if(!b->needsSweeping()) {
889 // freeing some items but not all
890 if(b->nextFree || b->prevFree || b == m_firstFree) {
891 RemoveFromFreeList(b);
892 b->nextFree = b->prevFree = NULL;
893 }
894 AddToSweepList(b);
895 putOnFreeList = false;
896 }
897 b->finalizeState = m_gc->finalizedValue;
898 if(putOnFreeList)
899 AddToFreeList(b);
900 }
901 }
902
903 void GCAlloc::SweepGuts(GCBlock *b)
904 {
905 gcbits_t* blockbits = b->bits;
906 for ( char *item = b->items, *limit = b->items + m_itemSize * b->GetCount() ; item < limit ; item += m_itemSize )
907 {
908 uint32_t bitsindex = GetBitsIndex(b, item);
909 gcbits_t& marks = blockbits[bitsindex];
910
911 int mq = marks & kFreelist;
912 if(mq == kMark || mq == kQueued) // Sweeping is lazy; don't sweep objects on the mark stack
913 {
914 // live item, clear bits
915 marks &= ~kFreelist;
916 continue;
917 }
918
919 if(mq == kFreelist)
920 continue; // freelist item, ignore
921
922 // garbage, freelist it
923 #ifdef MMGC_HOOKS
924 if(m_gc->heap->HooksEnabled())
925 m_gc->heap->FreeHook(GetUserPointer(item), b->size - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
926 #endif
927 b->FreeSweptItem(item, bitsindex);
928 }
929 }
930
931 bool GCAlloc::Sweep(GCBlock *b)
932 {
933 GCAssert(b->needsSweeping());
934 GCAssert(m_qList == NULL);
935 RemoveFromSweepList(b);
936
937 SweepGuts(b);
938
939 if(b->numFree == m_itemsPerBlock)
940 {
941 UnlinkChunk(b);
942 FreeChunk(b);
943 return true;
944 }
945
946 AddToFreeList(b);
947
948 return false;
949 }
950
951 void GCAlloc::SweepNeedsSweeping()
952 {
953 GCBlock* next;
954 for (GCBlock* b = m_needsSweeping; b != NULL; b = next)
955 {
956 next = b->nextFree;
957 Sweep(b);
958 }
959 GCAssert(m_needsSweeping == NULL);
960 }
961
962 void GCAlloc::ClearMarks(GCAlloc::GCBlock* block)
963 {
964 GCAssert(m_qList == NULL);
965 // Clear all the mark bits
966 GCAssert(sizeof(gcbits_t) == 1);
967 GCAssert(kFreelist == 3);
968 GCAssert(m_numBitmapBytes % 4 == 0);
969 uint32_t *pbits = (uint32_t*)(void *)block->bits;
970 uint32_t mq32 = ~uint32_t(0x03030303U);
971
972 // Clear the marked and queued bits
973 // TODO: MMX version for IA32
974 for(int i=0, n=m_numBitmapBytes>>2; i < n; i++) {
975 pbits[i] &= mq32;
976 }
977
978 void *item = block->firstFree;
979 while(item != NULL) {
980 // Set freelist bit pattern. If it's free it's free so clear all
981 // the other bits.
982 block->bits[GetBitsIndex(block, item)] = kFreelist;
983 item = FLNext(item);
984 }
985 }
986
987 void GCAlloc::ClearMarks()
988 {
989 for ( GCBlock *block=m_firstBlock, *next ; block ; block=next ) {
990 next = Next(block);
991
992 if (block->needsSweeping() && Sweep(block))
993 continue;
994
995 ClearMarks(block);
996 }
997 }
998
999 #ifdef _DEBUG
1000 void GCAlloc::CheckMarks()
1001 {
1002 GCBlock *b = m_firstBlock;
1003
1004 while (b) {
1005 GCBlock *next = Next(b);
1006 GCAssertMsg(!b->needsSweeping(), "All needsSweeping should have been swept at this point.");
1007
1008 for ( char *item = b->items, *limit = b->items + m_itemSize * b->GetCount() ; item < limit ; item += m_itemSize ) {
1009 gcbits_t m = GC::GetGCBits(item) & kFreelist;
1010 GCAssertMsg(m == 0 || m == kFreelist, "All items should be free or clear, nothing should be marked or queued.");
1011 }
1012
1013 // Advance to next block
1014 b = next;
1015 }
1016 }
1017
1018 /*static*/
1019 int GCAlloc::ConservativeGetMark(const void *item, bool bogusPointerReturnValue)
1020 {
1021 GCBlock *block = GetBlock(item);
1022
1023 #ifdef MMGC_MEMORY_INFO
1024 item = GetRealPointer(item);
1025 #endif
1026
1027 // guard against bogus pointers to the block header
1028 if (item < block->items)
1029 return bogusPointerReturnValue;
1030
1031 // floor value to start of item
1032 int itemNum = GetObjectIndex(block, item);
1033
1034 // skip pointers into dead space at end of block
1035 // FIXME: not clear there is any dead space with current design
1036 if (itemNum > ((GCAlloc*)block->alloc)->m_itemsPerBlock - 1)
1037 return bogusPointerReturnValue;
1038
1039 // skip pointers into objects
1040 if(block->items + itemNum * block->size != item)
1041 return bogusPointerReturnValue;
1042
1043 return GC::GetMark(item);
1044 }
1045
1046 void GCAlloc::CheckFreelist()
1047 {
1048 GCBlock *b = m_firstFree;
1049 while(b)
1050 {
1051 void *freelist = b->firstFree;
1052 while(freelist)
1053 {
1054 // b->firstFree should be either 0 (end of the free list) or a pointer into b; otherwise, someone
1055 // wrote to freed memory and hosed our freelist
1056 GCAssert(freelist == 0 || ((uintptr_t) freelist >= (uintptr_t) b->items && (uintptr_t) freelist < (uintptr_t) b + GCHeap::kBlockSize));
1057 freelist = FLNext(freelist);
1058 }
1059 b = b->nextFree;
1060 }
1061 }
1062
1063 #endif // _DEBUG
1064
1065 // allows us to avoid division in GetItemIndex, kudos to Tinic
1066 static void ComputeMultiplyShift(uint16_t d, uint16_t &muli, uint16_t &shft)
1067 {
1068 uint32_t s = 0;
1069 uint32_t n = 0;
1070 uint32_t m = 0;
1071 for ( ; n < ( 1 << 13 ) ; s++) {
1072 m = n;
1073 n = ( ( 1 << ( s + 1 ) ) / d ) + 1;
1074 }
1075 shft = (uint16_t) s - 1;
1076 muli = (uint16_t) m;
1077 }
1078
1079 uint16_t GCAlloc::ComputeMultiply(uint16_t d)
1080 {
1081 uint16_t m, s;
1082 ComputeMultiplyShift(d, m, s);
1083 return m;
1084 }
1085
1086 uint16_t GCAlloc::ComputeShift(uint16_t d)
1087 {
1088 uint16_t m, s;
1089 ComputeMultiplyShift(d, m, s);
1090 return s;
1091 }
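The multiply-shift pair computed above replaces the item-index division: an offset within a block is divided by the item size as (offset * multiple) >> shift. A small self-check sketch (illustrative, not part of the file; it assumes the pair only has to floor correctly for offsets inside one block, as in the index computations above):

    static void CheckMultiplyShift(uint16_t d)
    {
        uint16_t muli, shft;
        ComputeMultiplyShift(d, muli, shft);
        // Every offset that can occur inside a block must divide exactly.
        for (uint32_t offset = 0; offset < GCHeap::kBlockSize; offset++)
            GCAssert(((offset * muli) >> shft) == offset / d);
    }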
1092
1093 REALLY_INLINE void GCAlloc::GCBlock::FreeSweptItem(const void *item, int bitsindex)
1094 {
1095 GCAlloc* alloc = (GCAlloc*)this->alloc;
1096 #ifdef _DEBUG
1097 // Check that we're not freeing something on the mark stack
1098 GCAssert((bits[bitsindex] & kQueued) == 0);
1099 alloc->VerifyNotFree(this, item);
1100 #endif
1101
1102 numFree++;
1103
1104 bits[bitsindex] = kFreelist;
1105
1106 #ifndef _DEBUG
1107 alloc->ClearNonRCObject((void*)item, size);
1108 #endif
1109 FLPush(firstFree, item);
1110 VALGRIND_MEMPOOL_FREE(this, item);
1111 }
1112
1113 void GCAlloc::GetUsageInfo(size_t& totalAskSize, size_t& totalAllocated)
1114 {
1115 int numAlloc, maxAlloc;
1116 GetAllocStats(numAlloc, maxAlloc);
1117 totalAllocated = numAlloc * m_itemSize;
1118 #ifdef MMGC_MEMORY_PROFILER
1119 totalAskSize = m_totalAskSize;
1120 #else
1121 totalAskSize = 0;
1122 #endif
1123 }
1124
1125 #ifdef MMGC_MEMORY_INFO
1126
1127 /* static */
1128 void GCAlloc::VerifyFreeBlockIntegrity(void* item, uint32_t size, uint32_t limit)
1129 {
1130 // go through every item on the free list and make sure it wasn't written to
1131 // after being poisoned.
1132 while(item)
1133 {
1134 if (--limit == 0)
1135 break;
1136 #ifdef MMGC_64BIT
1137 int n = (size >> 2) - 3;
1138 #else
1139 int n = (size >> 2) - 1;
1140 #endif
1141
1142 int startIndex = (int)((uint32_t*)item - (uint32_t*)GetRealPointer(item));
1143
1144 for(int i=startIndex; i<n; i++)
1145 {
1146 uint32_t data = ((uint32_t*)item)[i];
1147 if(data != uint32_t(GCHeap::GCSweptPoison) && data != uint32_t(GCHeap::GCFreedPoison))
1148 {
1149 ReportDeletedMemoryWrite(item);
1150 break;
1151 }
1152 }
1153 // next free item
1154 item = FLNext(item);
1155 }
1156 }
1157
1158 #endif //MMGC_MEMORY_INFO
1159
1160 void GCAlloc::GetAllocStats(int& numAlloc, int& maxAlloc) const
1161 {
1162 numAlloc = 0;
1163 maxAlloc = 0;
1164 GCBlock *b=m_firstBlock;
1165 while (b)
1166 {
1167 maxAlloc++;
1168 numAlloc += (m_itemsPerBlock - b->numFree);
1169 b = Next(b);
1170 }
1171 numAlloc -= (m_qBudgetObtained - m_qBudget);
1172 }
1173
1174 #ifdef _DEBUG
1175 bool GCAlloc::IsOnEitherList(GCBlock *b)
1176 {
1177 return b->nextFree != NULL || b->prevFree != NULL || b == m_firstFree || b == m_needsSweeping;
1178 }
1179
1180 /*static*/
1181 bool GCAlloc::IsPointerIntoGCObject(const void *item)
1182 {
1183 // Contorted code allows for debugging, don't "optimize" this
1184 GCBlock *block = GetBlock(item);
1185 bool retval = false;
1186 if(item >= block->items)
1187 retval = (GC::GetGCBits(block->items + GetObjectIndex(block, item) * ((GCAlloc*)block->alloc)->m_itemSize) & kFreelist) != kFreelist;
1188 return retval;
1189 }
1190
1191 /*static*/
1192 bool GCAlloc::IsWhite(const void *item)
1193 {
1194 GCBlock *block = GetBlock(item);
1195 // not a real item
1196 if(item < block->items)
1197 return false;
1198
1199 if(FindBeginning(item) != item)
1200 return false;
1201
1202 return (GC::GetGCBits(item) & (kMark|kQueued)) == 0;
1203 }
1204 #endif // _DEBUG
1205 }