File: | platform/mac/avmshell/../../../MMgc/GCHeap.cpp |
Location: | line 2913, column 16 |
Description: | Value stored to 'currentTotal' during its initialization is never read |
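The warning flags a dead store: 'currentTotal' is initialized with a computed value that is overwritten (or simply never read) before any use. Line 2913 is outside the excerpt below, so the flagged code itself is not shown here; the following is a minimal, hypothetical sketch of the pattern and the usual fix (the helper names are invented for illustration):

    // Pattern the analyzer reports ("value stored during initialization is never read"):
    size_t currentTotal = ComputeTotalNow();   // dead store: no read before the next assignment
    currentTotal = ComputeTotalAfterGC();
    Report(currentTotal);

    // Typical fix: initialize with the value that is actually read, or declare the
    // variable at its first real use:
    size_t total = ComputeTotalAfterGC();
    Report(total);

If the discarded initializer had a side effect that matters, the fix is instead to use (or explicitly ignore) its result.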
1 | /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */ |
2 | /* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */ |
3 | /* ***** BEGIN LICENSE BLOCK ***** |
4 | * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
5 | * |
6 | * The contents of this file are subject to the Mozilla Public License Version |
7 | * 1.1 (the "License"); you may not use this file except in compliance with |
8 | * the License. You may obtain a copy of the License at |
9 | * http://www.mozilla.org/MPL/ |
10 | * |
11 | * Software distributed under the License is distributed on an "AS IS" basis, |
12 | * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
13 | * for the specific language governing rights and limitations under the |
14 | * License. |
15 | * |
16 | * The Original Code is [Open Source Virtual Machine.]. |
17 | * |
18 | * The Initial Developer of the Original Code is |
19 | * Adobe System Incorporated. |
20 | * Portions created by the Initial Developer are Copyright (C) 2004-2006 |
21 | * the Initial Developer. All Rights Reserved. |
22 | * |
23 | * Contributor(s): |
24 | * Adobe AS3 Team |
25 | * |
26 | * Alternatively, the contents of this file may be used under the terms of |
27 | * either the GNU General Public License Version 2 or later (the "GPL"), or |
28 | * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
29 | * in which case the provisions of the GPL or the LGPL are applicable instead |
30 | * of those above. If you wish to allow use of your version of this file only |
31 | * under the terms of either the GPL or the LGPL, and not to allow others to |
32 | * use your version of this file under the terms of the MPL, indicate your |
33 | * decision by deleting the provisions above and replace them with the notice |
34 | * and other provisions required by the GPL or the LGPL. If you do not delete |
35 | * the provisions above, a recipient may use your version of this file under |
36 | * the terms of any one of the MPL, the GPL or the LGPL. |
37 | * |
38 | * ***** END LICENSE BLOCK ***** */ |
39 | |
40 | #include "MMgc.h" |
41 | #include <float.h> |
42 | |
43 | #ifdef AVMPLUS_SAMPLER |
44 | namespace avmplus |
45 | { |
46 | void recordAllocationSample(const void* item, size_t size); |
47 | void recordDeallocationSample(const void* item, size_t size); |
48 | } |
49 | #endif |
50 | |
51 | #if defined MMGC_POLICY_PROFILING && !defined AVMSHELL_BUILD
52 | extern void RedirectLogOutput(void (*)(const char*));
53 | static FILE* fp = NULL;
54 | |
55 | void logToFile(const char* s) |
56 | { |
57 | fprintf(fp, "%s", s); |
58 | fflush(fp); |
59 | } |
60 | |
61 | static void startGCLogToFile() |
62 | { |
63 | fp = fopen("gcbehavior.txt", "w"); |
64 | if (fp != NULL)
65 | RedirectLogOutput(logToFile); |
66 | } |
67 | |
68 | static void endGCLogToFile() |
69 | { |
70 | RedirectLogOutput(NULL);
71 | if (fp != NULL) {
72 | fclose(fp);
73 | fp = NULL;
74 | } |
75 | } |
76 | #endif // MMGC_POLICY_PROFILING && !AVMSHELL_BUILD |
77 | |
78 | namespace MMgc |
79 | { |
80 | GCHeap *GCHeap::instance = NULL;
81 | bool GCHeap::instanceEnterLockInitialized = false;
82 | vmpi_spin_lock_t GCHeap::instanceEnterLock; |
83 | |
84 | // GCHeap instance has the C++ runtime call dtor which causes problems |
85 | AVMPLUS_ALIGN8(uint8_t) heapSpace[sizeof(GCHeap)];
86 | |
87 | const size_t kLargeItemBlockId = ~0U; |
88 | |
89 | size_t GCHeap::leakedBytes; |
90 | |
91 | #ifdef MMGC_MEMORY_PROFILER |
92 | MemoryProfiler* GCHeap::profiler = (MemoryProfiler*)-1; |
93 | #endif |
94 | |
95 | GCHeapConfig::GCHeapConfig() : |
96 | initialSize(512), |
97 | heapLimit(kDefaultHeapLimit), |
98 | heapSoftLimit(0), |
99 | dispersiveAdversarial(0), // 0 means dispersive allocation is off. |
100 | OOMExitCode(0), |
101 | useVirtualMemory(VMPI_useVirtualMemory()), |
102 | trimVirtualMemory(true),
103 | mergeContiguousRegions(VMPI_canMergeContiguousRegions()), |
104 | sloppyCommit(VMPI_canCommitAlreadyCommittedMemory()), |
105 | verbose(false),
106 | returnMemory(true),
107 | gcstats(false), // tracking
108 | autoGCStats(false), // auto printing
109 | #ifdef AVMSHELL_BUILD
110 | gcbehavior(0), // controlled by command line switch |
111 | #else |
112 | gcbehavior(2), // unconditional, if MMGC_POLICY_PROFILING is on |
113 | #endif |
114 | eagerSweeping(false),
115 | #ifdef MMGC_HEAP_GRAPH |
116 | dumpFalsePositives(false),
117 | #endif |
118 | gcLoadCeiling(1.15), // Bug 619885: need > 1.0 to get belt loosening effect |
119 | gcEfficiency(0.25), |
120 | _checkFixedMemory(true) // See comment in GCHeap.h for why the default must be 'true'
121 | { |
122 | // Bugzilla 544695 - large heaps need to be controlled more tightly than |
123 | // small heaps due to the reliance of the Player on the GC for removing some |
124 | // objects from the AS2 scriptThreadList and because people don't want to |
125 | // use as much memory as a single policy across all heap sizes would require. |
126 | // As reference counting takes care of a lot of storage management, there's |
127 | // little harm in running the incremental GC more aggressively in large |
128 | // heaps - most of the time is spent elsewhere. |
129 | // |
130 | // Bug 619885: Note that mark/cons ratio is 1/(L-1); L=1.125 implies |
131 | // mark/cons is 8x. Programs that allocate rapidly would suffer significantly |
132 | // with smaller limit for L. (See also loadCeiling for other belt-loosening.) |
133 | |
134 | GCAssert(GCHeapConfig::kNumLoadFactors >= 7);
135 | |
136 | gcLoad[0] = 2.5; gcLoadCutoff[0] = 10; // Breathing room for warmup |
137 | gcLoad[1] = 2.0; gcLoadCutoff[1] = 25; // Classical 2x factor |
138 | gcLoad[2] = 1.75; gcLoadCutoff[2] = 50; // Tighten |
139 | gcLoad[3] = 1.5; gcLoadCutoff[3] = 75; // the |
140 | gcLoad[4] = 1.25; gcLoadCutoff[4] = 150; // screws |
141 | gcLoad[5] = 1.2; gcLoadCutoff[5] = 300; // Large heaps are |
142 | gcLoad[6] = 1.125; gcLoadCutoff[6] = DBL_MAX; // controlled (very) tightly
143 | |
144 | #ifdef MMGC_64BIT |
145 | trimVirtualMemory = false; // no need
146 | #endif |
147 | const char *envValue = VMPI_getenv("MMGC_HEAP_LIMIT"); |
148 | if(envValue) |
149 | heapLimit = VMPI_strtol(envValue, 0, 10);
150 | envValue = VMPI_getenv("MMGC_HEAP_SOFT_LIMIT"); |
151 | if(envValue) |
152 | heapSoftLimit = VMPI_strtol(envValue, 0, 10);
153 | } |
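As a quick check on the mark/cons comment above: the ratio implied by a load factor L is 1/(L - 1), so the gcLoad[] table tightens the collector from roughly 1x marking per unit of allocation at L = 2.0 up to 8x at L = 1.125. A small illustrative helper (not part of the file):

    // mark/cons ratio implied by a load factor L (see the Bug 619885 comment above)
    //   L = 2.0   -> 1.0/(1.0)   = 1x
    //   L = 1.25  -> 1.0/(0.25)  = 4x
    //   L = 1.125 -> 1.0/(0.125) = 8x
    static double markConsRatio(double L) { return 1.0 / (L - 1.0); }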
154 | |
155 | bool GCHeapConfig::IsGCOptionWithParam(const char *arg)
156 | {
157 | if (!VMPI_strcmp(arg, "-memlimit")
158 | || !VMPI_strcmp(arg, "-load")
159 | || !VMPI_strcmp(arg, "-loadCeiling")
160 | || !VMPI_strcmp(arg, "-gcwork")
161 | || !VMPI_strcmp(arg, "-gcstack"))
162 | return true;
163 | else
164 | return false;
165 | } |
166 | |
167 | static bool HasPrefix(const char* aString, const char *aStr)
168 | {
169 | size_t n = VMPI_strlen(aStr);
170 | return (VMPI_strncmp(aString, aStr, n) == 0);
171 | } |
172 | |
173 | static const char * useDefaultOrSkipForward(const char *arg, |
174 | const char *key, |
175 | const char *dflt) |
176 | { |
177 | size_t n = VMPI_strlen(key);
178 | if (VMPI_strcmp(arg, key) == 0) {
179 | return dflt;
180 | } else if (HasPrefix(arg, key) &&
181 | VMPI_strcmp(arg, key) > 0) {
182 | AvmAssert(dflt == NULL);
183 | const char *param = arg + n; |
184 | while (*param != 0 && (*param == ' ' || *param == '=')) |
185 | param++; |
186 | return param; |
187 | } else { |
188 | return NULL;
189 | } |
190 | } |
191 | |
192 | bool GCHeapConfig::ParseAndApplyOption(const char *arg, bool &wrong,
193 | const char *successorString/*=0*/)
194 | {
195 | wrong = false; // assume input is valid until we see otherwise.
196 | 
197 | if (!VMPI_strcmp(arg, "-memstats")) {
198 | gcstats = true;
199 | autoGCStats = true;
200 | return true;
201 | }
202 | else if (!VMPI_strcmp(arg, "-memstats-verbose")) {
203 | gcstats = true;
204 | autoGCStats = true;
205 | verbose = true;
206 | return true;
207 | }
208 | else if (HasPrefix(arg, "-memlimit")) {
209 | const char *param =
210 | useDefaultOrSkipForward(arg, "-memlimit", successorString);
211 | if (param == NULL) {
212 | wrong = true;
213 | return true;
214 | }
215 | heapLimit = VMPI_strtol(param, 0, 10);
216 | return true;
217 | } |
218 | #ifdef MMGC_POLICY_PROFILING |
219 | else if (!VMPI_strcmp(arg, "-gcbehavior")) {
220 | gcbehavior = 2;
221 | return true;
222 | }
223 | else if (!VMPI_strcmp(arg, "-gcsummary")) {
224 | gcbehavior = 1;
225 | return true;
226 | }
227 | #endif
228 | else if (!VMPI_strcmp(arg, "-eagersweep")) {
229 | eagerSweeping = true;
230 | return true;
231 | } |
232 | else if (HasPrefix(arg, "-load") && !HasPrefix(arg, "-loadCeiling")) { |
233 | const char *param = |
234 | useDefaultOrSkipForward(arg, "-load", successorString); |
235 | if (param == NULL) {
236 | wrong = true;
237 | return true;
238 | } |
239 | |
240 | double load; |
241 | double limit; |
242 | int nchar; |
243 | const char* val = param; |
244 | size_t k = 0; |
245 | // limit=0 is legal, it means unlimited |
246 | for (;;) { |
247 | if (k < kNumLoadFactors) { |
248 | if (VMPI_sscanf(val, "%lf,%lf%n", &load, &limit, &nchar) == 2 &&
249 | load > 1.0 && |
250 | limit >= 0.0) |
251 | { |
252 | k++; |
253 | val += nchar; |
254 | if (*val == 0) { |
255 | break; |
256 | } |
257 | if (*val == ',') { |
258 | val++; |
259 | continue; |
260 | } |
261 | } |
262 | else if (VMPI_sscanf(val, "%lf%n", &load, &nchar) == 1 &&
263 | val[nchar] == 0 && |
264 | load > 1.0) |
265 | { |
266 | break; |
267 | } |
268 | } |
269 | wrong = true;
270 | return true;
271 | } |
272 | // Above loop validated the param string; below loop |
273 | // applies it. The control flow of below loop *must* |
274 | // match that of the above loop *exactly*; otherwise the |
275 | // validation will be out of sync with the configuration. |
276 | val = param; |
277 | k = 0; |
278 | for (;;) { |
279 | if (k < kNumLoadFactors) { |
280 | if (VMPI_sscanf(val, "%lf,%lf%n", &load, &limit, &nchar) == 2 &&
281 | load > 1.0 && |
282 | limit >= 0.0) |
283 | { |
284 | gcLoad[k] = load; |
285 | gcLoadCutoff[k] = limit; |
286 | k++; |
287 | val += nchar; |
288 | if (*val == 0) { |
289 | gcLoadCutoff[k-1] = DBL_MAX;
290 | break; |
291 | } |
292 | if (*val == ',') { |
293 | val++; |
294 | continue; |
295 | } |
296 | } |
297 | else if (VMPI_sscanf(val, "%lf%n", &load, &nchar) == 1 &&
298 | val[nchar] == 0 && |
299 | load > 1.0) |
300 | { |
301 | gcLoad[k] = load; |
302 | gcLoadCutoff[k] = DBL_MAX;
303 | break; |
304 | } |
305 | } |
306 | // (see above note; if we get here, we're out of sync) |
307 | GCAssert(false);
308 | }
309 | return true;
310 | } |
311 | else if (HasPrefix(arg, "-loadCeiling")) { |
312 | const char *param = |
313 | useDefaultOrSkipForward(arg, "-loadCeiling", successorString); |
314 | if (param == NULL) {
315 | wrong = true;
316 | return true;
317 | } |
318 | |
319 | double ceiling; |
320 | int nchar; |
321 | const char* val = param; |
322 | if (VMPI_sscanf(val, "%lf%n", &ceiling, &nchar) == 1 &&
323 | size_t(nchar) == VMPI_strlen(val) &&
324 | ceiling >= 1.0) |
325 | { |
326 | gcLoadCeiling = ceiling; |
327 | return true;
328 | } |
329 | else |
330 | { |
331 | wrong = true;
332 | return true;
333 | } |
334 | } |
335 | else if (HasPrefix(arg, "-gcwork")) { |
336 | const char* param = |
337 | useDefaultOrSkipForward(arg, "-gcwork", successorString); |
338 | if (param == NULL) {
339 | wrong = true;
340 | return true;
341 | } |
342 | |
343 | double work; |
344 | int nchar; |
345 | const char* val = param; |
346 | if (VMPI_sscanf(val, "%lf%n", &work, &nchar) == 1 && size_t(nchar) == VMPI_strlen(val) && work > 0.0 && work <= 1.0) {
347 | gcEfficiency = work; |
348 | return true;
349 | } |
350 | else { |
351 | wrong = true;
352 | return true;
353 | } |
354 | } |
355 | |
356 | // arg unmatched; option not handled here. |
357 | return false;
358 | } |
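ParseAndApplyOption accepts an option either fused with its parameter ("-load=2.0,10") or split across two arguments ("-load" "2.0,10"); successorString and IsGCOptionWithParam exist to support the split form. A sketch of how a command-line driver might use the pair (the loop below is illustrative, not taken from avmshell):

    // Apply GC-related switches from argv to a GCHeapConfig (illustrative only).
    void ApplyGCOptions(MMgc::GCHeapConfig& cfg, int argc, char** argv)
    {
        for (int i = 1; i < argc; i++) {
            const char* next = (i + 1 < argc) ? argv[i + 1] : NULL;
            bool wrong = false;
            if (cfg.ParseAndApplyOption(argv[i], wrong, next)) {
                if (wrong) {
                    // malformed GC option: report it and stop
                }
                // Exact matches like "-load" consumed the next argv slot as their parameter.
                if (cfg.IsGCOptionWithParam(argv[i]))
                    i++;
            }
        }
    }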
359 | |
360 | /* static */ |
361 | void GCHeap::ResetStatics() |
362 | { |
363 | instance = NULL;
364 | #ifdef MMGC_MEMORY_PROFILER |
365 | if(profiler && IsProfilerInitialized()) |
366 | delete profiler; |
367 | profiler = (MemoryProfiler*)-1; |
368 | #endif |
369 | } |
370 | |
371 | void GCHeap::Init(const GCHeapConfig& config) |
372 | { |
373 | GCAssert(instance == NULL);
374 | void *p = (void*)heapSpace; |
375 | instance = new (p) GCHeap(config); |
376 | } |
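Init constructs the singleton with placement new into the statically allocated, 8-byte-aligned heapSpace buffer declared near the top of the file, so the C++ runtime never registers a destructor for it (see the comment above heapSpace). A standalone sketch of that pattern, with invented names:

    #include <new>       // placement new
    #include <cstdint>

    struct Widget { /* ... */ };

    // Raw, suitably aligned storage: no constructor or destructor runs at static
    // init/teardown time; lifetime is controlled explicitly by Create/Destroy.
    alignas(8) static uint8_t storage[sizeof(Widget)];
    static Widget* instance = 0;

    void Create()  { instance = new (storage) Widget(); }   // placement new
    void Destroy() { instance->~Widget(); instance = 0; }   // explicit destructor call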
377 | |
378 | size_t GCHeap::Destroy() |
379 | { |
380 | EnterLock(); |
381 | GCAssert(instance != NULL);
382 | instance->DestroyInstance(); |
383 | EnterRelease(); |
384 | return leakedBytes; |
385 | } |
386 | |
387 | GCHeap::GCHeap(const GCHeapConfig& c) |
388 | : kNativePageSize(VMPI_getVMPageSize()), |
389 | lastRegion(NULL),
390 | freeRegion(NULL),
391 | nextRegion(NULL),
392 | blocks(NULL),
393 | blocksLen(0), |
394 | numDecommitted(0), |
395 | numRegionBlocks(0), |
396 | numAlloc(0), |
397 | gcheapCodeMemory(0), |
398 | externalCodeMemory(0), |
399 | externalPressure(0), |
400 | m_notificationThread(0), |
401 | config(c), |
402 | status(kMemNormal), |
403 | enterCount(0), |
404 | preventDestruct(0), |
405 | m_oomHandling(true),
406 | #ifdef MMGC_MEMORY_PROFILER |
407 | hasSpy(false),
408 | #endif |
409 | maxTotalHeapSize(0), |
410 | #ifdef MMGC_POLICY_PROFILING |
411 | maxPrivateMemory(0), |
412 | #endif |
413 | largeAllocs(0), |
414 | #ifdef MMGC_HOOKS |
415 | hooksEnabled(false),
416 | #endif |
417 | entryChecksEnabled(true),
418 | abortStatusNotificationSent(false)
419 | { |
420 | VMPI_lockInit(&m_spinlock); |
421 | VMPI_lockInit(&gclog_spinlock); |
422 | |
423 | // ResetStatics should be called at the start here before using/initializing any statics |
424 | ResetStatics(); |
425 | |
426 | // Initialize free lists |
427 | HeapBlock *block = freelists; |
428 | for (uint32_t i=0; i<kNumFreeLists; i++) { |
429 | block->FreelistInit(); |
430 | block++; |
431 | } |
432 | |
433 | // Create the initial heap |
434 | { |
435 | MMGC_LOCK(m_spinlock);
436 | if (!ExpandHeap((int)config.initialSize)) |
437 | { |
438 | Abort(); |
439 | } |
440 | } |
441 | |
442 | fixedMalloc.InitInstance(this); |
443 | |
444 | instance = this; |
445 | |
446 | #ifdef MMGC_MEMORY_PROFILER |
447 | //create profiler if turned on and if it is not already created |
448 | if(!IsProfilerInitialized()) |
449 | { |
450 | InitProfiler(); |
451 | } |
452 | |
453 | if(profiler) |
454 | { |
455 | hooksEnabled = true; // set only after creating profiler
456 | hasSpy = VMPI_spySetup(); |
457 | } |
458 | #endif |
459 | |
460 | #ifdef MMGC_MEMORY_INFO |
461 | hooksEnabled = true; // always track allocs in DEBUG builds
462 | #endif |
463 | |
464 | #if defined MMGC_POLICY_PROFILING && !defined AVMSHELL_BUILD
465 | startGCLogToFile(); |
466 | #endif |
467 | } |
468 | |
469 | void GCHeap::DestroyInstance() |
470 | { |
471 | #ifdef MMGC_MEMORY_PROFILER |
472 | if (profiler) |
473 | profiler->DumpAllocationProfile(); |
474 | #endif |
475 | #if defined MMGC_POLICY_PROFILING && !defined AVMSHELL_BUILD
476 | endGCLogToFile(); |
477 | #endif |
478 | |
479 | gcManager.destroy(); |
480 | callbacks.Destroy(); |
481 | |
482 | leakedBytes = GetFixedMalloc()->GetBytesInUse(); |
483 | fixedMalloc.DestroyInstance(); |
484 | GCAssertMsg(leakedBytes == 0 || GetStatus() == kMemAbort, "Leaks!");
485 | |
486 | size_t internalNum = AddrToBlock(blocks)->size + numRegionBlocks; |
487 | |
488 | // numAlloc should just be the size of the HeapBlock's space |
489 | if(numAlloc != internalNum && status != kMemAbort) |
490 | { |
491 | for (unsigned int i=0; i<blocksLen; i++) |
492 | { |
493 | HeapBlock *block = &blocks[i]; |
494 | if(block->inUse() && block->baseAddr && block->baseAddr != (char*)blocks) |
495 | { |
496 | #ifndef DEBUG |
497 | if (config.verbose) |
498 | #endif |
499 | { |
500 | GCLog("Block 0x%x not freed\n", block->baseAddr); |
501 | } |
502 | #if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO) |
503 | if(block->allocTrace) |
504 | PrintStackTrace(block->allocTrace); |
505 | #endif |
506 | } |
507 | } |
508 | GCAssert(false);
509 | } |
510 | |
511 | #ifdef MMGC_MEMORY_PROFILER |
512 | hooksEnabled = false;
513 | |
514 | if(hasSpy) |
515 | VMPI_spyTeardown(); |
516 | #endif |
517 | |
518 | FreeAll(); |
519 | ResetStatics(); |
520 | |
521 | // Acquire all the locks before destroying them to make reasonably |
522 | // sure we're the last consumers. This is probably not exactly |
523 | // right, see https://bugzilla.mozilla.org/show_bug.cgi?id=548347 |
524 | // and linked bugs for a discussion. Note we can't acquire these |
525 | // much higher up because we get into situations where GCHeap code |
526 | // will want to lock these locks, but they are already locked. |
527 | |
528 | VMPI_lockAcquire(&m_spinlock); |
529 | VMPI_lockRelease(&m_spinlock); |
530 | VMPI_lockDestroy(&m_spinlock); |
531 | |
532 | VMPI_lockAcquire(&gclog_spinlock); |
533 | VMPI_lockRelease(&gclog_spinlock); |
534 | VMPI_lockDestroy(&gclog_spinlock); |
535 | |
536 | if(enterFrame) |
537 | enterFrame->Destroy(); // Destroy the pointed-to value |
538 | enterFrame.destroy(); // Destroy the thread-local itself |
539 | } |
540 | |
541 | void* GCHeap::Alloc(size_t size, uint32_t flags, size_t alignment) |
542 | { |
543 | GCAssert(size > 0);
544 | GCAssert(alignment > 0);
545 | #ifdef DEBUG |
546 | { |
547 | // Alignment must be a power of 2 |
548 | size_t a = alignment; |
549 | while ((a & 1) == 0) |
550 | a >>= 1; |
551 | GCAssert(a == 1);
552 | } |
553 | #endif |
554 | |
555 | void *baseAddr = 0; |
556 | bool zero = (flags & kZero) != 0;
557 | bool expand = (flags & kExpand) != 0;
558 | { |
559 | MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);
560 | |
561 | bool saved_oomHandling = m_oomHandling;
562 | m_oomHandling = saved_oomHandling && (flags & kNoOOMHandling) == 0; |
563 | |
564 | baseAddr = AllocHelper(size, expand, zero, alignment); |
565 | |
566 | // If expand didn't work, or expand flag not set, then try to free |
567 | // memory then realloc from the heap |
568 | if (!baseAddr) |
569 | { |
570 | SendFreeMemorySignal(size); |
571 | baseAddr = AllocHelper(size, expand, zero, alignment); |
572 | } |
573 | |
574 | // If we're still unable to allocate, we're done |
575 | if (!baseAddr) |
576 | { |
577 | if (flags & kCanFail) |
578 | { |
579 | m_oomHandling = saved_oomHandling; |
580 | return NULL;
581 | } else { |
582 | Abort(); |
583 | } |
584 | } |
585 | |
586 | numAlloc += size; |
587 | |
588 | #ifdef MMGC_MEMORY_PROFILER |
589 | if((flags & kProfile) && HooksEnabled() && profiler) { |
590 | profiler->RecordAllocation(baseAddr, size * kBlockSize, size * kBlockSize, /*managed=*/false);
591 | } |
592 | #endif |
593 | |
594 | // Only check for memory limits if we're allowing OOM notifications |
595 | if (m_oomHandling) |
596 | { |
597 | CheckForMemoryLimitsExceeded(); |
598 | } |
599 | |
600 | m_oomHandling = saved_oomHandling; |
601 | } |
602 | |
603 | GCAssert(Size(baseAddr) == size);
604 | |
605 | // Zero out the memory, if requested to do so |
606 | if (zero) { |
607 | // These pages may have been seen by valgrind before and |
608 | // they become unaddressable when we last called |
609 | // FREELIKE_BLOCK or MEMPOOL_DESTROY, use MAKE_MEM_DEFINED |
610 | // to silence write to freed memory errors. |
611 | VALGRIND_MAKE_MEM_DEFINED(baseAddr, size * kBlockSize);
612 | VMPI_memset(baseAddr, 0, size * kBlockSize);
613 | // and then make the memory undefined again. We do this because
614 | // either the VALGRIND_MALLOCLIKE_BLOCK call
615 | // below will define it, or the suballocator will, ie this is
616 | // here to keep the sub allocators honest.
617 | VALGRIND_MAKE_MEM_UNDEFINED(baseAddr, size * kBlockSize);
618 | } |
619 | |
620 | // Fail the allocation if we're a "canFail" allocation that has pushed beyond one of our limits. |
621 | if((flags & kCanFail) != 0 && (status == kMemSoftLimit || SoftLimitExceeded() || HardLimitExceeded() )) |
622 | { |
623 | FreeInternal(baseAddr, (flags & kProfile) != 0, m_oomHandling); |
624 | return NULL;
625 | } |
626 | |
627 | // We utilize the "profile" flag to tell the difference |
628 | // between client requests and sub-allocator requests. Direct |
629 | // client requests are reported to valgrind here, sub |
630 | // allocators need to tell valgrind about memory themselves. |
631 | if ((flags & kProfile) != 0) { |
632 | VALGRIND_MALLOCLIKE_BLOCK(baseAddr, size * kBlockSize, 0, (flags&kZero) != 0);
633 | } |
634 | |
635 | GCAssert(((uintptr_t)baseAddr >> kBlockShift) % alignment == 0);
636 | return baseAddr; |
637 | } |
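For callers, the flags checked above determine the failure behavior: kExpand allows growing the heap, kZero requests zeroed blocks, and kCanFail turns an out-of-memory condition into a NULL return instead of Abort(). A sketch of a call site, assuming the usual GCHeap::GetGCHeap() accessor for the 'instance' singleton (the accessor is not shown in this excerpt):

    // size is in blocks (kBlockSize units), and the result is block-aligned.
    MMgc::GCHeap* heap = MMgc::GCHeap::GetGCHeap();
    void* p = heap->Alloc(4, MMgc::GCHeap::kExpand | MMgc::GCHeap::kZero | MMgc::GCHeap::kCanFail);
    if (p == NULL) {
        // A soft/hard limit was exceeded or expansion failed; the caller must cope.
    }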
638 | |
639 | void *GCHeap::AllocHelper(size_t size, bool expand, bool& zero, size_t alignment)
640 | { |
641 | // first try to find it in our existing free memory |
642 | HeapBlock *block = AllocBlock(size, zero, alignment); |
643 | |
644 | // Try to expand if the flag is set |
645 | if(!block && expand) { |
646 | |
647 | // Look ahead at memory limits to see if we should trigger a free memory signal |
648 | if ( (HardLimitExceeded(size) || SoftLimitExceeded(size))) |
649 | { |
650 | SendFreeMemorySignal(size); |
651 | } |
652 | |
653 | // Don't allow our memory consumption to grow beyond hard limit |
654 | if(HardLimitExceeded(size)) |
655 | return NULL;
656 | |
657 | if(size >= kOSAllocThreshold && config.useVirtualMemory) { |
658 | return LargeAlloc(size, alignment); |
659 | } else { |
660 | ExpandHeap(size); |
661 | block = AllocBlock(size, zero, alignment); |
662 | } |
663 | } |
664 | |
665 | #if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO) |
666 | if(profiler && block) |
667 | block->allocTrace = profiler->GetStackTrace(); |
668 | #endif |
669 | |
670 | return block != NULL ? block->baseAddr : NULL;
671 | } |
672 | |
673 | void GCHeap::SignalCodeMemoryAllocation(size_t size, bool gcheap_memory)
674 | { |
675 | if (gcheap_memory) |
676 | gcheapCodeMemory += size; |
677 | else |
678 | externalCodeMemory += size; |
679 | CheckForMemoryLimitsExceeded(); |
680 | } |
681 | |
682 | void GCHeap::CheckForMemoryLimitsExceeded() |
683 | { |
684 | |
685 | // If we're already in the process of sending out memory notifications, don't bother verifying now. |
686 | if (status == MMgc::kMemAbort || statusNotificationBeingSent()) |
687 | return; |
688 | |
689 | size_t overage = 0; |
690 | if (SoftLimitExceeded()) |
691 | { |
692 | overage = GetTotalHeapSize() + externalPressure/kBlockSize - config.heapSoftLimit; |
693 | } |
694 | else if (HardLimitExceeded()) |
695 | { |
696 | overage = (GetTotalHeapSize() + externalPressure/kBlockSize) - config.heapLimit + (config.heapLimit / 10); |
697 | } |
698 | |
699 | if (overage) |
700 | { |
701 | SendFreeMemorySignal(overage); |
702 | |
703 | CheckForHardLimitExceeded(); |
704 | |
705 | CheckForSoftLimitExceeded(overage); |
706 | } |
707 | } |
708 | |
709 | void GCHeap::FreeInternal(const void *item, bool profile, bool oomHandling)
710 | { |
711 | (void)profile; |
712 | |
713 | // recursive free calls are allowed from StatusChangeNotify |
714 | MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);
715 | |
716 | bool saved_oomHandling = m_oomHandling;
717 | m_oomHandling = saved_oomHandling && oomHandling; |
718 | |
719 | HeapBlock *block = AddrToBlock(item); |
720 | |
721 | size_t size; |
722 | if(block == NULL) {
723 | size = LargeAllocSize(item); |
724 | } else { |
725 | size = block->size; |
726 | } |
727 | |
728 | // Update metrics |
729 | GCAssert(numAlloc >= (unsigned int)size);
730 | numAlloc -= size; |
731 | |
732 | #if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO) |
733 | if(profiler && block) |
734 | block->freeTrace = profiler->GetStackTrace(); |
735 | #endif |
736 | |
737 | #ifdef MMGC_MEMORY_PROFILER |
738 | if(profile && HooksEnabled() && profiler) { |
739 | profiler->RecordDeallocation(item, size * kBlockSize); |
740 | } |
741 | #endif |
742 | |
743 | if(block) |
744 | FreeBlock(block); |
745 | else |
746 | LargeFree(item); |
747 | |
748 | if (profile) |
749 | VALGRIND_FREELIKE_BLOCK(item, 0);
750 | |
751 | m_oomHandling = saved_oomHandling; |
752 | } |
753 | |
754 | void GCHeap::Decommit() |
755 | { |
756 | // keep at least initialSize free |
757 | if(!config.returnMemory) |
758 | return; |
759 | |
760 | // don't decommit if OOM handling is disabled; there's a guard in the OOM code so this |
761 | // should never happen, but belt and suspenders... |
762 | if (!m_oomHandling) |
763 | return; |
764 | |
765 | size_t heapSize = GetTotalHeapSize(); |
766 | size_t freeSize = GetFreeHeapSize(); |
767 | |
768 | size_t decommitSize = 0; |
769 | // commit if > kDecommitThresholdPercentage is free |
770 | if(FreeMemoryExceedsDecommitThreshold()) |
771 | { |
772 | decommitSize = int((freeSize * 100 - heapSize * kDecommitThresholdPercentage) / 100); |
773 | } |
774 | // If we're over the heapLimit, attempt to decommit enough to get just under the limit |
775 | else if ( (heapSize > config.heapLimit) && ((heapSize - freeSize) < config.heapLimit)) |
776 | { |
777 | decommitSize = heapSize - config.heapLimit + 1; |
778 | |
779 | } |
780 | // If we're over the SoftLimit, attempt to decommit enough to get just under the softLimit |
781 | else if ((config.heapSoftLimit!= 0) && (heapSize > config.heapSoftLimit) && ((heapSize - freeSize) < config.heapSoftLimit)) |
782 | { |
783 | decommitSize = heapSize - config.heapSoftLimit + 1; |
784 | } |
785 | else { |
786 | return; |
787 | } |
788 | |
789 | if ((decommitSize < (size_t)kMinHeapIncrement) && (freeSize > (size_t)kMinHeapIncrement)) |
790 | { |
791 | |
792 | decommitSize = kMinHeapIncrement; |
793 | } |
794 | |
795 | // Don't decommit more than our initial config size. |
796 | if (heapSize - decommitSize < config.initialSize) |
797 | { |
798 | decommitSize = heapSize - config.initialSize; |
799 | } |
800 | |
801 | |
802 | MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);
803 | |
804 | restart: |
805 | |
806 | // search from the end of the free list so we decommit big blocks |
807 | HeapBlock *freelist = freelists+kNumFreeLists-1; |
808 | |
809 | HeapBlock *endOfBigFreelists = &freelists[GetFreeListIndex(1)]; |
810 | |
811 | for (; freelist >= endOfBigFreelists && decommitSize > 0; freelist--) |
812 | { |
813 | #ifdef MMGC_MAC |
814 | // We may call RemoveLostBlock below which splits regions |
815 | // and may need to create a new one, don't let it expand |
816 | // though, expanding while Decommit'ing would be silly. |
817 | if(!EnsureFreeRegion(/*expand=*/false))
818 | return; |
819 | #endif |
820 | |
821 | HeapBlock *block = freelist; |
822 | while ((block = block->prev) != freelist && decommitSize > 0) |
823 | { |
824 | // decommitting already decommitted blocks doesn't help |
825 | // temporary replacement for commented out conditional below |
826 | GCAssert(block->size != 0);
827 | if(!block->committed /*|| block->size == 0*/) |
828 | continue; |
829 | |
830 | if(config.useVirtualMemory) |
831 | { |
832 | RemoveFromList(block); |
833 | if((size_t)block->size > decommitSize) |
834 | { |
835 | HeapBlock *newBlock = Split(block, (int)decommitSize); |
836 | AddToFreeList(newBlock); |
837 | } |
838 | |
839 | Region *region = AddrToRegion(block->baseAddr); |
840 | if(config.trimVirtualMemory && |
841 | freeSize * 100 > heapSize * kReleaseThresholdPercentage && |
842 | // if block is as big or bigger than a region, free the whole region |
843 | block->baseAddr <= region->baseAddr && |
844 | region->reserveTop <= block->endAddr() ) |
845 | { |
846 | |
847 | if(block->baseAddr < region->baseAddr) |
848 | { |
849 | HeapBlock *newBlock = Split(block, int((region->baseAddr - block->baseAddr) / kBlockSize)); |
850 | AddToFreeList(block); |
851 | block = newBlock; |
852 | } |
853 | if(block->endAddr() > region->reserveTop) |
854 | { |
855 | HeapBlock *newBlock = Split(block, int((region->reserveTop - block->baseAddr) / kBlockSize)); |
856 | AddToFreeList(newBlock); |
857 | } |
858 | |
859 | decommitSize -= block->size; |
860 | RemoveBlock(block); |
861 | goto restart; |
862 | } |
863 | else if(VMPI_decommitMemory(block->baseAddr, block->size * kBlockSize)) |
864 | { |
865 | block->committed = false;
866 | block->dirty = false;
867 | decommitSize -= block->size; |
868 | if(config.verbose) { |
869 | GCLog("decommitted %d page block from %p\n", block->size, block->baseAddr); |
870 | } |
871 | } |
872 | else |
873 | { |
874 | #ifdef MMGC_MAC |
875 | // this can happen on mac where we release and re-reserve the memory and another thread may steal it from us |
876 | RemoveLostBlock(block); |
877 | goto restart; |
878 | #else |
879 | // if the VM API's fail us bail |
880 | VMPI_abort();
881 | #endif |
882 | } |
883 | |
884 | numDecommitted += block->size; |
885 | |
886 | // merge with previous/next if not in use and not committed |
887 | HeapBlock *prev = block - block->sizePrevious; |
888 | if(block->sizePrevious != 0 && !prev->committed && !prev->inUse()) { |
889 | RemoveFromList(prev); |
890 | |
891 | prev->size += block->size; |
892 | |
893 | block->size = 0; |
894 | block->sizePrevious = 0; |
895 | block->baseAddr = 0; |
896 | |
897 | block = prev; |
898 | } |
899 | |
900 | HeapBlock *next = block + block->size; |
901 | if(next->size != 0 && !next->committed && !next->inUse()) { |
902 | RemoveFromList(next); |
903 | |
904 | block->size += next->size; |
905 | |
906 | next->size = 0; |
907 | next->sizePrevious = 0; |
908 | next->baseAddr = 0; |
909 | } |
910 | |
911 | next = block + block->size; |
912 | next->sizePrevious = block->size; |
913 | |
914 | // add this block to the back of the bus to make sure we consume committed memory |
915 | // first |
916 | HeapBlock *backOfTheBus = &freelists[kNumFreeLists-1]; |
917 | HeapBlock *pointToInsert = backOfTheBus; |
918 | while ((pointToInsert = pointToInsert->next) != backOfTheBus) { |
919 | if (pointToInsert->size >= block->size && !pointToInsert->committed) { |
920 | break; |
921 | } |
922 | } |
923 | AddToFreeList(block, pointToInsert); |
924 | |
925 | // so we keep going through freelist properly |
926 | block = freelist; |
927 | |
928 | } else { // not using virtual memory |
929 | |
930 | // if we aren't using mmap we can only do something if the block maps to a region |
931 | // that is completely empty |
932 | Region *region = AddrToRegion(block->baseAddr); |
933 | if(block->baseAddr == region->baseAddr && // beginnings match |
934 | region->commitTop == block->baseAddr + block->size*kBlockSize) { |
935 | |
936 | RemoveFromList(block); |
937 | |
938 | RemoveBlock(block); |
939 | |
940 | goto restart; |
941 | } |
942 | } |
943 | } |
944 | } |
945 | |
946 | if(config.verbose) |
947 | DumpHeapRep(); |
948 | CheckForStatusReturnToNormal(); |
949 | } |
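A worked example of the first branch above, which decommits just enough to bring free space back to the threshold fraction of the current heap (kDecommitThresholdPercentage is defined elsewhere; 25 is assumed here purely for illustration):

    // heapSize = 1000 blocks, freeSize = 400 blocks, threshold = 25%
    // decommitSize = (400*100 - 1000*25) / 100 = (40000 - 25000) / 100 = 150 blocks
    // leaving 250 free blocks, i.e. 25% of the 1000-block total.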
950 | |
951 | // m_spinlock is held |
952 | void GCHeap::CheckForHardLimitExceeded() |
953 | { |
954 | if (!HardLimitExceeded()) |
955 | return; |
956 | |
957 | Abort(); |
958 | } |
959 | |
960 | // m_spinlock is held |
961 | void GCHeap::CheckForSoftLimitExceeded(size_t request) |
962 | { |
963 | if(config.heapSoftLimit == 0 || status != kMemNormal || !SoftLimitExceeded()) |
964 | return; |
965 | |
966 | size_t externalBlocks = externalPressure / kBlockSize; |
967 | GCDebugMsg(false, "*** Alloc exceeded softlimit: ask for %u, usedheapsize =%u, totalHeap =%u, of which external =%u\n",
968 | unsigned(request), |
969 | unsigned(GetUsedHeapSize() + externalBlocks), |
970 | unsigned(GetTotalHeapSize() + externalBlocks), |
971 | unsigned(externalBlocks)); |
972 | |
973 | if(statusNotificationBeingSent()) |
974 | return; |
975 | |
976 | StatusChangeNotify(kMemSoftLimit); |
977 | } |
978 | |
979 | // m_spinlock is held |
980 | void GCHeap::CheckForStatusReturnToNormal() |
981 | { |
982 | if(!statusNotificationBeingSent() && statusNotNormalOrAbort()) |
983 | { |
984 | size_t externalBlocks = externalPressure / kBlockSize; |
985 | size_t total = GetTotalHeapSize() + externalBlocks; |
986 | |
987 | // return to normal if we drop below heapSoftLimit |
988 | if(config.heapSoftLimit != 0 && status == kMemSoftLimit) |
989 | { |
990 | if (!SoftLimitExceeded()) |
991 | { |
992 | size_t used = GetUsedHeapSize() + externalBlocks; |
993 | GCDebugMsg(false, "### Alloc dropped below softlimit: usedheapsize =%u, totalHeap =%u, of which external =%u\n",
994 | unsigned(used), |
995 | unsigned(total), |
996 | unsigned(externalBlocks) ); |
997 | StatusChangeNotify(kMemNormal); |
998 | } |
999 | } |
1000 | // or if we shrink to below %10 of the max |
1001 | else if ((maxTotalHeapSize / kBlockSize + externalBlocks) * 9 > total * 10) |
1002 | StatusChangeNotify(kMemNormal); |
1003 | } |
1004 | } |
1005 | |
1006 | #ifdef MMGC_MAC |
1007 | |
1008 | void GCHeap::RemoveLostBlock(HeapBlock *block) |
1009 | { |
1010 | if(config.verbose) { |
1011 | GCLog("Removing block %p %d\n", block->baseAddr, block->size); |
1012 | DumpHeapRep(); |
1013 | } |
1014 | |
1015 | { |
1016 | Region *region = AddrToRegion(block->baseAddr); |
1017 | if(region->baseAddr == block->baseAddr && region->reserveTop == block->endAddr()) { |
1018 | RemoveBlock(block, /*release*/false);
1019 | return; |
1020 | } |
1021 | } |
1022 | |
1023 | while(AddrToRegion(block->baseAddr) != AddrToRegion(block->endAddr()-1)) { |
1024 | // split block into parts mapping to regions |
1025 | Region *r = AddrToRegion(block->baseAddr); |
1026 | size_t numBlocks = (r->commitTop - block->baseAddr) / kBlockSize; |
1027 | char *next = Split(block, numBlocks)->baseAddr; |
1028 | // remove it |
1029 | RemoveLostBlock(block); |
1030 | block = AddrToBlock(next); |
1031 | } |
1032 | |
1033 | Region *region = AddrToRegion(block->baseAddr); |
1034 | // save these off since we'll potentially shift region |
1035 | char *regionBaseAddr = region->baseAddr; |
1036 | size_t regionBlockId = region->blockId; |
1037 | |
1038 | // if we don't line up with beginning or end we need a new region |
1039 | if(block->baseAddr != region->baseAddr && region->commitTop != block->endAddr()) { |
1040 | |
1041 | GCAssertMsg(HaveFreeRegion(), "Decommit was supposed to ensure this!");
1042 | |
1043 | NewRegion(block->endAddr(), region->reserveTop, |
1044 | region->commitTop > block->endAddr() ? region->commitTop : block->endAddr(), |
1045 | region->blockId + (block->endAddr() - region->baseAddr) / kBlockSize); |
1046 | |
1047 | if(region->baseAddr != block->baseAddr) { |
1048 | // end this region at the start of block going away |
1049 | region->reserveTop = block->baseAddr; |
1050 | if(region->commitTop > block->baseAddr) |
1051 | region->commitTop = block->baseAddr; |
1052 | } |
1053 | |
1054 | } else if(region->baseAddr == block->baseAddr) { |
1055 | region->blockId += block->size; |
1056 | region->baseAddr = block->endAddr(); |
1057 | } else if(region->commitTop == block->endAddr()) { |
1058 | // end this region at the start of block going away |
1059 | region->reserveTop = block->baseAddr; |
1060 | if(region->commitTop > block->baseAddr) |
1061 | region->commitTop = block->baseAddr; |
1062 | } else { |
1063 | GCAssertMsg(false, "This shouldn't be possible");
1064 | } |
1065 | |
1066 | |
1067 | // create temporary region for this block |
1068 | Region temp(this, block->baseAddr, block->endAddr(), block->endAddr(), regionBlockId + (block->baseAddr - regionBaseAddr) / kBlockSize); |
1069 | |
1070 | RemoveBlock(block, /*release*/false);
1071 | |
1072 | // pop temp from freelist, put there by RemoveBlock |
1073 | freeRegion = *(Region**)freeRegion; |
1074 | |
1075 | |
1076 | |
1077 | #ifdef DEBUG |
1078 | // doing this here is an extra validation step |
1079 | if(config.verbose) |
1080 | { |
1081 | DumpHeapRep(); |
1082 | } |
1083 | #endif |
1084 | } |
1085 | |
1086 | #endif |
1087 | |
1088 | void GCHeap::RemoveBlock(HeapBlock *block, bool release)
1089 | { |
1090 | Region *region = AddrToRegion(block->baseAddr); |
1091 | |
1092 | GCAssert(region->baseAddr == block->baseAddr);
1093 | GCAssert(region->reserveTop == block->endAddr());
1094 | |
1095 | size_t newBlocksLen = blocksLen - block->size; |
1096 | |
1097 | HeapBlock *nextBlock = block + block->size; |
1098 | |
1099 | bool need_sentinel = false;
1100 | bool remove_sentinel = false;
1101 | |
1102 | if( block->sizePrevious && nextBlock->size ) { |
1103 | // This block is contiguous with the blocks before and after it |
1104 | // so we need to add a sentinel |
1105 | need_sentinel = true;
1106 | } |
1107 | else if ( !block->sizePrevious && !nextBlock->size ) { |
1108 | // the block is not contiguous with the block before or after it - we need to remove a sentinel
1109 | // since there would already be one on each side.
1110 | remove_sentinel = true;
1111 | } |
1112 | |
1113 | // update nextblock's sizePrevious |
1114 | nextBlock->sizePrevious = need_sentinel ? 0 : block->sizePrevious; |
1115 | |
1116 | // Add space for the sentinel - the remaining blocks won't be contiguous |
1117 | if(need_sentinel) |
1118 | ++newBlocksLen; |
1119 | else if(remove_sentinel) |
1120 | --newBlocksLen; |
1121 | |
1122 | // just re-use blocks; small wastage possible |
1123 | HeapBlock *newBlocks = blocks; |
1124 | |
1125 | // the memmove will overwrite this so save it |
1126 | size_t blockSize = block->size; |
1127 | |
1128 | size_t offset = int(block-blocks); |
1129 | int32_t sen_offset = 0; |
1130 | HeapBlock *src = block + block->size; |
1131 | |
1132 | if( need_sentinel ) { |
1133 | offset = int(block-blocks)+1; |
1134 | sen_offset = 1; |
1135 | HeapBlock* sentinel = newBlocks + (block-blocks); |
1136 | sentinel->baseAddr = NULL;
1137 | sentinel->size = 0;
1138 | sentinel->sizePrevious = block->sizePrevious;
1139 | sentinel->prev = NULL;
1140 | sentinel->next = NULL;
1141 | #if defined(MMGC_MEMORY_PROFILER) && defined(MMGC_MEMORY_INFO) |
1142 | sentinel->allocTrace = 0; |
1143 | #endif |
1144 | } |
1145 | else if( remove_sentinel ) { |
1146 | // skip trailing sentinel |
1147 | src++; |
1148 | sen_offset = -1; |
1149 | } |
1150 | |
1151 | // copy blocks after |
1152 | int lastChunkSize = int((blocks + blocksLen) - src); |
1153 | GCAssert(lastChunkSize + offset == newBlocksLen);
1154 | memmove(newBlocks + offset, src, lastChunkSize * sizeof(HeapBlock)); |
1155 | |
1156 | // Fix up the prev/next pointers of each freelist. This is a little more complicated |
1157 | // than the similar code in ExpandHeap because blocks after the one we are free'ing
1158 | // are sliding down by block->size |
1159 | HeapBlock *fl = freelists; |
1160 | for (uint32_t i=0; i<kNumFreeLists; i++) { |
1161 | HeapBlock *temp = fl; |
1162 | do { |
1163 | if (temp->prev != fl) { |
1164 | if(temp->prev > block) { |
1165 | temp->prev = newBlocks + (temp->prev-blocks-blockSize) + sen_offset; |
1166 | } |
1167 | } |
1168 | if (temp->next != fl) { |
1169 | if(temp->next > block) { |
1170 | temp->next = newBlocks + (temp->next-blocks-blockSize) + sen_offset; |
1171 | } |
1172 | } |
1173 | } while ((temp = temp->next) != fl); |
1174 | fl++; |
1175 | } |
1176 | |
1177 | // Need to decrement blockId for regions in blocks after block |
1178 | Region *r = lastRegion; |
1179 | while(r) { |
1180 | if(r->blockId > region->blockId && r->blockId != kLargeItemBlockId) { |
1181 | r->blockId -= (blockSize-sen_offset); |
1182 | } |
1183 | r = r->prev; |
1184 | } |
1185 | |
1186 | blocksLen = newBlocksLen; |
1187 | RemoveRegion(region, release); |
1188 | |
1189 | // make sure we did everything correctly |
1190 | CheckFreelist(); |
1191 | ValidateHeapBlocks(); |
1192 | } |
1193 | |
1194 | void GCHeap::ValidateHeapBlocks() |
1195 | { |
1196 | #ifdef _DEBUG |
1197 | // iterate through HeapBlocks making sure: |
1198 | // non-contiguous regions have a sentinel |
1199 | HeapBlock *block = blocks; |
1200 | while(block - blocks < (intptr_t)blocksLen) { |
1201 | Region *r = AddrToRegion(block->baseAddr); |
1202 | if(r && r->baseAddr == block->baseAddr) |
1203 | GCAssert(r->blockId == (size_t)(block-blocks))do { } while (0); |
1204 | |
1205 | HeapBlock *next = NULL__null; |
1206 | if(block->size) { |
1207 | next = block + block->size; |
1208 | GCAssert(next - blocks < (intptr_t)blocksLen)do { } while (0); |
1209 | GCAssert(next->sizePrevious == block->size)do { } while (0); |
1210 | } |
1211 | HeapBlock *prev = NULL__null; |
1212 | if(block->sizePrevious) { |
1213 | prev = block - block->sizePrevious; |
1214 | GCAssert(prev - blocks >= 0)do { } while (0); |
1215 | GCAssert(prev->size == block->sizePrevious)do { } while (0); |
1216 | } else if(block != blocks) { |
1217 | // I have no prev and I'm not the first, check sentinel |
1218 | HeapBlock *sentinel = block-1; |
1219 | GCAssert(sentinel->baseAddr == NULL)do { } while (0); |
1220 | GCAssert(sentinel->size == 0)do { } while (0); |
1221 | GCAssert(sentinel->sizePrevious != 0)do { } while (0); |
1222 | } |
1223 | if(block->baseAddr) { |
1224 | if(prev) |
1225 | GCAssert(block->baseAddr == prev->baseAddr + (kBlockSize * prev->size))do { } while (0); |
1226 | block = next; |
1227 | // we should always end on a sentinel |
1228 | GCAssert(next - blocks < (int)blocksLen)do { } while (0); |
1229 | } else { |
1230 | // block is a sentinel |
1231 | GCAssert(block->size == 0)do { } while (0); |
1232 | // FIXME: the following asserts are firing and we need to understand why, could be bugs |
1233 | // make sure last block ends at commitTop |
1234 | Region *prevRegion = AddrToRegion(prev->baseAddr + (prev->size*kBlockSize) - 1); |
1235 | GCAssert(prev->baseAddr + (prev->size*kBlockSize) == prevRegion->commitTop)do { } while (0); |
1236 | block++; |
1237 | // either we've reached the end or the next isn't a sentinel |
1238 | GCAssert(block - blocks == (intptr_t)blocksLen || block->size != 0)do { } while (0); |
1239 | } |
1240 | } |
1241 | GCAssert(block - blocks == (intptr_t)blocksLen)do { } while (0); |
1242 | #endif |
1243 | } |
1244 | |
1245 | GCHeap::Region *GCHeap::AddrToRegion(const void *item) const |
1246 | { |
1247 | // Linear search of regions list to find this address. |
1248 | // The regions list should usually be pretty short. |
1249 | for (Region *region = lastRegion; |
1250 | region != NULL;
1251 | region = region->prev) |
1252 | { |
1253 | if (item >= region->baseAddr && item < region->reserveTop) { |
1254 | return region; |
1255 | } |
1256 | } |
1257 | return NULL;
1258 | } |
1259 | |
1260 | GCHeap::HeapBlock* GCHeap::AddrToBlock(const void *item) const |
1261 | { |
1262 | Region *region = AddrToRegion(item); |
1263 | if(region) { |
1264 | if(region->blockId == kLargeItemBlockId) |
1265 | return NULL;
1266 | size_t index = ((char*)item - region->baseAddr) / kBlockSize; |
1267 | HeapBlock *b = blocks + region->blockId + index; |
1268 | GCAssert(item >= b->baseAddr && item < b->baseAddr + b->size * GCHeap::kBlockSize)do { } while (0); |
1269 | return b; |
1270 | } |
1271 | return NULL;
1272 | } |
1273 | |
1274 | size_t GCHeap::SafeSize(const void *item) |
1275 | { |
1276 | MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);
1277 | GCAssert((uintptr_t(item) & kOffsetMask) == 0);
1278 | HeapBlock *block = AddrToBlock(item); |
1279 | if (block) |
1280 | return block->size; |
1281 | Region *r = AddrToRegion(item); |
1282 | if(r && r->blockId == kLargeItemBlockId) |
1283 | return LargeAllocSize(item); |
1284 | return (size_t)-1; |
1285 | } |
1286 | |
1287 | // Return the number of blocks of slop at the beginning of an object |
1288 | // starting at baseAddr for the given alignment. Alignment is a |
1289 | // number of blocks and must be a power of 2. baseAddr must |
1290 | // point to the beginning of a block. |
1291 | |
1292 | static inline size_t alignmentSlop(char* baseAddr, size_t alignment) |
1293 | { |
1294 | return (alignment - (size_t)(((uintptr_t)baseAddr >> GCHeap::kBlockShift) & (alignment - 1))) & (alignment - 1); |
1295 | } |
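The helper answers "how many leading blocks must be skipped so that the payload starts on an alignment-block boundary". A standalone restatement with a couple of worked cases (blockShift is a parameter here; in GCHeap it is the constant kBlockShift, and 12, i.e. 4 KB blocks, is assumed only for the example):

    #include <cassert>
    #include <cstdint>
    #include <cstddef>

    static size_t slop(uintptr_t baseAddr, size_t alignment, unsigned blockShift)
    {
        return (alignment - ((baseAddr >> blockShift) & (alignment - 1))) & (alignment - 1);
    }

    int main()
    {
        assert(slop(0x5000, 4, 12) == 3);   // block 5, 4-block alignment: skip 3, payload starts at block 8
        assert(slop(0x8000, 4, 12) == 0);   // already aligned: no slop
        return 0;
    }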
1296 | |
1297 | #ifdef DEBUG |
1298 | // Reserves region of size == sizeInBytes, while attempting to |
1299 | // insert filler >= fill_sz bytes between pairs of consecutively |
1300 | // reserved regions. (Goal: exercise address space extremities |
1301 | // w/o actually committing memory within the filler area itself.) |
1302 | static char* reserveSomeRegionDispersively(size_t fill_sz, size_t sizeInBytes) { |
1303 | static bool retLowAddr = false; // each call toggles low/high.
1304 | |
1305 | void *mem0 = VMPI_reserveMemoryRegion(NULL, fill_sz);
1306 | void *mem1 = VMPI_reserveMemoryRegion(NULL, fill_sz);
1307 | |
1308 | if ((retLowAddr && mem0 > mem1) || ( !retLowAddr && mem0 < mem1)) { |
1309 | void *swap_tmp = mem0; |
1310 | mem0 = mem1; |
1311 | mem1 = swap_tmp; |
1312 | } |
1313 | |
1314 | VMPI_releaseMemoryRegion(mem0, fill_sz); |
1315 | char *addr = (char*)VMPI_reserveMemoryRegion(mem0, sizeInBytes); |
1316 | VMPI_releaseMemoryRegion(mem1, fill_sz); |
1317 | if (addr == NULL) {
1318 | addr = (char*)VMPI_reserveMemoryRegion(NULL, sizeInBytes);
1319 | } |
1320 | retLowAddr = ! retLowAddr; |
1321 | |
1322 | return addr; |
1323 | } |
1324 | #endif |
1325 | |
1326 | REALLY_INLINE char *GCHeap::ReserveSomeRegion(size_t sizeInBytes)
1327 | { |
1328 | #ifdef DEBUG |
1329 | if (!config.dispersiveAdversarial) |
1330 | return (char*)VMPI_reserveMemoryRegion(NULL, sizeInBytes);
1331 | else |
1332 | return reserveSomeRegionDispersively(config.dispersiveAdversarial, |
1333 | sizeInBytes); |
1334 | #else |
1335 | return (char*)VMPI_reserveMemoryRegion(NULL, sizeInBytes);
1336 | #endif |
1337 | } |
1338 | |
1339 | void *GCHeap::LargeAlloc(size_t size, size_t alignment) |
1340 | { |
1341 | GCAssert(config.useVirtualMemory);
1342 | |
1343 | size_t sizeInBytes = size * kBlockSize; |
1344 | |
1345 | if(!EnsureFreeRegion(true))
1346 | return NULL;
1347 | |
1348 | char* addr = ReserveSomeRegion(sizeInBytes); |
1349 | |
1350 | if(!addr) |
1351 | return NULL;
1352 | |
1353 | size_t unalignedSize = sizeInBytes; |
1354 | |
1355 | if(alignmentSlop(addr, alignment) != 0) { |
1356 | VMPI_releaseMemoryRegion(addr, sizeInBytes); |
1357 | unalignedSize = sizeInBytes + (alignment-1) * kBlockSize; |
1358 | addr = ReserveSomeRegion(unalignedSize); |
1359 | if(!addr) |
1360 | return NULL;
1361 | } |
1362 | |
1363 | char *alignedAddr = addr + alignmentSlop(addr, alignment) * kBlockSize; |
1364 | if(!VMPI_commitMemory(alignedAddr, sizeInBytes)) { |
1365 | VMPI_releaseMemoryRegion(addr, sizeInBytes); |
1366 | return NULL;
1367 | } |
1368 | |
1369 | // Note that we don't actually track the beginning of the committed region |
1370 | // LargeFree doesn't need it. |
1371 | NewRegion(addr, |
1372 | addr + unalignedSize, // reserveTop |
1373 | alignedAddr + sizeInBytes, // commitTop |
1374 | kLargeItemBlockId); |
1375 | largeAllocs += size; |
1376 | CheckForNewMaxTotalHeapSize(); |
1377 | |
1378 | return alignedAddr; |
1379 | } |
1380 | |
1381 | void GCHeap::LargeFree(const void *item) |
1382 | { |
1383 | GCAssert(config.useVirtualMemory);
1384 | |
1385 | size_t size = LargeAllocSize(item); |
1386 | largeAllocs -= size; |
1387 | Region *r = AddrToRegion(item); |
1388 | // Must use r->baseAddr which may be less than item due to alignment, |
1389 | // and we must calculate full size |
1390 | VMPI_releaseMemoryRegion(r->baseAddr, r->reserveTop - r->baseAddr); |
1391 | RemoveRegion(r, false);
1392 | } |
1393 | |
1394 | GCHeap::HeapBlock* GCHeap::AllocBlock(size_t size, bool& zero, size_t alignment)
1395 | { |
1396 | uint32_t startList = GetFreeListIndex(size); |
1397 | HeapBlock *freelist = &freelists[startList]; |
1398 | |
1399 | HeapBlock *decommittedSuitableBlock = NULL;
1400 | |
1401 | // Search for a big enough block in the free lists |
1402 | |
1403 | for (uint32_t i = startList; i < kNumFreeLists; i++) |
1404 | { |
1405 | HeapBlock *block = freelist; |
1406 | while ((block = block->next) != freelist) |
1407 | { |
1408 | // Prefer a single committed block that is at least large enough. |
1409 | |
1410 | if (block->size >= size + alignmentSlop(block->baseAddr, alignment) && block->committed) { |
1411 | RemoveFromList(block); |
1412 | return AllocCommittedBlock(block, size, zero, alignment); |
1413 | } |
1414 | |
1415 | // Look for a sequence of decommitted and committed blocks that together would |
1416 | // be large enough, in case a single committed block can't be found. |
1417 | |
1418 | if(config.useVirtualMemory && decommittedSuitableBlock == NULL && !block->committed)
1419 | { |
1420 | // Note, 'block' should be invariant throughout this scope, it's the block |
1421 | // whose successors and predecessors we're inspecting |
1422 | |
1423 | GCAssert(!block->inUse());
1424 | |
1425 | size_t totalSize = block->size; |
1426 | HeapBlock *firstFree = block; |
1427 | size_t firstSlop = alignmentSlop(firstFree->baseAddr, alignment); |
1428 | |
1429 | // Coalesce with predecessors |
1430 | while(totalSize < size + firstSlop && firstFree->sizePrevious != 0) |
1431 | { |
1432 | HeapBlock *prevBlock = firstFree - firstFree->sizePrevious; |
1433 | if(prevBlock->inUse() || prevBlock->size == 0) |
1434 | break; |
1435 | totalSize += prevBlock->size; |
1436 | firstFree = prevBlock; |
1437 | firstSlop = alignmentSlop(firstFree->baseAddr, alignment); |
1438 | } |
1439 | |
1440 | // Coalesce with successors |
1441 | HeapBlock *nextBlock = block + block->size; |
1442 | while (totalSize < size + firstSlop && !(nextBlock->inUse() || nextBlock->size == 0)) { |
1443 | totalSize += nextBlock->size; |
1444 | nextBlock = nextBlock + nextBlock->size; |
1445 | } |
1446 | |
1447 | // Keep it if it's large enough |
1448 | if(totalSize >= size + firstSlop) |
1449 | decommittedSuitableBlock = firstFree; |
1450 | } |
1451 | } |
1452 | freelist++; |
1453 | } |
1454 | |
1455 | // We only get here if we couldn't find a single committed large enough block. |
1456 | |
1457 | if (decommittedSuitableBlock != NULL)
1458 | return AllocCommittedBlock(CreateCommittedBlock(decommittedSuitableBlock, size, alignment), |
1459 | size, |
1460 | zero, |
1461 | alignment); |
1462 | |
1463 | return NULL;
1464 | } |
1465 | |
1466 | GCHeap::HeapBlock* GCHeap::AllocCommittedBlock(HeapBlock* block, size_t size, bool& zero, size_t alignment)
1467 | { |
1468 | GCAssert(block->committed);
1469 | GCAssert(block->size >= size);
1470 | GCAssert(block->inUse());
1471 | |
1472 | size_t slop = alignmentSlop(block->baseAddr, alignment); |
1473 | |
1474 | if (slop > 0) |
1475 | { |
1476 | HeapBlock *oldBlock = block; |
1477 | block = Split(block, slop); |
1478 | AddToFreeList(oldBlock); |
1479 | GCAssert(alignmentSlop(block->baseAddr, alignment) == 0);
1480 | GCAssert(block->size >= size);
1481 | } |
1482 | |
1483 | if(block->size > size) |
1484 | { |
1485 | HeapBlock *newBlock = Split(block, size); |
1486 | AddToFreeList(newBlock); |
1487 | } |
1488 | |
1489 | CheckFreelist(); |
1490 | |
1491 | zero = block->dirty && zero; |
1492 | |
1493 | #ifdef _DEBUG |
1494 | if (!block->dirty) |
1495 | { |
1496 | union { |
1497 | const char* base_c; |
1498 | const uint32_t* base_u; |
1499 | }; |
1500 | base_c = block->baseAddr; |
1501 | GCAssert(*base_u == 0);
1502 | } |
1503 | #endif |
1504 | return block; |
1505 | } |
1506 | |
1507 | // Turn a sequence of committed and uncommitted blocks into a single committed |
1508 | // block that's at least large enough to satisfy the request. |
1509 | |
1510 | GCHeap::HeapBlock* GCHeap::CreateCommittedBlock(HeapBlock* block, size_t size, size_t alignment) |
1511 | { |
1512 | RemoveFromList(block); |
1513 | |
1514 | // We'll need to allocate extra space to account for the space that will |
1515 | // later be removed from the start of the block. |
1516 | |
1517 | size += alignmentSlop(block->baseAddr, alignment); |
1518 | |
1519 | // If the first block is too small then coalesce it with the following blocks |
1520 | // to create a block that's large enough. Some parts of the total block may |
1521 | // already be committed. If the platform allows it we commit the entire |
1522 | // range with one call even if parts were committed before, on the assumption |
1523 | // that that is faster than several commit() calls, one for each decommitted |
1524 | // block. (We have no current data to support that; now == 201-03-19.) |
1525 | |
1526 | if(block->size < size) |
1527 | { |
1528 | size_t amountRecommitted = block->committed ? 0 : block->size; |
1529 | bool dirty = block->dirty;
1530 | |
1531 | // The first block needs to be committed when sloppyCommit is disabled. |
1532 | if(!config.sloppyCommit && !block->committed) |
1533 | Commit(block); |
1534 | |
1535 | while(block->size < size) |
1536 | { |
1537 | // Coalesce the next block into this one. |
1538 | |
1539 | HeapBlock *nextBlock = block + block->size; |
1540 | RemoveFromList(nextBlock); |
1541 | |
1542 | if (nextBlock->committed) |
1543 | dirty = dirty || nextBlock->dirty; |
1544 | else |
1545 | { |
1546 | if (block->size + nextBlock->size >= size) // Last block? |
1547 | PruneDecommittedBlock(nextBlock, block->size + nextBlock->size, size); |
1548 | |
1549 | amountRecommitted += nextBlock->size; |
1550 | |
1551 | if (!config.sloppyCommit) |
1552 | Commit(nextBlock); |
1553 | } |
1554 | |
1555 | block->size += nextBlock->size; |
1556 | |
1557 | nextBlock->size = 0; |
1558 | nextBlock->baseAddr = 0; |
1559 | nextBlock->sizePrevious = 0; |
1560 | } |
1561 | |
1562 | (block + block->size)->sizePrevious = block->size; |
1563 | |
1564 | GCAssert(amountRecommitted > 0)do { } while (0); |
1565 | |
1566 | if (config.sloppyCommit) |
1567 | Commit(block); |
1568 | block->dirty = dirty; |
1569 | } |
1570 | else |
1571 | { |
1572 | PruneDecommittedBlock(block, block->size, size); |
1573 | Commit(block); |
1574 | } |
1575 | |
1576 | GCAssert(block->size >= size && block->committed)do { } while (0); |
1577 | |
1578 | CheckFreelist(); |
1579 | |
1580 | return block; |
1581 | } |
1582 | |
1583 | // If the tail of a coalesced block is decommitted and committing it creates |
1584 | // a block that's too large for the request then we may wish to split the tail |
1585 | // before committing it in order to avoid committing memory we won't need. |
1586 | // |
1587 | // 'available' is the amount of memory available including the memory in 'block', |
1588 | // and 'request' is the amount of memory required. |
1589 | |
1590 | void GCHeap::PruneDecommittedBlock(HeapBlock* block, size_t available, size_t request) |
1591 | { |
1592 | GCAssert(available >= request)do { } while (0); |
1593 | GCAssert(!block->committed)do { } while (0); |
1594 | |
1595 | size_t toCommit = request > kMinHeapIncrement ? request : kMinHeapIncrement; |
1596 | size_t leftOver = available - request; |
1597 | |
1598 | if (available > toCommit && leftOver > 0) |
1599 | { |
1600 | HeapBlock *newBlock = Split(block, block->size - leftOver); |
1601 | AddToFreeList(newBlock); |
1602 | } |
1603 | } |
1604 | |
1605 | GCHeap::HeapBlock *GCHeap::Split(HeapBlock *block, size_t size) |
1606 | { |
1607 | GCAssert(block->size > size);
1608 | HeapBlock *newBlock = block + size; |
1609 | newBlock->Init(block->baseAddr + kBlockSize * size, block->size - size, block->dirty); |
1610 | newBlock->sizePrevious = size; |
1611 | newBlock->committed = block->committed; |
1612 | block->size = size; |
1613 | |
1614 | // Update sizePrevious in block after that |
1615 | HeapBlock *nextBlock = newBlock + newBlock->size; |
1616 | nextBlock->sizePrevious = newBlock->size; |
1617 | |
1618 | return newBlock; |
1619 | } |
1620 | |
1621 | void GCHeap::Commit(HeapBlock *block) |
1622 | { |
1623 | GCAssert(config.sloppyCommit || !block->committed);
1624 | |
1625 | if(!VMPI_commitMemory(block->baseAddr, block->size * kBlockSize)) |
1626 | { |
1627 | GCAssert(false);
1628 | } |
1629 | if(config.verbose) { |
1630 | GCLog("recommitted %d pages\n", block->size); |
1631 | DumpHeapRep(); |
1632 | } |
1633 | numDecommitted -= block->size; |
1634 | block->committed = true;
1635 | block->dirty = VMPI_areNewPagesDirty(); |
1636 | } |
1637 | |
1638 | #ifdef _DEBUG |
1639 | // Non-debug version in GCHeap.h |
1640 | void GCHeap::CheckFreelist() |
1641 | { |
1642 | HeapBlock *freelist = freelists; |
1643 | for (uint32_t i = 0; i < kNumFreeLists; i++) |
1644 | { |
1645 | HeapBlock *block = freelist; |
1646 | while((block = block->next) != freelist) |
1647 | { |
1648 | GCAssert(block != block->next);
1649 | GCAssert(block != block->next->next || block->next == freelist);
1650 | |
1651 | // Coalescing is eager so no block on the list should have adjacent blocks |
1652 | // that are also on the free list and in the same committed state |
1653 | |
1654 | if(block->sizePrevious) |
1655 | { |
1656 | HeapBlock *prev = block - block->sizePrevious; |
1657 | GCAssert(block->sizePrevious == prev->size);
1658 | GCAssert(prev->inUse() || prev->size == 0 || prev->committed != block->committed);
1659 | } |
1660 | { |
1661 | HeapBlock *next = block + block->size; |
1662 | GCAssert(next->inUse() || next->size == 0 || next->committed != block->committed);
1663 | } |
1664 | } |
1665 | freelist++; |
1666 | } |
1667 | #if 0 |
1668 | // Debugging code to find problems with block/region layout |
1669 | // This code is slow, but can be useful for tracking down issues |
1670 | // It verifies that the memory for each block corresponds to one or more regions |
1671 | // and that each region points to a valid starting block |
1672 | Region* r = lastRegion; |
1673 | |
1674 | int block_idx = 0; |
1675 | bool errors = false;
1676 | for(block_idx = 0; block_idx < blocksLen; ++block_idx){ |
1677 | HeapBlock* b = blocks + block_idx; |
1678 | |
1679 | if( !b->size ) |
1680 | continue; |
1681 | |
1682 | int contig_size = 0; |
1683 | r = lastRegion; |
1684 | |
1685 | while( r ){ |
1686 | if(b->baseAddr >= r->baseAddr && b->baseAddr < r->reserveTop ) { |
1687 | // starts in this region |
1688 | char* end = b->baseAddr + b->size*kBlockSize; |
1689 | if(end > (r->reserveTop + contig_size) ){ |
1690 | GCLog("error, block %d %p %d did not find a matching region\n", block_idx, b->baseAddr, b->size); |
1691 | GCLog("Started in region %p - %p, contig size: %d\n", r->baseAddr, r->reserveTop, contig_size); |
1692 | errors = true;
1693 | break; |
1694 | } |
1695 | } |
1696 | else if( r->prev && r->prev->reserveTop==r->baseAddr){ |
1697 | contig_size +=r->reserveTop - r->baseAddr; |
1698 | } |
1699 | else{ |
1700 | contig_size = 0; |
1701 | } |
1702 | |
1703 | r = r->prev; |
1704 | } |
1705 | } |
1706 | |
1707 | while(r) |
1708 | { |
1709 | if(!blocks[r->blockId].size){ |
1710 | for( int i = r->blockId-1; i >= 0 ; --i ) |
1711 | if( blocks[i].size){ |
1712 | //Look for spanning blocks |
1713 | if( ((blocks[i].baseAddr + blocks[i].size*kBlockSize) <= r->baseAddr) ) { |
1714 | GCLog("Invalid block id for region %p-%p %d\n", r->baseAddr, r->reserveTop, i); |
1715 | errors = true;
1716 | break; |
1717 | } |
1718 | else |
1719 | break; |
1720 | } |
1721 | } |
1722 | r = r->prev; |
1723 | } |
1724 | if( errors ){ |
1725 | r = lastRegion; |
1726 | while(r) { |
1727 | GCLog("%p - %p\n", r->baseAddr, r->reserveTop); |
1728 | r = r->prev; |
1729 | } |
1730 | for(int b = 0; b < blocksLen; ++b ){ |
1731 | if(!blocks[b].size) |
1732 | continue; |
1733 | GCLog("%d %p %d\n", b, blocks[b].baseAddr, blocks[b].size); |
1734 | } |
1735 | asm("int3"); |
1736 | } |
1737 | #endif |
1738 | } |
1739 | #endif // DEBUG |
1740 | |
1741 | bool GCHeap::BlocksAreContiguous(void *item1, void *item2)
1742 | { |
1743 | Region *r1 = AddrToRegion(item1); |
1744 | Region *r2 = AddrToRegion(item2); |
1745 | return r1 == r2 || r1->reserveTop == r2->baseAddr; |
1746 | } |
1747 | |
1748 | void GCHeap::AddToFreeList(HeapBlock *block, HeapBlock* pointToInsert) |
1749 | { |
1750 | CheckFreelist(); |
1751 | |
1752 | block->next = pointToInsert; |
1753 | block->prev = pointToInsert->prev; |
1754 | block->prev->next = block; |
1755 | pointToInsert->prev = block; |
1756 | |
1757 | CheckFreelist(); |
1758 | } |
1759 | |
1760 | void GCHeap::AddToFreeList(HeapBlock* block, bool makeDirty)
1761 | { |
1762 | GCAssert(block->size != 0);
1763 | |
1764 | // Try to coalesce a committed block with its committed non-sentinel predecessor |
1765 | if(block->committed && block->sizePrevious) |
1766 | { |
1767 | HeapBlock *prevBlock = block - block->sizePrevious; |
1768 | GCAssert(prevBlock->size > 0 || !prevBlock->committed);
1769 | |
1770 | if (!prevBlock->inUse() && prevBlock->committed) |
1771 | { |
1772 | // Remove predecessor block from free list |
1773 | RemoveFromList(prevBlock); |
1774 | |
1775 | // Increase size of predecessor block |
1776 | prevBlock->size += block->size; |
1777 | |
1778 | block->size = 0; |
1779 | block->sizePrevious = 0; |
1780 | block->baseAddr = 0; |
1781 | |
1782 | block = prevBlock; |
1783 | makeDirty = makeDirty || block->dirty; |
1784 | } |
1785 | } |
1786 | |
1787 | // Try to coalesce a committed block with its committed non-sentinel successor |
1788 | if (block->committed) |
1789 | { |
1790 | HeapBlock *nextBlock = block + block->size; |
1791 | // This is not correct - sentinels are not necessarily committed. We |
1792 | // may or may not want to fix that. |
1793 | //GCAssert(nextBlock->size > 0 || !nextBlock->committed); |
1794 | |
1795 | if (!nextBlock->inUse() && nextBlock->committed) { |
1796 | // Remove successor block from free list |
1797 | RemoveFromList(nextBlock); |
1798 | |
1799 | // Increase size of current block |
1800 | block->size += nextBlock->size; |
1801 | nextBlock->size = 0; |
1802 | nextBlock->baseAddr = 0; |
1803 | nextBlock->sizePrevious = 0; |
1804 | makeDirty = makeDirty || nextBlock->dirty; |
1805 | } |
1806 | } |
1807 | |
1808 | // Update sizePrevious in the next block |
1809 | HeapBlock *nextBlock = block + block->size; |
1810 | nextBlock->sizePrevious = block->size; |
1811 | |
1812 | block->dirty = block->dirty || makeDirty; |
1813 | |
1814 | // Add the coalesced block to the right free list, in the right |
1815 | // position. Free lists are ordered by increasing block size. |
1816 | { |
1817 | int index = GetFreeListIndex(block->size); |
1818 | HeapBlock *freelist = &freelists[index]; |
1819 | HeapBlock *pointToInsert = freelist; |
1820 | |
1821 | // If the block size is below kUniqueThreshold then its free list |
1822 | // will have blocks of only one size and no search is needed. |
1823 | |
1824 | if (block->size >= kUniqueThreshold) { |
1825 | while ((pointToInsert = pointToInsert->next) != freelist) { |
1826 | if (pointToInsert->size >= block->size) { |
1827 | break; |
1828 | } |
1829 | } |
1830 | } |
1831 | |
1832 | AddToFreeList(block, pointToInsert); |
1833 | } |
1834 | } |
1835 | |
1836 | void GCHeap::FreeBlock(HeapBlock *block) |
1837 | { |
1838 | GCAssert(block->inUse());
1839 | |
1840 | #ifdef _DEBUG |
1841 | // trash it. fb == free block |
1842 | if (!RUNNING_ON_VALGRIND)
1843 | VMPI_memset(block->baseAddr, uint8_t(MMFreedPoison), block->size * kBlockSize);
1844 | #endif |
1845 | |
1846 | AddToFreeList(block, true);
1847 | } |
1848 | |
1849 | void GCHeap::CheckForNewMaxTotalHeapSize() |
1850 | { |
1851 | // GetTotalHeapSize is probably fairly cheap; even so this strikes me |
1852 | // as a bit of a hack. |
1853 | size_t heapSizeNow = GetTotalHeapSize() * kBlockSize; |
1854 | if (heapSizeNow > maxTotalHeapSize) { |
1855 | maxTotalHeapSize = heapSizeNow; |
1856 | #ifdef MMGC_POLICY_PROFILING |
1857 | // The guard on instance being non-NULL is a hack, to be fixed later (now=2009-07-20). |
1858 | // Some VMPI layers (WinMo is at least one of them) try to grab the GCHeap instance to get |
1859 | // at the map of private pages. But the GCHeap instance is not available during the initial |
1860 | // call to ExpandHeap. So sidestep that problem here. |
1861 | // |
1862 | // Note that if CheckForNewMaxTotalHeapSize is only called once then maxPrivateMemory |
1863 | // will be out of sync with maxTotalHeapSize, see also bugzilla 608684. |
1864 | if (instance != NULL)
1865 | maxPrivateMemory = VMPI_getPrivateResidentPageCount() * VMPI_getVMPageSize(); |
1866 | #endif |
1867 | } |
1868 | } |
1869 | |
1870 | bool GCHeap::ExpandHeap(size_t askSize)
1871 | {
1872 | bool bRetVal = ExpandHeapInternal(askSize);
1873 | CheckForNewMaxTotalHeapSize(); |
1874 | return bRetVal; |
1875 | } |
1876 | |
1877 | bool GCHeap::HardLimitExceeded(size_t additionalAllocationAmt)
1878 | { |
1879 | return GetTotalHeapSize() + externalPressure/kBlockSize + additionalAllocationAmt > config.heapLimit; |
1880 | } |
1881 | |
1882 | bool GCHeap::SoftLimitExceeded(size_t additionalAllocationAmt)
1883 | {
1884 | if (config.heapSoftLimit == 0) return false;
1885 | return GetTotalHeapSize() + externalPressure/kBlockSize + additionalAllocationAmt > config.heapSoftLimit; |
1886 | } |
1887 | |
1888 | #define roundUp(_s, _inc) (((_s + _inc - 1) / _inc) * _inc)
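// For example, roundUp(17, 16) == 32 and roundUp(16, 16) == 16: adding (_inc - 1) before
// the truncating integer division rounds up to the next multiple of _inc.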
1889 | |
1890 | bool GCHeap::ExpandHeapInternal(size_t askSize)
1891 | { |
1892 | size_t size = askSize; |
1893 | |
1894 | #ifdef _DEBUG |
1895 | // Turn this switch on to test bridging of contiguous |
1896 | // regions. |
1897 | bool test_bridging = false;
1898 | size_t defaultReserve = test_bridging ? (size+kMinHeapIncrement) : kDefaultReserve; |
1899 | #else |
1900 | const size_t defaultReserve = kDefaultReserve; |
1901 | #endif |
1902 | |
1903 | char *baseAddr = NULL;
1904 | char *newRegionAddr = NULL;
1905 | size_t newRegionSize = 0;
1906 | bool contiguous = false;
1907 | size_t commitAvail = 0; |
1908 | |
1909 | // Round up to the nearest kMinHeapIncrement |
1910 | size = roundUp(size, kMinHeapIncrement);
1911 | |
1912 | // When we allocate a new region we also need space for its HeapBlock entries; if they
1913 | // won't fit in existing space they must fit in the new space, so we may need to grow it.
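// When the settling loop below runs it also clears newBlocks to NULL; further down that is
// the cue to carve the HeapBlock array out of the front of the newly committed space (see
// the Split call after the gotMemory label).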
1914 | |
1915 | HeapBlock *newBlocks = blocks; |
1916 | |
1917 | if(blocksLen != 0 || // first time through just take what we need out of initialSize instead of adjusting |
1918 | config.initialSize == 0) // unless initialSize is zero of course
1919 | { |
1920 | int extraBlocks = 1; // for potential new sentinel |
1921 | if(nextRegion == NULL) {
1922 | extraBlocks++; // may need a new page for regions |
1923 | } |
1924 | size_t curHeapBlocksSize = blocks ? AddrToBlock(blocks)->size : 0; |
1925 | size_t newHeapBlocksSize = numHeapBlocksToNumBlocks(blocksLen + size + extraBlocks); |
1926 | |
1927 | // size is based on newSize and vice versa, loop to settle (typically one loop, sometimes two) |
1928 | while(newHeapBlocksSize > curHeapBlocksSize) |
1929 | { |
1930 | // use askSize so HeapBlock's can fit in rounding slop |
1931 | size = roundUp(askSize + newHeapBlocksSize + extraBlocks, kMinHeapIncrement);
1932 | |
1933 | // tells us to use new memory for blocks below
1934 | newBlocks = NULL;
1935 | |
1936 | // since newSize is based on size we have to repeat in case it changes |
1937 | curHeapBlocksSize = newHeapBlocksSize; |
1938 | newHeapBlocksSize = numHeapBlocksToNumBlocks(blocksLen + size + extraBlocks); |
1939 | } |
1940 | } |
1941 | |
1942 | // At this point we have adjusted/calculated the final size that would need to be committed. |
1943 | // We need to check that against the hardlimit to see if we are going to go above it. |
1944 | if (HardLimitExceeded(size)) |
1945 | return false;
1946 | |
1947 | if(config.useVirtualMemory) |
1948 | { |
1949 | Region *region = lastRegion; |
1950 | if (region != NULL)
1951 | { |
1952 | commitAvail = (int)((region->reserveTop - region->commitTop) / kBlockSize); |
1953 | |
1954 | // Can this request be satisfied purely by committing more memory that |
1955 | // is already reserved? |
1956 | if (size <= commitAvail) { |
1957 | if (VMPI_commitMemory(region->commitTop, size * kBlockSize)) |
1958 | { |
1959 | // Succeeded! |
1960 | baseAddr = region->commitTop; |
1961 | |
1962 | // check for continuity, we can only be contiguous with the end since |
1963 | // we don't have a "block insert" facility |
1964 | HeapBlock *last = &blocks[blocksLen-1] - blocks[blocksLen-1].sizePrevious; |
1965 | contiguous = last->baseAddr + last->size * kBlockSize == baseAddr; |
1966 | |
1967 | // Update the commit top. |
1968 | region->commitTop += size*kBlockSize; |
1969 | |
1970 | // Go set up the block list. |
1971 | goto gotMemory; |
1972 | } |
1973 | else |
1974 | { |
1975 | // If we can't commit memory we've already reserved, |
1976 | // no other trick is going to work. Fail. |
1977 | return false;
1978 | } |
1979 | } |
1980 | |
1981 | // Try to reserve a region contiguous to the last region. |
1982 | |
1983 | // - Try for the "default reservation size" if it's larger than |
1984 | // the requested block. |
1985 | if (defaultReserve > size) { |
1986 | newRegionAddr = (char*) VMPI_reserveMemoryRegion(region->reserveTop, |
1987 | defaultReserve * kBlockSize); |
1988 | newRegionSize = defaultReserve; |
1989 | } |
1990 | |
1991 | // - If the default reservation size didn't work or isn't big |
1992 | // enough, go for the exact amount requested, minus the |
1993 | // committable space in the current region. |
1994 | if (newRegionAddr == NULL) {
1995 | newRegionAddr = (char*) VMPI_reserveMemoryRegion(region->reserveTop, |
1996 | (size - commitAvail)*kBlockSize); |
1997 | newRegionSize = size - commitAvail; |
1998 | |
1999 | // check for contiguity |
2000 | if(newRegionAddr && newRegionAddr != region->reserveTop) { |
2001 | // we can't use this space since we need commitAvail from prev region to meet |
2002 | // the size requested, toss it |
2003 | ReleaseMemory(newRegionAddr, newRegionSize*kBlockSize); |
2004 | newRegionAddr = NULL;
2005 | newRegionSize = 0; |
2006 | } |
2007 | } |
2008 | |
2009 | if (newRegionAddr == region->reserveTop) // we'll use the region below as a separate region if it's not contiguous
2010 | { |
2011 | // We were able to reserve some space. |
2012 | |
2013 | // Commit available space from the existing region. |
2014 | if (commitAvail != 0) { |
2015 | if (!VMPI_commitMemory(region->commitTop, commitAvail * kBlockSize)) |
2016 | { |
2017 | // We couldn't commit even this space. We're doomed. |
2018 | // Un-reserve the space we just reserved and fail. |
2019 | ReleaseMemory(newRegionAddr, newRegionSize); |
2020 | return false;
2021 | } |
2022 | } |
2023 | |
2024 | // Commit needed space from the new region. |
2025 | if (!VMPI_commitMemory(newRegionAddr, (size - commitAvail) * kBlockSize)) |
2026 | { |
2027 | // We couldn't commit this space. We can't meet the |
2028 | // request. Un-commit any memory we just committed, |
2029 | // un-reserve any memory we just reserved, and fail. |
2030 | if (commitAvail != 0) { |
2031 | VMPI_decommitMemory(region->commitTop, |
2032 | commitAvail * kBlockSize); |
2033 | } |
2034 | ReleaseMemory(newRegionAddr, |
2035 | (size-commitAvail)*kBlockSize); |
2036 | return false;
2037 | } |
2038 | |
2039 | // We successfully reserved a new contiguous region |
2040 | // and committed the memory we need. Finish up. |
2041 | baseAddr = region->commitTop; |
2042 | region->commitTop = lastRegion->reserveTop; |
2043 | |
2044 | // check for continuity, we can only be contiguous with the end since |
2045 | // we don't have a "block insert" facility |
2046 | HeapBlock *last = &blocks[blocksLen-1] - blocks[blocksLen-1].sizePrevious; |
2047 | contiguous = last->baseAddr + last->size * kBlockSize == baseAddr; |
2048 | |
2049 | goto gotMemory; |
2050 | } |
2051 | } |
2052 | |
2053 | // We were unable to allocate a contiguous region, or there |
2054 | // was no existing region to be contiguous to because this |
2055 | // is the first-ever expansion. Allocate a non-contiguous region. |
2056 | |
2057 | // Don't use any of the available space in the current region. |
2058 | commitAvail = 0; |
2059 | |
2060 | // - Go for the default reservation size unless the requested |
2061 | // size is bigger. |
2062 | if (newRegionAddr == NULL && size < defaultReserve) {
2063 | newRegionAddr = ReserveSomeRegion(defaultReserve*kBlockSize); |
2064 | newRegionSize = defaultReserve; |
2065 | } |
2066 | |
2067 | // - If that failed or the requested size is bigger than default, |
2068 | // go for the requested size exactly. |
2069 | if (newRegionAddr == NULL) {
2070 | newRegionAddr = ReserveSomeRegion(size*kBlockSize); |
2071 | newRegionSize = size; |
2072 | } |
2073 | |
2074 | // - If that didn't work, give up. |
2075 | if (newRegionAddr == NULL) {
2076 | return false;
2077 | } |
2078 | |
2079 | // - Try to commit the memory. |
2080 | if (VMPI_commitMemory(newRegionAddr, |
2081 | size*kBlockSize) == 0) |
2082 | { |
2083 | // Failed. Un-reserve the memory and fail. |
2084 | ReleaseMemory(newRegionAddr, newRegionSize*kBlockSize); |
2085 | return false;
2086 | } |
2087 | |
2088 | // If we got here, we've successfully allocated a |
2089 | // non-contiguous region. |
2090 | baseAddr = newRegionAddr; |
2091 | contiguous = false;
2092 | |
2093 | } |
2094 | else |
2095 | { |
2096 | // Allocate the requested amount of space as a new region. |
2097 | newRegionAddr = (char*)VMPI_allocateAlignedMemory(size * kBlockSize); |
2098 | baseAddr = newRegionAddr; |
2099 | newRegionSize = size; |
2100 | |
2101 | // If that didn't work, give up. |
2102 | if (newRegionAddr == NULL) {
2103 | return false;
2104 | } |
2105 | } |
2106 | |
2107 | gotMemory: |
2108 | |
2109 | // If we were able to allocate a contiguous block, remove |
2110 | // the old top sentinel. |
2111 | if (contiguous) { |
2112 | blocksLen--; |
2113 | } |
2114 | |
2115 | // Expand the block list. |
2116 | size_t newBlocksLen = blocksLen + size; |
2117 | |
2118 | // Add space for the "top" sentinel |
2119 | newBlocksLen++; |
2120 | |
2121 | if (!newBlocks) { |
2122 | newBlocks = (HeapBlock*)(void *)baseAddr; |
2123 | } |
2124 | |
2125 | // Copy all the existing blocks. |
2126 | if (blocks && blocks != newBlocks) { |
2127 | VMPI_memcpy(newBlocks, blocks, blocksLen * sizeof(HeapBlock));
2128 | |
2129 | // Fix up the prev/next pointers of each freelist. |
2130 | HeapBlock *freelist = freelists; |
2131 | for (uint32_t i=0; i<kNumFreeLists; i++) { |
2132 | HeapBlock *temp = freelist; |
2133 | do { |
2134 | if (temp->prev != freelist) { |
2135 | temp->prev = newBlocks + (temp->prev-blocks); |
2136 | } |
2137 | if (temp->next != freelist) { |
2138 | temp->next = newBlocks + (temp->next-blocks); |
2139 | } |
2140 | } while ((temp = temp->next) != freelist); |
2141 | freelist++; |
2142 | } |
2143 | CheckFreelist(); |
2144 | } |
2145 | |
2146 | // Create a single free block for the new space, |
2147 | // and add it to the free list. |
2148 | HeapBlock *block = newBlocks+blocksLen; |
2149 | block->Init(baseAddr, size, newPagesDirty()); |
2150 | |
2151 | // link up contiguous blocks |
2152 | if(blocksLen && contiguous) |
2153 | { |
2154 | // search backwards for first real block |
2155 | HeapBlock *b = &blocks[blocksLen-1]; |
2156 | while(b->size == 0) |
2157 | { |
2158 | b--; |
2159 | GCAssert(b >= blocks);
2160 | } |
2161 | block->sizePrevious = b->size; |
2162 | GCAssert((block - block->sizePrevious)->size == b->size);
2163 | } |
2164 | |
2165 | // if baseAddr was used for HeapBlocks split |
2166 | if((char*)newBlocks == baseAddr) |
2167 | { |
2168 | size_t numBlocksNeededForHeapBlocks = numHeapBlocksToNumBlocks(newBlocksLen); |
2169 | HeapBlock *next = Split(block, numBlocksNeededForHeapBlocks); |
2170 | // this space counts as used space |
2171 | numAlloc += numBlocksNeededForHeapBlocks; |
2172 | block = next; |
2173 | } |
2174 | |
2175 | // get space for region allocations |
2176 | if(nextRegion == NULL) {
2177 | nextRegion = (Region*)(void *)block->baseAddr; |
2178 | HeapBlock *next = Split(block, 1); |
2179 | // this space counts as used space |
2180 | numAlloc++; |
2181 | numRegionBlocks++; |
2182 | block = next; |
2183 | } |
2184 | |
2185 | // Save off and add after initializing all blocks. |
2186 | HeapBlock *newBlock = block; |
2187 | |
2188 | // Initialize the rest of the new blocks to empty. |
2189 | size_t freeBlockSize = block->size; |
2190 | |
2191 | for (uint32_t i=1; i < freeBlockSize; i++) { |
2192 | block++; |
2193 | block->Clear(); |
2194 | } |
2195 | |
2196 | // Fill in the sentinel for the top of the heap. |
2197 | block++; |
2198 | block->Clear(); |
2199 | block->sizePrevious = freeBlockSize; |
2200 | |
2201 | AddToFreeList(newBlock); |
2202 | |
2203 | // save for free'ing |
2204 | void *oldBlocks = blocks; |
2205 | |
2206 | blocks = newBlocks; |
2207 | blocksLen = newBlocksLen; |
2208 | |
2209 | // free old blocks space using new blocks (FreeBlock poisons blocks so can't use old blocks) |
2210 | if (oldBlocks && oldBlocks != newBlocks) { |
2211 | HeapBlock *oldBlocksHB = AddrToBlock(oldBlocks); |
2212 | numAlloc -= oldBlocksHB->size; |
2213 | FreeBlock(oldBlocksHB); |
2214 | } |
2215 | |
2216 | // If we created a new region, save the base address so we can free later. |
2217 | if (newRegionAddr) { |
2218 | /* The mergeContiguousRegions bit is broken, since we |
2219 | loop over all regions we may be contiguous with an |
2220 | existing older HeapBlock and we don't support inserting a |
2221 | new address range arbitrarily into the HeapBlock
2222 | array (contiguous regions must be contiguous heap |
2223 | blocks vis-a-vis the region block id)
2224 | if(contiguous && |
2225 | config.mergeContiguousRegions) { |
2226 | lastRegion->reserveTop += newRegionSize*kBlockSize; |
2227 | lastRegion->commitTop += |
2228 | (size-commitAvail)*kBlockSize; |
2229 | } else |
2230 | */ { |
2231 | Region *newRegion = NewRegion(newRegionAddr, // baseAddr |
2232 | newRegionAddr+newRegionSize*kBlockSize, // reserve top |
2233 | newRegionAddr+(size-commitAvail)*kBlockSize, // commit top |
2234 | newBlocksLen-(size-commitAvail)-1); // block id |
2235 | |
2236 | if(config.verbose) |
2237 | GCLog("reserved new region, %p - %p %s\n", |
2238 | newRegion->baseAddr, |
2239 | newRegion->reserveTop, |
2240 | contiguous ? "contiguous" : "non-contiguous"); |
2241 | } |
2242 | } |
2243 | |
2244 | CheckFreelist(); |
2245 | |
2246 | if(config.verbose) { |
2247 | GCLog("heap expanded by %d pages\n", size); |
2248 | DumpHeapRep(); |
2249 | } |
2250 | ValidateHeapBlocks(); |
2251 | |
2252 | // Success! |
2253 | return true;
2254 | } |
2255 | |
2256 | void GCHeap::RemoveRegion(Region *region, bool release)
2257 | { |
2258 | Region **next = &lastRegion; |
2259 | while(*next != region) |
2260 | next = &((*next)->prev); |
2261 | *next = region->prev; |
2262 | if(release) { |
2263 | ReleaseMemory(region->baseAddr, |
2264 | region->reserveTop-region->baseAddr); |
2265 | } |
2266 | if(config.verbose) { |
2267 | GCLog("unreserved region 0x%p - 0x%p (commitTop: %p)\n", region->baseAddr, region->reserveTop, region->commitTop); |
2268 | DumpHeapRep(); |
2269 | } |
2270 | FreeRegion(region); |
2271 | } |
2272 | |
2273 | void GCHeap::FreeAll() |
2274 | { |
2275 | // Release all of the heap regions |
2276 | while (lastRegion != NULL) {
2277 | Region *region = lastRegion; |
2278 | lastRegion = lastRegion->prev; |
2279 | if(region->blockId == kLargeItemBlockId) { |
2280 | // leaks can happen during abort |
2281 | GCAssertMsg(status == kMemAbort, "Allocation of large object not freed");
2282 | VMPI_releaseMemoryRegion(region->baseAddr, region->reserveTop - region->baseAddr); |
2283 | } else { |
2284 | ReleaseMemory(region->baseAddr, |
2285 | region->reserveTop-region->baseAddr); |
2286 | } |
2287 | } |
2288 | } |
2289 | |
2290 | #ifdef MMGC_HOOKS |
2291 | void GCHeap::AllocHook(const void *item, size_t askSize, size_t gotSize, bool managed)
2292 | { |
2293 | (void)item; |
2294 | (void)askSize; |
2295 | (void)gotSize; |
2296 | (void)managed; |
2297 | #ifdef MMGC_MEMORY_PROFILER |
2298 | if(hasSpy) { |
2299 | VMPI_spyCallback(); |
2300 | } |
2301 | if(profiler) |
2302 | profiler->RecordAllocation(item, askSize, gotSize, managed); |
2303 | #else |
2304 | (void)managed; |
2305 | #endif |
2306 | |
2307 | #ifdef MMGC_MEMORY_INFO |
2308 | DebugDecorate(item, gotSize); |
2309 | #endif |
2310 | #ifdef AVMPLUS_SAMPLER |
2311 | avmplus::recordAllocationSample(item, gotSize); |
2312 | #endif |
2313 | } |
2314 | |
2315 | void GCHeap::FinalizeHook(const void *item, size_t size) |
2316 | { |
2317 | (void)item,(void)size; |
2318 | #ifdef MMGC_MEMORY_PROFILER |
2319 | if(profiler) |
2320 | profiler->RecordDeallocation(item, size); |
2321 | #endif |
2322 | |
2323 | #ifdef AVMPLUS_SAMPLER |
2324 | avmplus::recordDeallocationSample(item, size); |
2325 | #endif |
2326 | } |
2327 | |
2328 | void GCHeap::FreeHook(const void *item, size_t size, int poison) |
2329 | { |
2330 | (void)poison,(void)item,(void)size; |
2331 | #ifdef MMGC_MEMORY_INFO |
2332 | DebugFree(item, poison, size, true);
2333 | #endif |
2334 | } |
2335 | |
2336 | void GCHeap::PseudoFreeHook(const void *item, size_t size, int poison) |
2337 | { |
2338 | (void)poison,(void)item,(void)size; |
2339 | #ifdef MMGC_MEMORY_INFO |
2340 | DebugFree(item, poison, size, false);
2341 | #endif |
2342 | } |
2343 | #endif // MMGC_HOOKS |
2344 | |
2345 | EnterFrame::EnterFrame() : |
2346 | m_heap(NULL),
2347 | m_gc(NULL),
2348 | m_abortUnwindList(NULL),
2349 | m_previous(NULL),
2350 | m_suspended(false)
2351 | { |
2352 | GCHeap *heap = GCHeap::GetGCHeap(); |
2353 | EnterFrame *ef = m_previous = heap->GetEnterFrame(); |
2354 | |
2355 | if(ef && ef->Suspended()) { |
2356 | // propagate the active gc from the suspended frame |
2357 | m_gc = ef->GetActiveGC(); |
2358 | } |
2359 | |
2360 | if(ef == NULL || ef->Suspended()) {
2361 | m_heap = heap; |
2362 | heap->Enter(this); |
2363 | } |
2364 | } |
2365 | |
2366 | // this is the first thing we run after the Abort longjmp |
2367 | EnterFrame::~EnterFrame() |
2368 | { |
2369 | if(m_heap) { |
2370 | GCHeap *heap = m_heap; |
2371 | // this prevents us from doing multiple jumps in case leave results in more allocations |
2372 | m_heap = NULL;
2373 | heap->Leave(); |
2374 | } |
2375 | } |
2376 | |
2377 | void EnterFrame::UnwindAllObjects() |
2378 | { |
2379 | while(m_abortUnwindList) |
2380 | { |
2381 | AbortUnwindObject *previous = m_abortUnwindList; |
2382 | m_abortUnwindList->Unwind(); |
2383 | // The unwind call may remove the handler or may leave it on this list. If it leaves it, then make sure to advance the list, |
2384 | // otherwise, the list will automatically advance if it's removed. |
2385 | if (m_abortUnwindList == previous) |
2386 | { |
2387 | m_abortUnwindList = m_abortUnwindList->next; |
2388 | } |
2389 | } |
2390 | } |
2391 | |
2392 | void EnterFrame::AddAbortUnwindObject(AbortUnwindObject *obj) |
2393 | { |
2394 | GCAssertMsg(!EnterFrame::IsAbortUnwindObjectInList(obj), "obj can't be added to list twice!");
2395 | // Push it on the front |
2396 | obj->next = m_abortUnwindList; |
2397 | if (m_abortUnwindList) |
2398 | { |
2399 | m_abortUnwindList->previous = obj; |
2400 | } |
2401 | m_abortUnwindList = obj; |
2402 | } |
2403 | |
2404 | void EnterFrame::RemoveAbortUnwindObject(AbortUnwindObject *obj) |
2405 | { |
2406 | GCAssertMsg(obj == m_abortUnwindList || obj->previous != NULL, "Object not in list");
2407 | |
2408 | if (obj == m_abortUnwindList) |
2409 | { |
2410 | m_abortUnwindList = obj->next; |
2411 | } |
2412 | |
2413 | if (obj->previous != NULL)
2414 | { |
2415 | (obj->previous)->next = obj->next; |
2416 | } |
2417 | if (obj->next != NULL)
2418 | { |
2419 | (obj->next)->previous = obj->previous; |
2420 | } |
2421 | |
2422 | obj->next = NULL;
2423 | obj->previous = NULL;
2424 | } |
2425 | |
2426 | #ifdef DEBUG |
2427 | |
2428 | AbortUnwindObject::~AbortUnwindObject() |
2429 | { |
2430 | GCAssertMsg(!EnterFrame::IsAbortUnwindObjectInList(this), "RemoveAbortUnwindObject not called, dangling pointer in list.");
2431 | } |
2432 | |
2433 | /*static*/ |
2434 | bool EnterFrame::IsAbortUnwindObjectInList(AbortUnwindObject *obj)
2435 | { |
2436 | GCHeap *heap = GCHeap::GetGCHeap(); |
2437 | EnterFrame *frame; |
2438 | if(heap && (frame = heap->GetEnterFrame()) != NULL)
2439 | { |
2440 | AbortUnwindObject *list = frame->m_abortUnwindList; |
2441 | while(list) { |
2442 | if(list == obj) |
2443 | return true;
2444 | list = list->next; |
2445 | } |
2446 | } |
2447 | return false;
2448 | } |
2449 | #endif |
2450 | |
2451 | SuspendEnterFrame::SuspendEnterFrame() : m_ef(NULL)
2452 | { |
2453 | GCHeap *heap = GCHeap::GetGCHeap(); |
2454 | if(heap) { |
2455 | EnterFrame *ef = heap->GetEnterFrame(); |
2456 | if(ef) { |
2457 | ef->Suspend(); |
2458 | m_ef = ef; |
2459 | } |
2460 | } |
2461 | } |
2462 | |
2463 | SuspendEnterFrame::~SuspendEnterFrame() |
2464 | { |
2465 | if(m_ef) |
2466 | m_ef->Resume(); |
2467 | GCHeap *heap = GCHeap::GetGCHeap(); |
2468 | GCAssertMsg(heap->GetEnterFrame() == m_ef, "EnterFrame's not unwound properly");
2469 | if(heap->GetStatus() == kMemAbort) |
2470 | heap->Abort(); |
2471 | } |
2472 | |
2473 | void GCHeap::SystemOOMEvent(size_t size, int attempt) |
2474 | { |
2475 | if (attempt == 0 && !statusNotificationBeingSent()) |
2476 | SendFreeMemorySignal(size/kBlockSize + 1); |
2477 | else |
2478 | Abort(); |
2479 | } |
2480 | |
2481 | /*static*/ |
2482 | void GCHeap::SignalObjectTooLarge() |
2483 | { |
2484 | GCLog("Implementation limit exceeded: attempting to allocate too-large object\n"); |
2485 | GetGCHeap()->Abort(); |
2486 | } |
2487 | |
2488 | /*static*/ |
2489 | void GCHeap::SignalInconsistentHeapState(const char* reason) |
2490 | { |
2491 | GCAssert(!"Inconsistent heap state; aborting");
2492 | GCLog("Inconsistent heap state: %s\n", reason); |
2493 | GetGCHeap()->Abort(); |
2494 | } |
2495 | |
2496 | /*static*/ |
2497 | void GCHeap::SignalExternalAllocation(size_t nbytes) |
2498 | { |
2499 | GCHeap* heap = GetGCHeap(); |
2500 | |
2501 | MMGC_LOCK_ALLOW_RECURSION(heap->m_spinlock, heap->m_notificationThread);
2502 | |
2503 | heap->externalPressure += nbytes; |
2504 | |
2505 | heap->CheckForMemoryLimitsExceeded(); |
2506 | |
2507 | } |
2508 | |
2509 | /*static*/ |
2510 | void GCHeap::SignalExternalDeallocation(size_t nbytes) |
2511 | { |
2512 | GCHeap* heap = GetGCHeap(); |
2513 | |
2514 | MMGC_LOCK_ALLOW_RECURSION(heap->m_spinlock, heap->m_notificationThread);
2515 | |
2516 | heap->externalPressure -= nbytes; |
2517 | heap->CheckForStatusReturnToNormal(); |
2518 | } |
2519 | |
2520 | /*static*/ |
2521 | void GCHeap::SignalExternalFreeMemory(size_t minimumBytesToFree /*= kMaxObjectSize */) |
2522 | { |
2523 | GCHeap* heap = GetGCHeap(); |
2524 | GCAssertMsg(heap != NULL, "GCHeap not valid!");
2525 | |
2526 | MMGC_LOCK_ALLOW_RECURSION(heap->m_spinlock, heap->m_notificationThread);
2527 | |
2528 | // When calling SendFreeMemorySignal with kMaxObjectSize it will try to release |
2529 | // as much memory as possible. Otherwise it interprets the parameter as number |
2530 | // of blocks to be freed. This function uses bytes instead. The value is converted |
2531 | // to blocks here, except when kMaxObjectSize has been passed in so that it will |
2532 | // still trigger freeing maximum amount of memory. The division may lose some |
2533 | // precision, but SendFreeMemorySignal adds one more block to the requested amount |
2534 | // so that is ok. |
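// For example, if kBlockSize is 4096, a request to free 10000 bytes becomes
// 10000/4096 == 2 blocks here, and the extra block added by SendFreeMemorySignal
// covers the truncated remainder.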
2535 | heap->SendFreeMemorySignal((minimumBytesToFree != kMaxObjectSize) ? minimumBytesToFree / GCHeap::kBlockSize : minimumBytesToFree); |
2536 | } |
2537 | |
2538 | // This can *always* be called. It will clean up the state on the current thread |
2539 | // if appropriate, otherwise do nothing. It *must* be called by host code if the |
2540 | // host code jumps past an MMGC_ENTER instance. (The Flash player does that, in |
2541 | // some circumstances.) |
2542 | |
2543 | /*static*/ |
2544 | void GCHeap::SignalImminentAbort() |
2545 | { |
2546 | if (instance == NULL)
2547 | return; |
2548 | EnterFrame* ef = GetGCHeap()->GetEnterFrame(); |
2549 | if (ef == NULL)
2550 | return; |
2551 | |
2552 | // We don't know if we're holding the lock but we can release it anyhow, |
2553 | // on the assumption that this operation will not cause problems if the |
2554 | // lock is not held or is held by another thread. |
2555 | // |
2556 | // Release lock so we don't deadlock if exit or longjmp end up coming |
2557 | // back to GCHeap (all callers must have this lock). |
2558 | |
2559 | VMPI_lockRelease(&instance->m_spinlock); |
2560 | |
2561 | // If the current thread is holding a lock for a GC that's not currently active on the thread |
2562 | // then break the lock: the current thread is collecting in that GC, but the Abort has cancelled |
2563 | // the collection. |
2564 | ef->UnwindAllObjects(); |
2565 | |
2566 | // Clear the enterFrame because we're jumping past MMGC_ENTER. |
2567 | GetGCHeap()->enterFrame = NULL;
2568 | } |
2569 | |
2570 | void GCHeap::Abort() |
2571 | { |
2572 | status = kMemAbort; |
2573 | EnterFrame *ef = enterFrame; |
2574 | |
2575 | // If we hit abort, we need to turn m_oomHandling back on so that listeners are guaranteed to get this signal |
2576 | // We also need to set m_notificationThread to NULL in case we hit abort while we were processing another memory status change
2577 | m_oomHandling = true;
2578 | m_notificationThread = NULL;
2579 | |
2580 | GCLog("error: out of memory\n"); |
2581 | |
2582 | // release lock so we don't deadlock if exit or longjmp end up coming |
2583 | // back to GCHeap (all callers must have this lock) |
2584 | VMPI_lockRelease(&m_spinlock); |
2585 | |
2586 | // Lock must not be held when we call VMPI_exit, deadlocks ensue on Linux |
2587 | if(config.OOMExitCode != 0) |
2588 | { |
2589 | VMPI_exit(config.OOMExitCode);
2590 | } |
2591 | |
2592 | if (ef != NULL)
2593 | { |
2594 | // Guard against repeated jumps: ef->m_heap doubles as a flag. We go Abort->longjmp->~EnterFrame->Leave |
2595 | // and Leave calls StatusChangeNotify and the host code might do another allocation during shutdown |
2596 | // in which case we want to go to VMPI_abort instead. At that point m_heap will be NULL and the right |
2597 | // thing happens. |
2598 | if (ef->m_heap != NULL)
2599 | { |
2600 | ef->UnwindAllObjects(); |
2601 | VMPI_longjmpNoUnwind(ef->jmpbuf, 1);
2602 | } |
2603 | } |
2604 | GCAssertMsg(false, "MMGC_ENTER missing or we allocated more memory trying to shutdown");
2605 | VMPI_abort();
2606 | } |
2607 | |
2608 | void GCHeap::Enter(EnterFrame *frame) |
2609 | { |
2610 | enterCount++; |
2611 | enterFrame = frame; |
2612 | } |
2613 | |
2614 | void GCHeap::Leave() |
2615 | { |
2616 | { |
2617 | MMGC_LOCK(m_spinlock);
2618 | |
2619 | if(status == kMemAbort && !abortStatusNotificationSent) { |
2620 | abortStatusNotificationSent = true;
2621 | StatusChangeNotify(kMemAbort); |
2622 | } |
2623 | } |
2624 | |
2625 | EnterLock(); |
2626 | |
2627 | // do this after StatusChangeNotify because it affects ShouldNotEnter
2628 | |
2629 | // need to check if enterFrame is valid, it might have been nulled out by SignalImminentAbort |
2630 | EnterFrame* enter = enterFrame; |
2631 | if (enter) |
2632 | enterFrame = enter->Previous(); |
2633 | |
2634 | enterCount--; |
2635 | |
2636 | // last one out of the pool pulls the plug |
2637 | if(status == kMemAbort && enterCount == 0 && abortStatusNotificationSent && preventDestruct == 0) { |
2638 | DestroyInstance(); |
2639 | } |
2640 | EnterRelease(); |
2641 | } |
2642 | void GCHeap::log_percentage(const char *name, size_t bytes, size_t bytes_compare) |
2643 | { |
2644 | bytes_compare = size_t((bytes*100.0)/bytes_compare); |
2645 | if(bytes > 1<<20) { |
2646 | GCLog("%s %u (%.1fM) %u%%\n", name, (unsigned int)(bytes / GCHeap::kBlockSize), bytes * 1.0 / (1024*1024), (unsigned int)(bytes_compare)); |
2647 | } else { |
2648 | GCLog("%s %u (%uK) %u%%\n", name, (unsigned int)(bytes / GCHeap::kBlockSize), (unsigned int)(bytes / 1024), (unsigned int)(bytes_compare)); |
2649 | } |
2650 | } |
2651 | |
2652 | void GCHeap::DumpMemoryInfo() |
2653 | { |
2654 | MMGC_LOCK(m_spinlock);
2655 | size_t priv = VMPI_getPrivateResidentPageCount() * VMPI_getVMPageSize(); |
2656 | size_t mmgc = GetTotalHeapSize() * GCHeap::kBlockSize; |
2657 | size_t unmanaged = GetFixedMalloc()->GetTotalSize() * GCHeap::kBlockSize; |
2658 | size_t fixed_alloced; |
2659 | size_t fixed_asksize; |
2660 | GetFixedMalloc()->GetUsageInfo(fixed_asksize, fixed_alloced); |
2661 | |
2662 | size_t gc_total=0; |
2663 | size_t gc_allocated_total =0; |
2664 | size_t gc_ask_total = 0; |
2665 | size_t gc_count = 0; |
2666 | BasicListIterator<GC*> iter(gcManager.gcs()); |
2667 | GC* gc; |
2668 | while((gc = iter.next()) != NULL)
2669 | { |
2670 | #ifdef MMGC_MEMORY_PROFILER |
2671 | GCLog("[mem] GC 0x%p:%s\n", (void*)gc, GetAllocationName(gc)); |
2672 | #else |
2673 | GCLog("[mem] GC 0x%p\n", (void*)gc); |
2674 | #endif |
2675 | gc->DumpMemoryInfo(); |
2676 | |
2677 | size_t ask; |
2678 | size_t allocated; |
2679 | gc->GetUsageInfo(ask, allocated); |
2680 | gc_ask_total += ask; |
2681 | gc_allocated_total += allocated; |
2682 | gc_count += 1; |
2683 | |
2684 | gc_total += gc->GetNumBlocks() * kBlockSize; |
2685 | } |
2686 | |
2687 | #ifdef MMGC_MEMORY_PROFILER |
2688 | fixedMalloc.DumpMemoryInfo(); |
2689 | #endif |
2690 | |
2691 | // Gross stats are not meaningful if the profiler is running, see bugzilla 490014. |
2692 | // Disabling their printing is just an expedient fix to avoid misleading data being |
2693 | // printed. There are other, more complicated, fixes we should adopt. |
2694 | |
2695 | GCLog("[mem] ------- gross stats -----\n"); |
2696 | #ifdef MMGC_MEMORY_PROFILER |
2697 | if (GCHeap::GetGCHeap()->GetProfiler() == NULL)
2698 | #endif |
2699 | { |
2700 | log_percentage("[mem] private", priv, priv); |
2701 | log_percentage("[mem]\t mmgc", mmgc, priv); |
2702 | log_percentage("[mem]\t\t unmanaged", unmanaged, priv); |
2703 | log_percentage("[mem]\t\t managed", gc_total, priv); |
2704 | log_percentage("[mem]\t\t free", (size_t)GetFreeHeapSize() * GCHeap::kBlockSize, priv); |
2705 | log_percentage("[mem]\t other", priv - mmgc, priv); |
2706 | log_percentage("[mem] \tunmanaged overhead ", unmanaged-fixed_alloced, unmanaged); |
2707 | log_percentage("[mem] \tmanaged overhead ", gc_total - gc_allocated_total, gc_total); |
2708 | #ifdef MMGC_MEMORY_PROFILER |
2709 | if(HooksEnabled()) |
2710 | { |
2711 | log_percentage("[mem] \tunmanaged internal wastage", fixed_alloced - fixed_asksize, fixed_alloced); |
2712 | log_percentage("[mem] \tmanaged internal wastage", gc_allocated_total - gc_ask_total, gc_allocated_total); |
2713 | } |
2714 | #endif |
2715 | GCLog("[mem] number of collectors %u\n", unsigned(gc_count)); |
2716 | } |
2717 | #ifdef MMGC_MEMORY_PROFILER |
2718 | else |
2719 | GCLog("[mem] No gross stats available when profiler is enabled.\n"); |
2720 | #endif |
2721 | GCLog("[mem] -------- gross stats end -----\n"); |
2722 | |
2723 | #ifdef MMGC_MEMORY_PROFILER |
2724 | if(hasSpy) |
2725 | DumpFatties(); |
2726 | #endif |
2727 | |
2728 | if (config.verbose) |
2729 | DumpHeapRep(); |
2730 | } |
2731 | |
2732 | void GCHeap::LogChar(char c, size_t count) |
2733 | { |
2734 | char tmp[100]; |
2735 | char* buf = count < 100 ? tmp : (char*)VMPI_alloc(count+1); |
2736 | if (buf == NULL)
2737 | return; |
2738 | VMPI_memset(buf, c, count);
2739 | buf[count] = '\0'; |
2740 | |
2741 | GCLog(buf); |
2742 | if (buf != tmp) |
2743 | VMPI_free(buf); |
2744 | } |
2745 | |
2746 | void GCHeap::DumpHeapRep() |
2747 | { |
2748 | Region **regions = NULL;
2749 | Region *r = lastRegion; |
2750 | int numRegions = 0; |
2751 | |
2752 | GCLog("Heap representation format: \n"); |
2753 | GCLog("region base address - commitTop/reserveTop\n"); |
2754 | GCLog("[0 == free, 1 == committed, - = uncommitted]*\n"); |
2755 | |
2756 | // count and sort regions |
2757 | while(r) { |
2758 | numRegions++; |
2759 | r = r->prev; |
2760 | } |
2761 | regions = (Region**) VMPI_alloc(sizeof(Region*)*numRegions); |
2762 | if (regions == NULL)
2763 | return; |
2764 | r = lastRegion; |
2765 | for(int i=0; i < numRegions; i++, r = r->prev) { |
2766 | int insert = i; |
2767 | for(int j=0; j < i; j++) { |
2768 | if(r->baseAddr < regions[j]->baseAddr) { |
2769 | memmove(&regions[j+1], &regions[j], sizeof(Region*) * (i - j));
2770 | insert = j; |
2771 | break; |
2772 | } |
2773 | } |
2774 | regions[insert] = r; |
2775 | } |
2776 | |
2777 | HeapBlock *spanningBlock = NULL;
2778 | for(int i=0; i < numRegions; i++) |
2779 | { |
2780 | r = regions[i]; |
2781 | GCLog("0x%p - 0x%p/0x%p\n", r->baseAddr, r->commitTop, r->reserveTop); |
2782 | char c; |
2783 | char *addr = r->baseAddr; |
2784 | |
2785 | if(spanningBlock) { |
2786 | GCAssert(spanningBlock->baseAddr + (spanningBlock->size * kBlockSize) > r->baseAddr);
2787 | GCAssert(spanningBlock->baseAddr < r->baseAddr);
2788 | char *end = spanningBlock->baseAddr + (spanningBlock->size * kBlockSize); |
2789 | if(end > r->reserveTop) |
2790 | end = r->reserveTop; |
2791 | |
2792 | LogChar(spanningBlock->inUse() ? '1' : '0', (end - addr)/kBlockSize); |
2793 | addr = end; |
2794 | |
2795 | if(addr == spanningBlock->baseAddr + (spanningBlock->size * kBlockSize)) |
2796 | spanningBlock = NULL;
2797 | } |
2798 | HeapBlock *hb; |
2799 | while(addr != r->commitTop && (hb = AddrToBlock(addr)) != NULL) {
2800 | GCAssert(hb->size != 0);
2801 | |
2802 | if(hb->inUse()) |
2803 | c = '1'; |
2804 | else if(hb->committed) |
2805 | c = '0'; |
2806 | else |
2807 | c = '-'; |
2808 | size_t i, n; |
2809 | for(i=0, n=hb->size; i < n; i++, addr += GCHeap::kBlockSize) { |
2810 | if(addr == r->reserveTop) { |
2811 | // end of region! |
2812 | spanningBlock = hb; |
2813 | break; |
2814 | } |
2815 | } |
2816 | |
2817 | LogChar(c, i); |
2818 | } |
2819 | |
2820 | LogChar('-', (r->reserveTop - addr) / kBlockSize); |
2821 | |
2822 | GCLog("\n"); |
2823 | } |
2824 | VMPI_free(regions); |
2825 | } |
2826 | |
2827 | #ifdef MMGC_MEMORY_PROFILER |
2828 | |
2829 | /* static */ |
2830 | void GCHeap::InitProfiler() |
2831 | { |
2832 | GCAssert(IsProfilerInitialized() == false);
2833 | |
2834 | profiler = VMPI_isMemoryProfilingEnabled() ? new MemoryProfiler() : NULL;
2835 | } |
2836 | |
2837 | #endif //MMGC_MEMORY_PROFILER |
2838 | |
2839 | #ifdef MMGC_MEMORY_PROFILER |
2840 | #ifdef MMGC_USE_SYSTEM_MALLOC |
2841 | |
2842 | void GCHeap::TrackSystemAlloc(void *addr, size_t askSize) |
2843 | { |
2844 | MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);
2845 | if(!IsProfilerInitialized()) |
2846 | InitProfiler(); |
2847 | if(profiler) |
2848 | profiler->RecordAllocation(addr, askSize, VMPI_size(addr), /*managed=*/false);
2849 | } |
2850 | |
2851 | void GCHeap::TrackSystemFree(void *addr) |
2852 | { |
2853 | MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);
2854 | if(addr && profiler) |
2855 | profiler->RecordDeallocation(addr, VMPI_size(addr)); |
2856 | } |
2857 | |
2858 | #endif //MMGC_USE_SYSTEM_MALLOC |
2859 | #endif // MMGC_MEMORY_PROFILER |
2860 | |
2861 | void GCHeap::ReleaseMemory(char *address, size_t size) |
2862 | { |
2863 | if(config.useVirtualMemory) { |
2864 | bool success = VMPI_releaseMemoryRegion(address, size);
2865 | GCAssert(success);
2866 | (void)success; |
2867 | } else { |
2868 | VMPI_releaseAlignedMemory(address); |
2869 | } |
2870 | } |
2871 | |
2872 | void GCManager::destroy() |
2873 | { |
2874 | collectors.Destroy(); |
2875 | } |
2876 | |
2877 | void GCManager::signalStartCollection(GC* gc) |
2878 | { |
2879 | BasicListIterator<GC*> iter(collectors); |
2880 | GC* otherGC; |
2881 | while((otherGC = iter.next()) != NULL)
2882 | otherGC->policy.signalStartCollection(gc); |
2883 | } |
2884 | |
2885 | void GCManager::signalEndCollection(GC* gc) |
2886 | { |
2887 | BasicListIterator<GC*> iter(collectors); |
2888 | GC* otherGC; |
2889 | while((otherGC = iter.next()) != NULL)
2890 | otherGC->policy.signalEndCollection(gc);
2891 | } |
2892 | |
2893 | /* This method is the heart of the OOM system:
2894 | it's here that we call out to the mutator, which may call
2895 | back in to free memory or try to get more.
2896 | |
2897 | Note! The caller needs to hold on to the m_spinlock before calling this! |
2898 | */ |
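// The loop below releases m_spinlock around each callback (callbacks may allocate or free),
// runs Decommit(), and then re-measures the heap; it stops once more than
// minimumBlocksToFree blocks have been given back or the callback list is exhausted.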
2899 | |
2900 | void GCHeap::SendFreeMemorySignal(size_t minimumBlocksToFree) |
2901 | { |
2902 | // If we're already in the process of sending out memory notifications, don't bother verifying now. |
2903 | // Also, we only want to send the "free memory" signal while our memory is in a normal state. Once |
2904 | // we've entered softLimit or abort state, we want to allow the softlimit or abort processing to return |
2905 | // the heap to normal before continuing. |
2906 | |
2907 | if (statusNotificationBeingSent() || status != kMemNormal || !m_oomHandling) |
2908 | return; |
2909 | |
2910 | m_notificationThread = VMPI_currentThread(); |
2911 | |
2912 | size_t startingTotal = GetTotalHeapSize() + externalPressure / kBlockSize; |
2913 | size_t currentTotal = startingTotal; |
Value stored to 'currentTotal' during its initialization is never read | |
2914 | |
2915 | BasicListIterator<OOMCallback*> iter(callbacks); |
2916 | OOMCallback *cb = NULL;
2917 | bool bContinue = true;
2918 | do { |
2919 | cb = iter.next(); |
2920 | if(cb) |
2921 | { |
2922 | VMPI_lockRelease(&m_spinlock); |
2923 | cb->memoryStatusChange(kFreeMemoryIfPossible, kFreeMemoryIfPossible); |
2924 | VMPI_lockAcquire(&m_spinlock); |
2925 | |
2926 | Decommit(); |
2927 | currentTotal = GetTotalHeapSize() + externalPressure / kBlockSize; |
2928 | |
2929 | // If we've freed MORE than the minimum amount, we can stop freeing |
2930 | if ((startingTotal - currentTotal) > minimumBlocksToFree) |
2931 | { |
2932 | bContinue = false;
2933 | } |
2934 | } |
2935 | } while(cb != NULL && bContinue);
2936 | |
2937 | iter.MarkCursorInList(); |
2938 | |
2939 | m_notificationThread = NULL;
2940 | } |
2941 | |
2942 | void GCHeap::StatusChangeNotify(MemoryStatus to) |
2943 | { |
2944 | // If we're already in the process of sending this notification, don't resend |
2945 | if ((statusNotificationBeingSent() && to == status) || !m_oomHandling) |
2946 | return; |
2947 | |
2948 | m_notificationThread = VMPI_currentThread(); |
2949 | |
2950 | MemoryStatus oldStatus = status; |
2951 | status = to; |
2952 | |
2953 | BasicListIterator<OOMCallback*> iter(callbacks); |
2954 | OOMCallback *cb = NULL;
2955 | do { |
2956 | { |
2957 | cb = iter.next(); |
2958 | } |
2959 | if(cb) |
2960 | { |
2961 | VMPI_lockRelease(&m_spinlock); |
2962 | cb->memoryStatusChange(oldStatus, to); |
2963 | VMPI_lockAcquire(&m_spinlock); |
2964 | } |
2965 | } while(cb != NULL);
2966 | |
2967 | |
2968 | m_notificationThread = NULL;
2969 | |
2970 | CheckForStatusReturnToNormal(); |
2971 | } |
2972 | |
2973 | /*static*/ |
2974 | bool GCHeap::ShouldNotEnter()
2975 | { |
2976 | // don't enter if the heap is already gone or we're aborting but not on the aborting call stack in a nested enter call |
2977 | GCHeap *heap = GetGCHeap(); |
2978 | if(heap == NULL ||
2979 | (heap->GetStatus() == kMemAbort &&
2980 | (heap->GetEnterFrame() == NULL || heap->GetEnterFrame()->Suspended())))
2981 | return true;
2982 | return false;
2983 | } |
2984 | |
2985 | bool GCHeap::IsAddressInHeap(void *addr)
2986 | { |
2987 | void *block = (void*)(uintptr_t(addr) & kBlockMask); |
2988 | return SafeSize(block) != (size_t)-1; |
2989 | } |
2990 | |
2991 | // Every new GC must register itself with the GCHeap. |
2992 | void GCHeap::AddGC(GC *gc) |
2993 | { |
2994 | bool bAdded = false;
2995 | { |
2996 | MMGC_LOCK(m_spinlock);
2997 | // hack to allow GCManager's list back in for list mem operations |
2998 | vmpi_thread_t notificationThreadSave = m_notificationThread; |
2999 | m_notificationThread = VMPI_currentThread(); |
3000 | bAdded = gcManager.tryAddGC(gc); |
3001 | m_notificationThread = notificationThreadSave; |
3002 | } |
3003 | if (!bAdded) |
3004 | { |
3005 | Abort(); |
3006 | } |
3007 | } |
3008 | |
3009 | // When the GC is destroyed it must remove itself from the GCHeap. |
3010 | void GCHeap::RemoveGC(GC *gc) |
3011 | { |
3012 | MMGC_LOCK_ALLOW_RECURSION(m_spinlock, m_notificationThread);
3013 | // hack to allow GCManager's list back in for list mem operations |
3014 | vmpi_thread_t notificationThreadSave = m_notificationThread; |
3015 | m_notificationThread = VMPI_currentThread(); |
3016 | gcManager.removeGC(gc); |
3017 | m_notificationThread = notificationThreadSave; |
3018 | EnterFrame* ef = GetEnterFrame(); |
3019 | if (ef && ef->GetActiveGC() == gc) |
3020 | ef->SetActiveGC(NULL);
3021 | } |
3022 | |
3023 | void GCHeap::AddOOMCallback(OOMCallback *p) |
3024 | { |
3025 | bool bAdded = false;
3026 | { |
3027 | MMGC_LOCK(m_spinlock);
3028 | // hack to allow GCManager's list back in for list mem operations |
3029 | vmpi_thread_t notificationThreadSave = m_notificationThread; |
3030 | m_notificationThread = VMPI_currentThread(); |
3031 | bAdded = callbacks.TryAdd(p); |
3032 | m_notificationThread = notificationThreadSave; |
3033 | } |
3034 | if (!bAdded) |
3035 | { |
3036 | Abort(); |
3037 | } |
3038 | } |
3039 | |
3040 | void GCHeap::RemoveOOMCallback(OOMCallback *p) |
3041 | { |
3042 | MMGC_LOCK(m_spinlock);
3043 | // hack to allow GCManager's list back in for list mem operations |
3044 | vmpi_thread_t notificationThreadSave = m_notificationThread; |
3045 | m_notificationThread = VMPI_currentThread(); |
3046 | callbacks.Remove(p); |
3047 | m_notificationThread = notificationThreadSave; |
3048 | } |
3049 | |
3050 | bool GCHeap::EnsureFreeRegion(bool allowExpansion)
3051 | { |
3052 | if(!HaveFreeRegion()) { |
3053 | bool zero = false;
3054 | HeapBlock *block = AllocBlock(1, zero, 1); |
3055 | if(block) { |
3056 | nextRegion = (Region*)(void *)block->baseAddr; |
3057 | } else if(allowExpansion) { |
3058 | ExpandHeap(1); |
3059 | // We must have hit the hard limit or OS limit |
3060 | if(nextRegion == NULL)
3061 | return false;
3062 | } |
3063 | } |
3064 | return true;
3065 | } |
3066 | |
3067 | GCHeap::Region *GCHeap::NewRegion(char *baseAddr, char *rTop, char *cTop, size_t blockId) |
3068 | { |
3069 | Region *r = freeRegion; |
3070 | if(r) { |
3071 | freeRegion = *(Region**)freeRegion; |
3072 | } else { |
3073 | r = nextRegion++; |
3074 | if(roundUp((uintptr_t)nextRegion, kBlockSize) - (uintptr_t)nextRegion < sizeof(Region))
3075 | nextRegion = NULL; // fresh page allocated in ExpandHeap
3076 | } |
3077 | new (r) Region(this, baseAddr, rTop, cTop, blockId); |
3078 | return r; |
3079 | } |
3080 | |
3081 | void GCHeap::FreeRegion(Region *r) |
3082 | { |
3083 | if(r == lastRegion) |
3084 | lastRegion = r->prev; |
3085 | *(Region**)r = freeRegion; |
3086 | freeRegion = r; |
3087 | |
3088 | } |
3089 | |
3090 | /*static*/ |
3091 | void GCHeap::EnterLockInit() |
3092 | { |
3093 | if (!instanceEnterLockInitialized) |
3094 | { |
3095 | instanceEnterLockInitialized = true;
3096 | VMPI_lockInit(&instanceEnterLock); |
3097 | } |
3098 | } |
3099 | |
3100 | /*static*/ |
3101 | void GCHeap::EnterLockDestroy() |
3102 | { |
3103 | GCAssert(instanceEnterLockInitialized);
3104 | VMPI_lockDestroy(&instanceEnterLock);
3105 | instanceEnterLockInitialized = false;
3106 | } |
3107 | |
3108 | GCHeap::Region::Region(GCHeap *heap, char *baseAddr, char *rTop, char *cTop, size_t blockId) |
3109 | : prev(heap->lastRegion), |
3110 | baseAddr(baseAddr), |
3111 | reserveTop(rTop), |
3112 | commitTop(cTop), |
3113 | blockId(blockId) |
3114 | { |
3115 | heap->lastRegion = this; |
3116 | } |
3117 | |
3118 | #ifdef DEBUG |
3119 | void GCHeap::CheckForOOMAbortAllocation() |
3120 | { |
3121 | if(m_notificationThread == VMPI_currentThread() && status == kMemAbort) |
3122 | GCAssertMsg(false, "Its not legal to perform allocations during OOM kMemAbort callback");
3123 | } |
3124 | #endif |
3125 | |
3126 | bool GCHeap::QueryCanReturnToNormal()
3127 | { |
3128 | // must be below soft limit _AND_ above decommit threshold |
3129 | return GetUsedHeapSize() + externalPressure/kBlockSize < config.heapSoftLimit && |
3130 | FreeMemoryExceedsDecommitThreshold(); |
3131 | } |
3132 | |
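// Integer-only comparison: free*100 > total*kDecommitThresholdPercentage is the same test as
// free/total exceeding kDecommitThresholdPercentage percent. With an illustrative threshold
// of 25, 30 free blocks out of 100 total would qualify (3000 > 2500).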
3133 | bool GCHeap::FreeMemoryExceedsDecommitThreshold()
3134 | { |
3135 | return GetFreeHeapSize() * 100 > GetTotalHeapSize() * kDecommitThresholdPercentage; |
3136 | } |
3137 | } |