1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : *
3 : * ***** BEGIN LICENSE BLOCK *****
4 : * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5 : *
6 : * The contents of this file are subject to the Mozilla Public License Version
7 : * 1.1 (the "License"); you may not use this file except in compliance with
8 : * the License. You may obtain a copy of the License at
9 : * http://www.mozilla.org/MPL/
10 : *
11 : * Software distributed under the License is distributed on an "AS IS" basis,
12 : * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 : * for the specific language governing rights and limitations under the
14 : * License.
15 : *
16 : * The Original Code is Mozilla Communicator client code, released
17 : * March 31, 1998.
18 : *
19 : * The Initial Developer of the Original Code is
20 : * Netscape Communications Corporation.
21 : * Portions created by the Initial Developer are Copyright (C) 1998
22 : * the Initial Developer. All Rights Reserved.
23 : *
24 : * Contributor(s):
25 : *
26 : * Alternatively, the contents of this file may be used under the terms of
27 : * either of the GNU General Public License Version 2 or later (the "GPL"),
28 : * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 : * in which case the provisions of the GPL or the LGPL are applicable instead
30 : * of those above. If you wish to allow use of your version of this file only
31 : * under the terms of either the GPL or the LGPL, and not to allow others to
32 : * use your version of this file under the terms of the MPL, indicate your
33 : * decision by deleting the provisions above and replace them with the notice
34 : * and other provisions required by the GPL or the LGPL. If you do not delete
35 : * the provisions above, a recipient may use your version of this file under
36 : * the terms of any one of the MPL, the GPL or the LGPL.
37 : *
38 : * ***** END LICENSE BLOCK ***** */
39 :
40 : #ifndef jsgc_h___
41 : #define jsgc_h___
42 :
43 : /*
44 : * JS Garbage Collector.
45 : */
46 : #include <setjmp.h>
47 :
48 : #include "mozilla/Util.h"
49 :
50 : #include "jsalloc.h"
51 : #include "jstypes.h"
52 : #include "jsprvtd.h"
53 : #include "jspubtd.h"
54 : #include "jsdhash.h"
55 : #include "jslock.h"
56 : #include "jsutil.h"
57 : #include "jsversion.h"
58 : #include "jscell.h"
59 :
60 : #include "ds/BitArray.h"
61 : #include "gc/Statistics.h"
62 : #include "js/HashTable.h"
63 : #include "js/Vector.h"
64 : #include "js/TemplateLib.h"
65 :
66 : struct JSCompartment;
67 :
68 : extern "C" void
69 : js_TraceXML(JSTracer *trc, JSXML* thing);
70 :
71 : #if JS_STACK_GROWTH_DIRECTION > 0
72 : # define JS_CHECK_STACK_SIZE(limit, lval) ((uintptr_t)(lval) < limit)
73 : #else
74 : # define JS_CHECK_STACK_SIZE(limit, lval) ((uintptr_t)(lval) > limit)
75 : #endif
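/*
 * An illustrative (hypothetical) use of the macro above: it takes a
 * precomputed stack limit and the address of a local variable, which
 * approximates the current stack depth regardless of the direction in which
 * the stack grows:
 *
 *   int stackDummy;
 *   if (!JS_CHECK_STACK_SIZE(stackLimit, &stackDummy))
 *       return false;    // too deep; the caller reports over-recursion
 */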
76 :
77 : namespace js {
78 :
79 : class GCHelperThread;
80 : struct Shape;
81 :
82 : namespace gc {
83 :
84 : enum State {
85 : NO_INCREMENTAL,
86 : MARK_ROOTS,
87 : MARK,
88 : SWEEP,
89 : INVALID
90 : };
91 :
92 : struct Arena;
93 :
94 : /*
95 : * This must be an upper bound, but we do not need the least upper bound, so
96 : * we just exclude non-background objects.
97 : */
98 : const size_t MAX_BACKGROUND_FINALIZE_KINDS = FINALIZE_LIMIT - FINALIZE_OBJECT_LIMIT / 2;
99 :
100 : /*
101 : * Page size is 4096 by default, except for SPARC, where it is 8192.
102 : * Note: Do not use JS_CPU_SPARC here, this header is used outside JS.
103 : * Bug 692267: Move page size definition to gc/Memory.h and include it
104 : * directly once jsgc.h is no longer an installed header.
105 : */
106 : #if defined(SOLARIS) && (defined(__sparc) || defined(__sparcv9))
107 : const size_t PageShift = 13;
108 : #else
109 : const size_t PageShift = 12;
110 : #endif
111 : const size_t PageSize = size_t(1) << PageShift;
112 :
113 : const size_t ChunkShift = 20;
114 : const size_t ChunkSize = size_t(1) << ChunkShift;
115 : const size_t ChunkMask = ChunkSize - 1;
116 :
117 : const size_t ArenaShift = PageShift;
118 : const size_t ArenaSize = PageSize;
119 : const size_t ArenaMask = ArenaSize - 1;
120 :
121 : /*
122 : * This is the maximum number of arenas we allow in the FreeCommitted state
123 : * before we trigger a GC_SHRINK to release free arenas to the OS.
124 : */
125 : const static uint32_t FreeCommittedArenasThreshold = (32 << 20) / ArenaSize;
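/*
 * With the default 4096-byte arenas this threshold works out to
 * (32 << 20) / 4096 == 8192 arenas, i.e. up to 32 MB of committed but unused
 * arena memory may accumulate before we ask for a GC_SHRINK.
 */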
126 :
127 : /*
128 : * The mark bitmap has one bit per GC cell. For multi-cell GC things this
129 : * wastes space but lets us avoid expensive divisions by the thing's size when
130 : * accessing the bitmap. In addition it lets us use some bits for colored
131 : * marking during the cycle GC.
132 : */
133 : const size_t ArenaCellCount = size_t(1) << (ArenaShift - Cell::CellShift);
134 : const size_t ArenaBitmapBits = ArenaCellCount;
135 : const size_t ArenaBitmapBytes = ArenaBitmapBits / 8;
136 : const size_t ArenaBitmapWords = ArenaBitmapBits / JS_BITS_PER_WORD;
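/*
 * For example, assuming an 8-byte cell (Cell::CellShift == 3), a 4096-byte
 * arena holds 512 cells, so its slice of the mark bitmap is 512 bits, i.e.
 * 64 bytes, or 8 words on a 64-bit build and 16 words on a 32-bit build.
 */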
137 :
138 : /*
139 : * A FreeSpan represents a contiguous sequence of free cells in an Arena.
140 : * |first| is the address of the first free cell in the span. |last| is the
141 : * address of the last free cell in the span. This last cell holds a FreeSpan
142 : * data structure for the next span unless this is the last span on the list
143 : * of spans in the arena. For this last span |last| points to the last byte of
144 : * the last thing in the arena and no linkage is stored there, so
145 : * |last| == arenaStart + ArenaSize - 1. If the space at the arena end is
146 : * fully used this last span is empty and |first| == |last| + 1.
147 : *
148 : * Thus |first| < |last| implies that we have either the last span with at least
149 : * one element or that the span is not the last and contains at least 2
150 : * elements. In both cases to allocate a thing from this span we need simply
151 : * to increment |first| by the allocation size.
152 : *
153 : * |first| == |last| implies that we have a one element span that records the
154 : * next span. So to allocate from it we need to update the span list head
155 : * with a copy of the span stored at |last| address so the following
156 : * allocations will use that span.
157 : *
158 : * |first| > |last| implies that we have an empty last span and the arena is
159 : * fully used.
160 : *
161 : * Also, only for the last span is (|last| & 1) != 0, as all allocation sizes
162 : * are multiples of Cell::CellSize.
163 : */
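/*
 * A worked example of the invariants above, assuming a 4096-byte arena at
 * address 0x10000 that holds 16-byte things:
 *
 * - A span that is not the last one and covers free things at 0x10020 and
 *   0x10030 has |first| == 0x10020 and |last| == 0x10030; the thing at
 *   |last| stores the FreeSpan for the next span, and (|last| & 1) == 0.
 * - The last span covering the free things at the arena tail has
 *   |last| == 0x10000 + ArenaSize - 1 == 0x10FFF, so (|last| & 1) != 0 and
 *   no linkage is stored there.
 * - If the arena tail is fully used, the last span is empty:
 *   |first| == 0x11000 and |last| == 0x10FFF, i.e. |first| == |last| + 1.
 */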
164 : struct FreeSpan {
165 : uintptr_t first;
166 : uintptr_t last;
167 :
168 : public:
169 6814712 : FreeSpan() {}
170 :
171 30307367 : FreeSpan(uintptr_t first, uintptr_t last)
172 30307367 : : first(first), last(last) {
173 30307367 : checkSpan();
174 30307335 : }
175 :
176 : /*
177 : * To minimize the size of the arena header the first span is encoded
178 : * there as offsets from the arena start.
179 : */
180 5331337 : static size_t encodeOffsets(size_t firstOffset, size_t lastOffset) {
181 : /* Check that we can pack the offsets into uint16. */
182 : JS_STATIC_ASSERT(ArenaShift < 16);
183 5331337 : JS_ASSERT(firstOffset <= ArenaSize);
184 5331336 : JS_ASSERT(lastOffset < ArenaSize);
185 5331336 : JS_ASSERT(firstOffset <= ((lastOffset + 1) & ~size_t(1)));
186 5331336 : return firstOffset | (lastOffset << 16);
187 : }
188 :
189 : /*
190 : * Encoded offsets for a full arena when its first span is the last one
191 : * and empty.
192 : */
193 : static const size_t FullArenaOffsets = ArenaSize | ((ArenaSize - 1) << 16);
194 :
195 30307420 : static FreeSpan decodeOffsets(uintptr_t arenaAddr, size_t offsets) {
196 30307420 : JS_ASSERT(!(arenaAddr & ArenaMask));
197 :
198 30307412 : size_t firstOffset = offsets & 0xFFFF;
199 30307412 : size_t lastOffset = offsets >> 16;
200 30307412 : JS_ASSERT(firstOffset <= ArenaSize);
201 30307412 : JS_ASSERT(lastOffset < ArenaSize);
202 :
203 : /*
204 : * We must not use | when calculating first as firstOffset is
205 : * ArenaMask + 1 for the empty span.
206 : */
207 30307412 : return FreeSpan(arenaAddr + firstOffset, arenaAddr | lastOffset);
208 : }
209 :
210 1785856 : void initAsEmpty(uintptr_t arenaAddr = 0) {
211 1785856 : JS_ASSERT(!(arenaAddr & ArenaMask));
212 1785856 : first = arenaAddr + ArenaSize;
213 1785856 : last = arenaAddr | (ArenaSize - 1);
214 1785856 : JS_ASSERT(isEmpty());
215 1785856 : }
216 :
217 40205068 : bool isEmpty() const {
218 40205068 : checkSpan();
219 40205068 : return first > last;
220 : }
221 :
222 36429818 : bool hasNext() const {
223 36429818 : checkSpan();
224 36429812 : return !(last & uintptr_t(1));
225 : }
226 :
227 33846341 : const FreeSpan *nextSpan() const {
228 33846341 : JS_ASSERT(hasNext());
229 33846342 : return reinterpret_cast<FreeSpan *>(last);
230 : }
231 :
232 13528507 : FreeSpan *nextSpanUnchecked(size_t thingSize) const {
233 : #ifdef DEBUG
234 13528507 : uintptr_t lastOffset = last & ArenaMask;
235 13528507 : JS_ASSERT(!(lastOffset & 1));
236 13528507 : JS_ASSERT((ArenaSize - lastOffset) % thingSize == 0);
237 : #endif
238 13528507 : return reinterpret_cast<FreeSpan *>(last);
239 : }
240 :
241 812783034 : uintptr_t arenaAddressUnchecked() const {
242 812783034 : return last & ~ArenaMask;
243 : }
244 :
245 14307409 : uintptr_t arenaAddress() const {
246 14307409 : checkSpan();
247 14307409 : return arenaAddressUnchecked();
248 : }
249 :
250 1451057 : ArenaHeader *arenaHeader() const {
251 1451057 : return reinterpret_cast<ArenaHeader *>(arenaAddress());
252 : }
253 :
254 1204023 : bool isSameNonEmptySpan(const FreeSpan *another) const {
255 1204023 : JS_ASSERT(!isEmpty());
256 1204023 : JS_ASSERT(!another->isEmpty());
257 1204023 : return first == another->first && last == another->last;
258 : }
259 :
260 5058885 : bool isWithinArena(uintptr_t arenaAddr) const {
261 5058885 : JS_ASSERT(!(arenaAddr & ArenaMask));
262 :
263 : /* Return true for the last empty span as well. */
264 5058885 : return arenaAddress() == arenaAddr;
265 : }
266 :
267 5058885 : size_t encodeAsOffsets() const {
268 : /*
269 : * We must use first - arenaAddress(), not first & ArenaMask as
270 : * first == ArenaMask + 1 for an empty span.
271 : */
272 5058885 : uintptr_t arenaAddr = arenaAddress();
273 5058885 : return encodeOffsets(first - arenaAddr, last & ArenaMask);
274 : }
275 :
276 : /* See comments before FreeSpan for details. */
277 270373605 : JS_ALWAYS_INLINE void *allocate(size_t thingSize) {
278 270373605 : JS_ASSERT(thingSize % Cell::CellSize == 0);
279 270373605 : checkSpan();
280 270373598 : uintptr_t thing = first;
281 270373598 : if (thing < last) {
282 : /* Bump-allocate from the current span. */
283 266128987 : first = thing + thingSize;
284 4244611 : } else if (JS_LIKELY(thing == last)) {
285 : /*
286 : * Move to the next span. We use JS_LIKELY because, without PGO,
287 : * compilers mispredict == here as unlikely to succeed.
288 : */
289 1538535 : *this = *reinterpret_cast<FreeSpan *>(thing);
290 : } else {
291 2706076 : return NULL;
292 : }
293 267667522 : checkSpan();
294 267667515 : return reinterpret_cast<void *>(thing);
295 : }
296 :
297 : /* A version of allocate when we know that the span is not empty. */
298 272469 : JS_ALWAYS_INLINE void *infallibleAllocate(size_t thingSize) {
299 272469 : JS_ASSERT(thingSize % Cell::CellSize == 0);
300 272469 : checkSpan();
301 272469 : uintptr_t thing = first;
302 272469 : if (thing < last) {
303 167390 : first = thing + thingSize;
304 : } else {
305 105079 : JS_ASSERT(thing == last);
306 105079 : *this = *reinterpret_cast<FreeSpan *>(thing);
307 : }
308 272469 : checkSpan();
309 272469 : return reinterpret_cast<void *>(thing);
310 : }
311 :
312 : /*
313 : * Allocate from a newly allocated arena. We do not move the free list
314 : * from the arena. Rather we set the arena up as fully used during the
315 : * initialization, so to allocate we simply return the first thing in the
316 : * arena and set the free list to point to the second.
317 : */
318 2067427 : JS_ALWAYS_INLINE void *allocateFromNewArena(uintptr_t arenaAddr, size_t firstThingOffset,
319 : size_t thingSize) {
320 2067427 : JS_ASSERT(!(arenaAddr & ArenaMask));
321 2067427 : uintptr_t thing = arenaAddr | firstThingOffset;
322 2067427 : first = thing + thingSize;
323 2067427 : last = arenaAddr | ArenaMask;
324 2067427 : checkSpan();
325 2067427 : return reinterpret_cast<void *>(thing);
326 : }
327 :
328 692673797 : void checkSpan() const {
329 : #ifdef DEBUG
330 : /* We do not allow spans at the end of the address space. */
331 692673797 : JS_ASSERT(last != uintptr_t(-1));
332 692673636 : JS_ASSERT(first);
333 692673636 : JS_ASSERT(last);
334 692673636 : JS_ASSERT(first - 1 <= last);
335 692673636 : uintptr_t arenaAddr = arenaAddressUnchecked();
336 692681895 : if (last & 1) {
337 : /* The span is the last. */
338 586868786 : JS_ASSERT((last & ArenaMask) == ArenaMask);
339 :
340 586872170 : if (first - 1 == last) {
341 : /* The span is last and empty. The above first != 0 check
342 : * implies that we are not at the end of the address space.
343 : */
344 43332544 : return;
345 : }
346 543539626 : size_t spanLength = last - first + 1;
347 543539626 : JS_ASSERT(spanLength % Cell::CellSize == 0);
348 :
349 : /* Start and end must belong to the same arena. */
350 543539621 : JS_ASSERT((first & ~ArenaMask) == arenaAddr);
351 543539621 : return;
352 : }
353 :
354 : /* The span is not the last and we have more spans to follow. */
355 105813109 : JS_ASSERT(first <= last);
356 105813077 : size_t spanLengthWithoutOneThing = last - first;
357 105813077 : JS_ASSERT(spanLengthWithoutOneThing % Cell::CellSize == 0);
358 :
359 105813077 : JS_ASSERT((first & ~ArenaMask) == arenaAddr);
360 :
361 : /*
362 : * If there is not enough space before the arena end to allocate one
363 : * more thing, then the span must be marked as the last one to avoid
364 : * storing a useless empty span reference.
365 : */
366 105813077 : size_t beforeTail = ArenaSize - (last & ArenaMask);
367 105813077 : JS_ASSERT(beforeTail >= sizeof(FreeSpan) + Cell::CellSize);
368 :
369 105813077 : FreeSpan *next = reinterpret_cast<FreeSpan *>(last);
370 :
371 : /*
372 : * The GC things on the list of free spans come from one arena
373 : * and the spans are linked in ascending address order with
374 : * at least one non-free thing between spans.
375 : */
376 105813077 : JS_ASSERT(last < next->first);
377 105813077 : JS_ASSERT(arenaAddr == next->arenaAddressUnchecked());
378 :
379 105813072 : if (next->first > next->last) {
380 : /*
381 : * The next span is the empty span that terminates the list for
382 : * arenas that do not have any free things at the end.
383 : */
384 19429078 : JS_ASSERT(next->first - 1 == next->last);
385 19429078 : JS_ASSERT(arenaAddr + ArenaSize == next->first);
386 : }
387 : #endif
388 : }
389 :
390 : };
391 :
392 : /* Every arena has a header. */
393 : struct ArenaHeader {
394 : friend struct FreeLists;
395 :
396 : JSCompartment *compartment;
397 :
398 : /*
399 : * ArenaHeader::next has two purposes: when unallocated, it points to the
400 : * next available Arena's header. When allocated, it points to the next
401 : * arena of the same size class and compartment.
402 : */
403 : ArenaHeader *next;
404 :
405 : private:
406 : /*
407 : * The first span of free things in the arena. We encode it as the start
408 : * and end offsets within the arena, not as FreeSpan structure, to
409 : * minimize the header size.
410 : */
411 : size_t firstFreeSpanOffsets;
412 :
413 : /*
414 : * One of the AllocKind constants, or FINALIZE_LIMIT when the arena does not
415 : * contain any GC things and is on the list of empty arenas in the GC
416 : * chunk. The latter lets us quickly check whether the arena is allocated
417 : * during the conservative GC scanning without searching for the arena in
418 : * the list.
419 : */
420 : size_t allocKind : 8;
421 :
422 : /*
423 : * When recursive marking uses too much stack the marking is delayed and
424 : * the corresponding arenas are put into a stack using the following field
425 : * as a linkage. To distinguish the bottom of the stack from the arenas
426 : * not present in the stack we use an extra flag to tag arenas on the
427 : * stack.
428 : *
429 : * Delayed marking is also used for arenas that we allocate into during an
430 : * incremental GC. In this case, we intend to mark all the objects in the
431 : * arena, and it's faster to do this marking in bulk.
432 : *
433 : * To minimize the ArenaHeader size we record the next delayed marking
434 : * linkage as arenaAddress() >> ArenaShift and pack it with the allocKind
435 : * field and hasDelayedMarking flag. We use 8 bits for the allocKind, not
436 : * ArenaShift - 1, so the compiler can use byte-level memory instructions
437 : * to access it.
438 : */
439 : public:
440 : size_t hasDelayedMarking : 1;
441 : size_t allocatedDuringIncremental : 1;
442 : size_t markOverflow : 1;
443 : size_t nextDelayedMarking : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
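    /*
     * A worked example of the packing above: with common ABIs, allocKind and
     * the four fields above share a single size_t word. On a 64-bit build
     * (JS_BITS_PER_WORD == 64) that is 8 bits of allocKind, three 1-bit
     * flags, and 53 bits of nextDelayedMarking; on a 32-bit build
     * nextDelayedMarking gets 21 bits, which still covers any 32-bit arena
     * address shifted right by ArenaShift (12 bits).
     */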
444 :
445 : static void staticAsserts() {
446 : /* We must be able to fit the allocKind into uint8_t. */
447 : JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
448 :
449 : /*
450 : * The nextDelayedMarking packing assumes that ArenaShift has enough bits
451 : * to cover allocKind and hasDelayedMarking.
452 : */
453 : JS_STATIC_ASSERT(ArenaShift >= 8 + 1 + 1 + 1);
454 : }
455 :
456 : inline uintptr_t address() const;
457 : inline Chunk *chunk() const;
458 :
459 775266408 : bool allocated() const {
460 775266408 : JS_ASSERT(allocKind <= size_t(FINALIZE_LIMIT));
461 775266408 : return allocKind < size_t(FINALIZE_LIMIT);
462 : }
463 :
464 2067427 : void init(JSCompartment *comp, AllocKind kind) {
465 2067427 : JS_ASSERT(!allocated());
466 2067427 : JS_ASSERT(!markOverflow);
467 2067427 : JS_ASSERT(!allocatedDuringIncremental);
468 2067427 : JS_ASSERT(!hasDelayedMarking);
469 2067427 : compartment = comp;
470 :
471 : JS_STATIC_ASSERT(FINALIZE_LIMIT <= 255);
472 2067427 : allocKind = size_t(kind);
473 :
474 : /* See comments in FreeSpan::allocateFromNewArena. */
475 2067427 : firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
476 2067427 : }
477 :
478 13059560 : void setAsNotAllocated() {
479 13059560 : allocKind = size_t(FINALIZE_LIMIT);
480 13059560 : markOverflow = 0;
481 13059560 : allocatedDuringIncremental = 0;
482 13059560 : hasDelayedMarking = 0;
483 13059560 : nextDelayedMarking = 0;
484 13059560 : }
485 :
486 44512118 : uintptr_t arenaAddress() const {
487 44512118 : return address();
488 : }
489 :
490 5631019 : Arena *getArena() {
491 5631019 : return reinterpret_cast<Arena *>(arenaAddress());
492 : }
493 :
494 481094773 : AllocKind getAllocKind() const {
495 481094773 : JS_ASSERT(allocated());
496 481094729 : return AllocKind(allocKind);
497 : }
498 :
499 : inline size_t getThingSize() const;
500 :
501 18375868 : bool hasFreeThings() const {
502 18375868 : return firstFreeSpanOffsets != FreeSpan::FullArenaOffsets;
503 : }
504 :
505 : inline bool isEmpty() const;
506 :
507 420185 : void setAsFullyUsed() {
508 420185 : firstFreeSpanOffsets = FreeSpan::FullArenaOffsets;
509 420185 : }
510 :
511 16429899 : FreeSpan getFirstFreeSpan() const {
512 : #ifdef DEBUG
513 16429899 : checkSynchronizedWithFreeList();
514 : #endif
515 16429845 : return FreeSpan::decodeOffsets(arenaAddress(), firstFreeSpanOffsets);
516 : }
517 :
518 5058885 : void setFirstFreeSpan(const FreeSpan *span) {
519 5058885 : JS_ASSERT(span->isWithinArena(arenaAddress()));
520 5058885 : firstFreeSpanOffsets = span->encodeAsOffsets();
521 5058885 : }
522 :
523 : #ifdef DEBUG
524 : void checkSynchronizedWithFreeList() const;
525 : #endif
526 :
527 : inline ArenaHeader *getNextDelayedMarking() const;
528 : inline void setNextDelayedMarking(ArenaHeader *aheader);
529 : };
530 :
531 : struct Arena {
532 : /*
533 : * Layout of an arena:
534 : * An arena is 4K in size and 4K-aligned. It starts with the ArenaHeader
535 : * descriptor followed by some pad bytes. The remainder of the arena is
536 : * filled with the array of T things. The pad bytes ensure that the thing
537 : * array ends exactly at the end of the arena.
538 : *
539 : * +-------------+-----+----+----+-----+----+
540 : * | ArenaHeader | pad | T0 | T1 | ... | Tn |
541 : * +-------------+-----+----+----+-----+----+
542 : *
543 : * <----------------------------------------> = ArenaSize bytes
544 : * <-------------------> = first thing offset
545 : */
546 : ArenaHeader aheader;
547 : uint8_t data[ArenaSize - sizeof(ArenaHeader)];
548 :
549 : private:
550 : static JS_FRIEND_DATA(const uint32_t) ThingSizes[];
551 : static JS_FRIEND_DATA(const uint32_t) FirstThingOffsets[];
552 :
553 : public:
554 : static void staticAsserts();
555 :
556 528541064 : static size_t thingSize(AllocKind kind) {
557 528541064 : return ThingSizes[kind];
558 : }
559 :
560 35132149 : static size_t firstThingOffset(AllocKind kind) {
561 35132149 : return FirstThingOffsets[kind];
562 : }
563 :
564 17408641 : static size_t thingsPerArena(size_t thingSize) {
565 17408641 : JS_ASSERT(thingSize % Cell::CellSize == 0);
566 :
567 : /* We should be able to fit FreeSpan in any GC thing. */
568 17408641 : JS_ASSERT(thingSize >= sizeof(FreeSpan));
569 :
570 17408641 : return (ArenaSize - sizeof(ArenaHeader)) / thingSize;
571 : }
572 :
573 1089 : static size_t thingsSpan(size_t thingSize) {
574 1089 : return thingsPerArena(thingSize) * thingSize;
575 : }
576 :
577 502344013 : static bool isAligned(uintptr_t thing, size_t thingSize) {
578 : /* Things end at the arena end. */
579 502344013 : uintptr_t tailOffset = (ArenaSize - thing) & ArenaMask;
580 502344013 : return tailOffset % thingSize == 0;
581 : }
582 :
583 29525194 : uintptr_t address() const {
584 29525194 : return aheader.address();
585 : }
586 :
587 20821259 : uintptr_t thingsStart(AllocKind thingKind) {
588 20821259 : return address() | firstThingOffset(thingKind);
589 : }
590 :
591 8703935 : uintptr_t thingsEnd() {
592 8703935 : return address() + ArenaSize;
593 : }
594 :
595 : template <typename T>
596 : bool finalize(JSContext *cx, AllocKind thingKind, size_t thingSize, bool background);
597 : };
598 :
599 : /* The chunk header (located at the end of the chunk to preserve arena alignment). */
600 : struct ChunkInfo {
601 : Chunk *next;
602 : Chunk **prevp;
603 :
604 : /* Free arenas are linked together with aheader.next. */
605 : ArenaHeader *freeArenasHead;
606 :
607 : /*
608 : * Decommitted arenas are tracked by a bitmap in the chunk header. We use
609 : * this offset to start our search iteration close to a decommitted arena
610 : * that we can allocate.
611 : */
612 : uint32_t lastDecommittedArenaOffset;
613 :
614 : /* Number of free arenas, either committed or decommitted. */
615 : uint32_t numArenasFree;
616 :
617 : /* Number of free, committed arenas. */
618 : uint32_t numArenasFreeCommitted;
619 :
620 : /* Number of GC cycles this chunk has survived. */
621 : uint32_t age;
622 : };
623 :
624 : /*
625 : * Calculating ArenasPerChunk:
626 : *
627 : * In order to figure out how many Arenas will fit in a chunk, we need to know
628 : * how much extra space is available after we allocate the header data. This
629 : * is a problem because the header size depends on the number of arenas in the
630 : * chunk. The two dependent fields are bitmap and decommittedArenas.
631 : *
632 : * For the mark bitmap, we know that each arena will use a fixed number of full
633 : * bytes: ArenaBitmapBytes. The full size of the header data is this number
634 : * multiplied by the eventual number of arenas we have in the header. We,
635 : * conceptually, distribute this header data among the individual arenas and do
636 : * not include it in the header. This way we do not have to worry about its
637 : * variable size: it gets attached to the variable number we are computing.
638 : *
639 : * For the decommitted arena bitmap, we only have 1 bit per arena, so this
640 : * technique will not work. Instead, we observe that we do not have enough
641 : * header info to fill 8 full arenas: it is currently 4 on 64-bit, less on
642 : * 32-bit. Thus, with current numbers, we need 64 bytes for decommittedArenas.
643 : * This will not become 63 bytes unless we double the data required in the
644 : * header. Therefore, we just compute the number of bytes required to track
645 : * every possible arena and do not worry about slop bits, since there are too
646 : * few to usefully allocate.
647 : *
648 : * To actually compute the number of arenas we can allocate in a chunk, we
649 : * divide the amount of available space less the header info (not including
650 : * the mark bitmap which is distributed into the arena size) by the size of
651 : * the arena (with the mark bitmap bytes it uses).
652 : */
653 : const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
654 : const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
655 : const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
656 : const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
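/*
 * A worked example of the above, assuming an 8-byte Cell::CellSize and a
 * 64-bit build where sizeof(ChunkInfo) is 40 bytes: ArenaBitmapBytes is
 * 4096 / 8 / 8 == 64, so BytesPerArenaWithHeader == 4096 + 64 == 4160.
 * The decommit bitmap needs (1 << 20) / 4096 / 8 == 32 bytes, giving
 * ChunkBytesAvailable == 1048576 - 40 - 32 == 1048504 and therefore
 * ArenasPerChunk == 1048504 / 4160 == 252.
 */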
657 :
658 : /* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
659 : struct ChunkBitmap {
660 : uintptr_t bitmap[ArenaBitmapWords * ArenasPerChunk];
661 :
662 : JS_ALWAYS_INLINE void getMarkWordAndMask(const Cell *cell, uint32_t color,
663 : uintptr_t **wordp, uintptr_t *maskp);
664 :
665 1191989300 : JS_ALWAYS_INLINE bool isMarked(const Cell *cell, uint32_t color) {
666 : uintptr_t *word, mask;
667 1191989300 : getMarkWordAndMask(cell, color, &word, &mask);
668 1191970148 : return *word & mask;
669 : }
670 :
671 935030078 : JS_ALWAYS_INLINE bool markIfUnmarked(const Cell *cell, uint32_t color) {
672 : uintptr_t *word, mask;
673 935030078 : getMarkWordAndMask(cell, BLACK, &word, &mask);
674 935030078 : if (*word & mask)
675 439455202 : return false;
676 495574876 : *word |= mask;
677 495574876 : if (color != BLACK) {
678 : /*
679 : * We use getMarkWordAndMask to recalculate both mask and word as
680 : * doing just mask << color may overflow the mask.
681 : */
682 12148726 : getMarkWordAndMask(cell, color, &word, &mask);
683 12148726 : if (*word & mask)
684 0 : return false;
685 12148726 : *word |= mask;
686 : }
687 495574876 : return true;
688 : }
689 :
690 : JS_ALWAYS_INLINE void unmark(const Cell *cell, uint32_t color) {
691 : uintptr_t *word, mask;
692 : getMarkWordAndMask(cell, color, &word, &mask);
693 : *word &= ~mask;
694 : }
695 :
696 157935 : void clear() {
697 157935 : PodArrayZero(bitmap);
698 157935 : }
699 :
700 : #ifdef DEBUG
701 2153 : bool noBitsSet(ArenaHeader *aheader) {
702 : /*
703 : * We assume that the part of the bitmap corresponding to the arena
704 : * has the exact number of words so we do not need to deal with a word
705 : * that covers bits from two arenas.
706 : */
707 : JS_STATIC_ASSERT(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD);
708 :
709 : uintptr_t *word, unused;
710 2153 : getMarkWordAndMask(reinterpret_cast<Cell *>(aheader->address()), BLACK, &word, &unused);
711 36601 : for (size_t i = 0; i != ArenaBitmapWords; i++) {
712 34448 : if (word[i])
713 0 : return false;
714 : }
715 2153 : return true;
716 : }
717 : #endif
718 : };
719 :
720 : JS_STATIC_ASSERT(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap));
721 :
722 : typedef BitArray<ArenasPerChunk> PerArenaBitmap;
723 :
724 : const size_t ChunkPadSize = ChunkSize
725 : - (sizeof(Arena) * ArenasPerChunk)
726 : - sizeof(ChunkBitmap)
727 : - sizeof(PerArenaBitmap)
728 : - sizeof(ChunkInfo);
729 : JS_STATIC_ASSERT(ChunkPadSize < BytesPerArenaWithHeader);
730 :
731 : /*
732 : * Chunks contain arenas and associated data structures (mark bitmap, delayed
733 : * marking state).
734 : */
735 : struct Chunk {
736 : Arena arenas[ArenasPerChunk];
737 :
738 : /* Pad to full size to ensure cache alignment of ChunkInfo. */
739 : uint8_t padding[ChunkPadSize];
740 :
741 : ChunkBitmap bitmap;
742 : PerArenaBitmap decommittedArenas;
743 : ChunkInfo info;
744 :
745 217009915 : static Chunk *fromAddress(uintptr_t addr) {
746 217009915 : addr &= ~ChunkMask;
747 217009915 : return reinterpret_cast<Chunk *>(addr);
748 : }
749 :
750 -1 : static bool withinArenasRange(uintptr_t addr) {
751 -1 : uintptr_t offset = addr & ChunkMask;
752 -1 : return offset < ArenasPerChunk * ArenaSize;
753 : }
754 :
755 11626109 : static size_t arenaIndex(uintptr_t addr) {
756 11626109 : JS_ASSERT(withinArenasRange(addr));
757 11626109 : return (addr & ChunkMask) >> ArenaShift;
758 : }
759 :
760 : uintptr_t address() const {
761 : uintptr_t addr = reinterpret_cast<uintptr_t>(this);
762 : JS_ASSERT(!(addr & ChunkMask));
763 : return addr;
764 : }
765 :
766 2159685 : bool unused() const {
767 2159685 : return info.numArenasFree == ArenasPerChunk;
768 : }
769 :
770 4190858 : bool hasAvailableArenas() const {
771 4190858 : return info.numArenasFree != 0;
772 : }
773 :
774 : inline void addToAvailableList(JSCompartment *compartment);
775 : inline void insertToAvailableList(Chunk **insertPoint);
776 : inline void removeFromAvailableList();
777 :
778 : ArenaHeader *allocateArena(JSCompartment *comp, AllocKind kind);
779 :
780 : void releaseArena(ArenaHeader *aheader);
781 :
782 : static Chunk *allocate(JSRuntime *rt);
783 :
784 : /* Must be called with the GC lock taken. */
785 : static inline void release(JSRuntime *rt, Chunk *chunk);
786 : static inline void releaseList(JSRuntime *rt, Chunk *chunkListHead);
787 :
788 : /* Must be called with the GC lock taken. */
789 : inline void prepareToBeFreed(JSRuntime *rt);
790 :
791 : /*
792 : * Assuming that the info.prevp points to the next field of the previous
793 : * chunk in a doubly-linked list, get that chunk.
794 : */
795 0 : Chunk *getPrevious() {
796 0 : JS_ASSERT(info.prevp);
797 0 : return fromPointerToNext(info.prevp);
798 : }
799 :
800 : /* Get the chunk from a pointer to its info.next field. */
801 0 : static Chunk *fromPointerToNext(Chunk **nextFieldPtr) {
802 0 : uintptr_t addr = reinterpret_cast<uintptr_t>(nextFieldPtr);
803 0 : JS_ASSERT((addr & ChunkMask) == offsetof(Chunk, info.next));
804 0 : return reinterpret_cast<Chunk *>(addr - offsetof(Chunk, info.next));
805 : }
806 :
807 : private:
808 : inline void init();
809 :
810 : /* Search for a decommitted arena to allocate. */
811 : unsigned findDecommittedArenaOffset();
812 : ArenaHeader* fetchNextDecommittedArena();
813 :
814 : public:
815 : /* Unlink and return the freeArenasHead. */
816 : inline ArenaHeader* fetchNextFreeArena(JSRuntime *rt);
817 :
818 : inline void addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader);
819 : };
820 :
821 : JS_STATIC_ASSERT(sizeof(Chunk) == ChunkSize);
822 :
823 : class ChunkPool {
824 : Chunk *emptyChunkListHead;
825 : size_t emptyCount;
826 :
827 : public:
828 19910 : ChunkPool()
829 : : emptyChunkListHead(NULL),
830 19910 : emptyCount(0) { }
831 :
832 12 : size_t getEmptyCount() const {
833 12 : return emptyCount;
834 : }
835 :
836 : inline bool wantBackgroundAllocation(JSRuntime *rt) const;
837 :
838 : /* Must be called with the GC lock taken. */
839 : inline Chunk *get(JSRuntime *rt);
840 :
841 : /* Must be called either during the GC or with the GC lock taken. */
842 : inline void put(Chunk *chunk);
843 :
844 : /*
845 : * Return the list of chunks that can be released outside the GC lock.
846 : * Must be called either during the GC or with the GC lock taken.
847 : */
848 : Chunk *expire(JSRuntime *rt, bool releaseAll);
849 :
850 : /* Must be called with the GC lock taken. */
851 : void expireAndFree(JSRuntime *rt, bool releaseAll);
852 :
853 : /* Must be called either during the GC or with the GC lock taken. */
854 : JS_FRIEND_API(int64_t) countCleanDecommittedArenas(JSRuntime *rt);
855 : };
856 :
857 : inline uintptr_t
858 -1 : Cell::address() const
859 : {
860 -1 : uintptr_t addr = uintptr_t(this);
861 -1 : JS_ASSERT(addr % Cell::CellSize == 0);
862 -1 : JS_ASSERT(Chunk::withinArenasRange(addr));
863 -1 : return addr;
864 : }
865 :
866 : inline ArenaHeader *
867 -965361959 : Cell::arenaHeader() const
868 : {
869 -965361959 : uintptr_t addr = address();
870 -965600691 : addr &= ~ArenaMask;
871 -965600691 : return reinterpret_cast<ArenaHeader *>(addr);
872 : }
873 :
874 : Chunk *
875 923784894 : Cell::chunk() const
876 : {
877 923784894 : uintptr_t addr = uintptr_t(this);
878 923784894 : JS_ASSERT(addr % Cell::CellSize == 0);
879 923784894 : addr &= ~(ChunkSize - 1);
880 923784894 : return reinterpret_cast<Chunk *>(addr);
881 : }
882 :
883 : AllocKind
884 353805402 : Cell::getAllocKind() const
885 : {
886 353805402 : return arenaHeader()->getAllocKind();
887 : }
888 :
889 : #ifdef DEBUG
890 : inline bool
891 459700422 : Cell::isAligned() const
892 : {
893 459700422 : return Arena::isAligned(address(), arenaHeader()->getThingSize());
894 : }
895 : #endif
896 :
897 : inline uintptr_t
898 76108787 : ArenaHeader::address() const
899 : {
900 76108787 : uintptr_t addr = reinterpret_cast<uintptr_t>(this);
901 76108787 : JS_ASSERT(!(addr & ArenaMask));
902 76108787 : JS_ASSERT(Chunk::withinArenasRange(addr));
903 76108625 : return addr;
904 : }
905 :
906 : inline Chunk *
907 2069473 : ArenaHeader::chunk() const
908 : {
909 2069473 : return Chunk::fromAddress(address());
910 : }
911 :
912 : inline bool
913 272469 : ArenaHeader::isEmpty() const
914 : {
915 : /* Arena is empty if its first span covers the whole arena. */
916 272469 : JS_ASSERT(allocated());
917 272469 : size_t firstThingOffset = Arena::firstThingOffset(getAllocKind());
918 272469 : return firstFreeSpanOffsets == FreeSpan::encodeOffsets(firstThingOffset, ArenaMask);
919 : }
920 :
921 : inline size_t
922 264325816 : ArenaHeader::getThingSize() const
923 : {
924 264325816 : JS_ASSERT(allocated());
925 264325787 : return Arena::thingSize(getAllocKind());
926 : }
927 :
928 : inline ArenaHeader *
929 52010 : ArenaHeader::getNextDelayedMarking() const
930 : {
931 52010 : return &reinterpret_cast<Arena *>(nextDelayedMarking << ArenaShift)->aheader;
932 : }
933 :
934 : inline void
935 52010 : ArenaHeader::setNextDelayedMarking(ArenaHeader *aheader)
936 : {
937 52010 : JS_ASSERT(!(uintptr_t(aheader) & ArenaMask));
938 52010 : hasDelayedMarking = 1;
939 52010 : nextDelayedMarking = aheader->arenaAddress() >> ArenaShift;
940 52010 : }
941 :
942 : JS_ALWAYS_INLINE void
943 923787047 : ChunkBitmap::getMarkWordAndMask(const Cell *cell, uint32_t color,
944 : uintptr_t **wordp, uintptr_t *maskp)
945 : {
946 923787047 : size_t bit = (cell->address() & ChunkMask) / Cell::CellSize + color;
947 923787047 : JS_ASSERT(bit < ArenaBitmapBits * ArenasPerChunk);
948 923787047 : *maskp = uintptr_t(1) << (bit % JS_BITS_PER_WORD);
949 923787047 : *wordp = &bitmap[bit / JS_BITS_PER_WORD];
950 923787047 : }
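/*
 * A worked example, assuming Cell::CellSize == 8: a cell at chunk offset
 * 0x2040 maps to bit index 0x2040 / 8 + color == 0x408 + color. With
 * color == 0, a 64-bit build selects bitmap word 0x408 / 64 == 16 and mask
 * uintptr_t(1) << (0x408 % 64) == uintptr_t(1) << 8.
 */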
951 :
952 : static void
953 2127093528 : AssertValidColor(const void *thing, uint32_t color)
954 : {
955 : #ifdef DEBUG
956 2127093528 : ArenaHeader *aheader = reinterpret_cast<const js::gc::Cell *>(thing)->arenaHeader();
957 2126994586 : JS_ASSERT_IF(color, color < aheader->getThingSize() / Cell::CellSize);
958 : #endif
959 2126994586 : }
960 :
961 : inline bool
962 1192060736 : Cell::isMarked(uint32_t color) const
963 : {
964 1192060736 : AssertValidColor(this, color);
965 1192013176 : return chunk()->bitmap.isMarked(this, color);
966 : }
967 :
968 : bool
969 935030078 : Cell::markIfUnmarked(uint32_t color) const
970 : {
971 935030078 : AssertValidColor(this, color);
972 935030078 : return chunk()->bitmap.markIfUnmarked(this, color);
973 : }
974 :
975 : void
976 : Cell::unmark(uint32_t color) const
977 : {
978 : JS_ASSERT(color != BLACK);
979 : AssertValidColor(this, color);
980 : chunk()->bitmap.unmark(this, color);
981 : }
982 :
983 : JSCompartment *
984 1950543725 : Cell::compartment() const
985 : {
986 1950543725 : return arenaHeader()->compartment;
987 : }
988 :
989 : static inline JSGCTraceKind
990 101369472 : MapAllocToTraceKind(AllocKind thingKind)
991 : {
992 : static const JSGCTraceKind map[FINALIZE_LIMIT] = {
993 : JSTRACE_OBJECT, /* FINALIZE_OBJECT0 */
994 : JSTRACE_OBJECT, /* FINALIZE_OBJECT0_BACKGROUND */
995 : JSTRACE_OBJECT, /* FINALIZE_OBJECT2 */
996 : JSTRACE_OBJECT, /* FINALIZE_OBJECT2_BACKGROUND */
997 : JSTRACE_OBJECT, /* FINALIZE_OBJECT4 */
998 : JSTRACE_OBJECT, /* FINALIZE_OBJECT4_BACKGROUND */
999 : JSTRACE_OBJECT, /* FINALIZE_OBJECT8 */
1000 : JSTRACE_OBJECT, /* FINALIZE_OBJECT8_BACKGROUND */
1001 : JSTRACE_OBJECT, /* FINALIZE_OBJECT12 */
1002 : JSTRACE_OBJECT, /* FINALIZE_OBJECT12_BACKGROUND */
1003 : JSTRACE_OBJECT, /* FINALIZE_OBJECT16 */
1004 : JSTRACE_OBJECT, /* FINALIZE_OBJECT16_BACKGROUND */
1005 : JSTRACE_SCRIPT, /* FINALIZE_SCRIPT */
1006 : JSTRACE_SHAPE, /* FINALIZE_SHAPE */
1007 : JSTRACE_BASE_SHAPE, /* FINALIZE_BASE_SHAPE */
1008 : JSTRACE_TYPE_OBJECT,/* FINALIZE_TYPE_OBJECT */
1009 : #if JS_HAS_XML_SUPPORT /* FINALIZE_XML */
1010 : JSTRACE_XML,
1011 : #endif
1012 : JSTRACE_STRING, /* FINALIZE_SHORT_STRING */
1013 : JSTRACE_STRING, /* FINALIZE_STRING */
1014 : JSTRACE_STRING, /* FINALIZE_EXTERNAL_STRING */
1015 : };
1016 101369472 : return map[thingKind];
1017 : }
1018 :
1019 : inline JSGCTraceKind
1020 : GetGCThingTraceKind(const void *thing);
1021 :
1022 : struct ArenaLists {
1023 :
1024 : /*
1025 : * ArenaList::head points to the start of the list. Normally cursor points
1026 : * to the first arena in the list with some free things and all arenas
1027 : * before cursor are fully allocated. However, the arena currently being
1028 : * allocated from is considered full even though its list of free spans has
1029 : * been moved into the freeList. So during the GC or cell enumeration, when
1030 : * an unallocated freeList is moved back to the arena, we can see an arena
1031 : * with some free cells before the cursor. The cursor is an indirect
1032 : * pointer to allow for efficient list insertion at the cursor point and
1033 : * other list manipulations.
1034 : */
1035 : struct ArenaList {
1036 : ArenaHeader *head;
1037 : ArenaHeader **cursor;
1038 :
1039 2995653 : ArenaList() {
1040 2995653 : clear();
1041 2995653 : }
1042 :
1043 3232982 : void clear() {
1044 3232982 : head = NULL;
1045 3232982 : cursor = &head;
1046 3232982 : }
1047 : };
1048 :
1049 : private:
1050 : /*
1051 : * For each arena kind its free list is represented as the first span with
1052 : * free things. Initially all the spans are initialized as empty. After we
1053 : * find a new arena with available things we move its first free span into
1054 : * the list and set the arena as fully allocated. This way we do not need to
1055 : * update the arena header after the initial allocation. When starting the
1056 : * GC we move the head of the list of spans back to the arena only for
1057 : * arenas that were not fully allocated.
1058 : */
1059 : FreeSpan freeLists[FINALIZE_LIMIT];
1060 :
1061 : ArenaList arenaLists[FINALIZE_LIMIT];
1062 :
1063 : #ifdef JS_THREADSAFE
1064 : /*
1065 : * The background finalization adds the finalized arenas to the list at
1066 : * the *cursor position. backgroundFinalizeState controls the interaction
1067 : * between the GC lock and the access to the list from the allocation
1068 : * thread.
1069 : *
1070 : * BFS_DONE indicates that the finalization is not running or cannot
1071 : * affect this arena list. The allocation thread can access the list
1072 : * outside the GC lock.
1073 : *
1074 : * In BFS_RUN and BFS_JUST_FINISHED the allocation thread must take the
1075 : * lock. The former indicates that the finalization still runs. The latter
1076 : * signals that the finalization has just added finalized arenas to the list.
1077 : * In that case the lock effectively serves as a read barrier to ensure that
1078 : * the allocation thread sees all the writes done during finalization.
1079 : */
1080 : enum BackgroundFinalizeState {
1081 : BFS_DONE,
1082 : BFS_RUN,
1083 : BFS_JUST_FINISHED
1084 : };
1085 :
1086 : volatile uintptr_t backgroundFinalizeState[FINALIZE_LIMIT];
1087 : #endif
1088 :
1089 : public:
1090 45576 : ArenaLists() {
1091 957096 : for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
1092 911520 : freeLists[i].initAsEmpty();
1093 : #ifdef JS_THREADSAFE
1094 957096 : for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
1095 911520 : backgroundFinalizeState[i] = BFS_DONE;
1096 : #endif
1097 45576 : }
1098 :
1099 45571 : ~ArenaLists() {
1100 1913982 : for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
1101 : #ifdef JS_THREADSAFE
1102 : /*
1103 : * We can only call this during the shutdown after the last GC when
1104 : * the background finalization is disabled.
1105 : */
1106 911420 : JS_ASSERT(backgroundFinalizeState[i] == BFS_DONE);
1107 : #endif
1108 911420 : ArenaHeader **headp = &arenaLists[i].head;
1109 1714314 : while (ArenaHeader *aheader = *headp) {
1110 401447 : *headp = aheader->next;
1111 401447 : aheader->chunk()->releaseArena(aheader);
1112 : }
1113 : }
1114 45571 : }
1115 :
1116 11389367 : const FreeSpan *getFreeList(AllocKind thingKind) const {
1117 11389367 : return &freeLists[thingKind];
1118 : }
1119 :
1120 480896 : ArenaHeader *getFirstArena(AllocKind thingKind) const {
1121 480896 : return arenaLists[thingKind].head;
1122 : }
1123 :
1124 71386 : bool arenaListsAreEmpty() const {
1125 607802 : for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
1126 : #ifdef JS_THREADSAFE
1127 : /*
1128 : * The arena list cannot be considered empty if the background
1129 : * finalization is not yet done.
1130 : */
1131 582149 : if (backgroundFinalizeState[i] != BFS_DONE)
1132 23328 : return false;
1133 : #endif
1134 558821 : if (arenaLists[i].head)
1135 22405 : return false;
1136 : }
1137 25653 : return true;
1138 : }
1139 :
1140 : #ifdef DEBUG
1141 108 : bool checkArenaListAllUnmarked() const {
1142 2268 : for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
1143 : # ifdef JS_THREADSAFE
1144 : /* The background finalization must have stopped at this point. */
1145 2339 : JS_ASSERT(backgroundFinalizeState[i] == BFS_DONE ||
1146 2339 : backgroundFinalizeState[i] == BFS_JUST_FINISHED);
1147 : # endif
1148 4313 : for (ArenaHeader *aheader = arenaLists[i].head; aheader; aheader = aheader->next) {
1149 2153 : if (!aheader->chunk()->bitmap.noBitsSet(aheader))
1150 0 : return false;
1151 : }
1152 : }
1153 108 : return true;
1154 : }
1155 : #endif
1156 :
1157 : #ifdef JS_THREADSAFE
1158 : bool doneBackgroundFinalize(AllocKind kind) const {
1159 : return backgroundFinalizeState[kind] == BFS_DONE;
1160 : }
1161 : #endif
1162 :
1163 : /*
1164 : * Return the free list back to the arena so the GC finalization will not
1165 : * run the finalizers over uninitialized bytes from free things.
1166 : */
1167 122478 : void purge() {
1168 2572038 : for (size_t i = 0; i != FINALIZE_LIMIT; ++i) {
1169 2449560 : FreeSpan *headSpan = &freeLists[i];
1170 2449560 : if (!headSpan->isEmpty()) {
1171 516062 : ArenaHeader *aheader = headSpan->arenaHeader();
1172 516062 : aheader->setFirstFreeSpan(headSpan);
1173 516062 : headSpan->initAsEmpty();
1174 : }
1175 : }
1176 122478 : }
1177 :
1178 : inline void prepareForIncrementalGC(JSRuntime *rt);
1179 :
1180 : /*
1181 : * Temporarily copy the free list heads to the arenas so the code can see
1182 : * the proper value in ArenaHeader::freeList when accessing the latter
1183 : * outside the GC.
1184 : */
1185 134482 : void copyFreeListsToArenas() {
1186 2824122 : for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
1187 2689640 : copyFreeListToArena(AllocKind(i));
1188 134482 : }
1189 :
1190 2721625 : void copyFreeListToArena(AllocKind thingKind) {
1191 2721625 : FreeSpan *headSpan = &freeLists[thingKind];
1192 2721625 : if (!headSpan->isEmpty()) {
1193 663778 : ArenaHeader *aheader = headSpan->arenaHeader();
1194 663778 : JS_ASSERT(!aheader->hasFreeThings());
1195 663778 : aheader->setFirstFreeSpan(headSpan);
1196 : }
1197 2721625 : }
1198 :
1199 : /*
1200 : * Clear the free lists in arenas that were temporarily set there using
1201 : * copyFreeListsToArenas().
1202 : */
1203 108819 : void clearFreeListsInArenas() {
1204 2285199 : for (size_t i = 0; i != FINALIZE_LIMIT; ++i)
1205 2176380 : clearFreeListInArena(AllocKind(i));
1206 108819 : }
1207 :
1208 :
1209 2208365 : void clearFreeListInArena(AllocKind kind) {
1210 2208365 : FreeSpan *headSpan = &freeLists[kind];
1211 2208365 : if (!headSpan->isEmpty()) {
1212 147716 : ArenaHeader *aheader = headSpan->arenaHeader();
1213 147716 : JS_ASSERT(aheader->getFirstFreeSpan().isSameNonEmptySpan(headSpan));
1214 147716 : aheader->setAsFullyUsed();
1215 : }
1216 2208365 : }
1217 :
1218 : /*
1219 : * Check that the free list is either empty or was synchronized with the
1220 : * arena using copyFreeListToArena().
1221 : */
1222 425066 : bool isSynchronizedFreeList(AllocKind kind) {
1223 425066 : FreeSpan *headSpan = &freeLists[kind];
1224 425066 : if (headSpan->isEmpty())
1225 353373 : return true;
1226 71693 : ArenaHeader *aheader = headSpan->arenaHeader();
1227 71693 : if (aheader->hasFreeThings()) {
1228 : /*
1229 : * If the arena has a free list, it must be the same as one in
1230 : * lists.
1231 : */
1232 39708 : JS_ASSERT(aheader->getFirstFreeSpan().isSameNonEmptySpan(headSpan));
1233 39708 : return true;
1234 : }
1235 31985 : return false;
1236 : }
1237 :
1238 270373604 : JS_ALWAYS_INLINE void *allocateFromFreeList(AllocKind thingKind, size_t thingSize) {
1239 270373604 : return freeLists[thingKind].allocate(thingSize);
1240 : }
1241 :
1242 : static void *refillFreeList(JSContext *cx, AllocKind thingKind);
1243 :
1244 25663 : void checkEmptyFreeLists() {
1245 : #ifdef DEBUG
1246 538923 : for (size_t i = 0; i < mozilla::ArrayLength(freeLists); ++i)
1247 513260 : JS_ASSERT(freeLists[i].isEmpty());
1248 : #endif
1249 25663 : }
1250 :
1251 : void checkEmptyFreeList(AllocKind kind) {
1252 : JS_ASSERT(freeLists[kind].isEmpty());
1253 : }
1254 :
1255 : void finalizeObjects(JSContext *cx);
1256 : void finalizeStrings(JSContext *cx);
1257 : void finalizeShapes(JSContext *cx);
1258 : void finalizeScripts(JSContext *cx);
1259 :
1260 : #ifdef JS_THREADSAFE
1261 : static void backgroundFinalize(JSContext *cx, ArenaHeader *listHead);
1262 : #endif
1263 :
1264 : private:
1265 : inline void finalizeNow(JSContext *cx, AllocKind thingKind);
1266 : inline void finalizeLater(JSContext *cx, AllocKind thingKind);
1267 :
1268 : inline void *allocateFromArena(JSCompartment *comp, AllocKind thingKind);
1269 : };
1270 :
1271 : /*
1272 : * Initial allocation size for data structures holding chunks is set to hold
1273 : * chunks with total capacity of 16MB to avoid buffer resizes during browser
1274 : * startup.
1275 : */
1276 : const size_t INITIAL_CHUNK_CAPACITY = 16 * 1024 * 1024 / ChunkSize;
1277 :
1278 : /* The number of GC cycles an empty chunk can survive before being released. */
1279 : const size_t MAX_EMPTY_CHUNK_AGE = 4;
1280 :
1281 : inline Cell *
1282 : AsCell(JSObject *obj)
1283 : {
1284 : return reinterpret_cast<Cell *>(obj);
1285 : }
1286 :
1287 : } /* namespace gc */
1288 :
1289 : struct GCPtrHasher
1290 : {
1291 : typedef void *Lookup;
1292 :
1293 0 : static HashNumber hash(void *key) {
1294 0 : return HashNumber(uintptr_t(key) >> JS_GCTHING_ZEROBITS);
1295 : }
1296 :
1297 0 : static bool match(void *l, void *k) { return l == k; }
1298 : };
1299 :
1300 : typedef HashMap<void *, uint32_t, GCPtrHasher, SystemAllocPolicy> GCLocks;
1301 :
1302 : struct RootInfo {
1303 21106968 : RootInfo() {}
1304 479080 : RootInfo(const char *name, JSGCRootType type) : name(name), type(type) {}
1305 : const char *name;
1306 : JSGCRootType type;
1307 : };
1308 :
1309 : typedef js::HashMap<void *,
1310 : RootInfo,
1311 : js::DefaultHasher<void *>,
1312 : js::SystemAllocPolicy> RootedValueMap;
1313 :
1314 : } /* namespace js */
1315 :
1316 : extern JS_FRIEND_API(JSGCTraceKind)
1317 : js_GetGCThingTraceKind(void *thing);
1318 :
1319 : extern JSBool
1320 : js_InitGC(JSRuntime *rt, uint32_t maxbytes);
1321 :
1322 : extern void
1323 : js_FinishGC(JSRuntime *rt);
1324 :
1325 : extern JSBool
1326 : js_AddRoot(JSContext *cx, js::Value *vp, const char *name);
1327 :
1328 : extern JSBool
1329 : js_AddGCThingRoot(JSContext *cx, void **rp, const char *name);
1330 :
1331 : #ifdef DEBUG
1332 : extern void
1333 : js_DumpNamedRoots(JSRuntime *rt,
1334 : void (*dump)(const char *name, void *rp, JSGCRootType type, void *data),
1335 : void *data);
1336 : #endif
1337 :
1338 : extern uint32_t
1339 : js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data);
1340 :
1341 : /* Table of pointers with count valid members. */
1342 : typedef struct JSPtrTable {
1343 : size_t count;
1344 : void **array;
1345 : } JSPtrTable;
1346 :
1347 : extern JSBool
1348 : js_LockGCThingRT(JSRuntime *rt, void *thing);
1349 :
1350 : extern void
1351 : js_UnlockGCThingRT(JSRuntime *rt, void *thing);
1352 :
1353 : extern JS_FRIEND_API(bool)
1354 : IsAboutToBeFinalized(const js::gc::Cell *thing);
1355 :
1356 : extern bool
1357 : IsAboutToBeFinalized(const js::Value &value);
1358 :
1359 : extern bool
1360 : js_IsAddressableGCThing(JSRuntime *rt, uintptr_t w, js::gc::AllocKind *thingKind, void **thing);
1361 :
1362 : namespace js {
1363 :
1364 : extern void
1365 : MarkCompartmentActive(js::StackFrame *fp);
1366 :
1367 : extern void
1368 : TraceRuntime(JSTracer *trc);
1369 :
1370 : extern JS_FRIEND_API(void)
1371 : MarkContext(JSTracer *trc, JSContext *acx);
1372 :
1373 : /* Must be called with GC lock taken. */
1374 : extern void
1375 : TriggerGC(JSRuntime *rt, js::gcreason::Reason reason);
1376 :
1377 : /* Must be called with GC lock taken. */
1378 : extern void
1379 : TriggerCompartmentGC(JSCompartment *comp, js::gcreason::Reason reason);
1380 :
1381 : extern void
1382 : MaybeGC(JSContext *cx);
1383 :
1384 : extern void
1385 : ShrinkGCBuffers(JSRuntime *rt);
1386 :
1387 : /*
1388 : * Kinds of js_GC invocation.
1389 : */
1390 : typedef enum JSGCInvocationKind {
1391 : /* Normal invocation. */
1392 : GC_NORMAL = 0,
1393 :
1394 : /* Minimize GC triggers and release empty GC chunks right away. */
1395 : GC_SHRINK = 1
1396 : } JSGCInvocationKind;
1397 :
1398 : /* Pass NULL for |comp| to get a full GC. */
1399 : extern void
1400 : GC(JSContext *cx, JSCompartment *comp, JSGCInvocationKind gckind, js::gcreason::Reason reason);
1401 :
1402 : extern void
1403 : GCSlice(JSContext *cx, JSCompartment *comp, JSGCInvocationKind gckind, js::gcreason::Reason reason);
1404 :
1405 : extern void
1406 : GCDebugSlice(JSContext *cx, int64_t objCount);
1407 :
1408 : } /* namespace js */
1409 :
1410 : namespace js {
1411 :
1412 : void
1413 : InitTracer(JSTracer *trc, JSRuntime *rt, JSTraceCallback callback);
1414 :
1415 : #ifdef JS_THREADSAFE
1416 :
1417 19908 : class GCHelperThread {
1418 : enum State {
1419 : IDLE,
1420 : SWEEPING,
1421 : ALLOCATING,
1422 : CANCEL_ALLOCATION,
1423 : SHUTDOWN
1424 : };
1425 :
1426 : /*
1427 : * During the finalization we do not free immediately. Rather we add the
1428 : * corresponding pointers to a buffer which we later release on a
1429 : * separate thread.
1430 : *
1431 : * The buffer is implemented as a vector of 64K arrays of pointers, not as
1432 : * a simple vector, to avoid realloc calls during the vector growth and to
1433 : * not bloat the binary size of the inlined freeLater method. Any OOM
1434 : * during buffer growth results in the pointer being freed immediately.
1435 : */
1436 : static const size_t FREE_ARRAY_SIZE = size_t(1) << 16;
1437 : static const size_t FREE_ARRAY_LENGTH = FREE_ARRAY_SIZE / sizeof(void *);
1438 :
1439 : JSRuntime *const rt;
1440 : PRThread *thread;
1441 : PRCondVar *wakeup;
1442 : PRCondVar *done;
1443 : volatile State state;
1444 :
1445 : JSContext *finalizationContext;
1446 : bool shrinkFlag;
1447 :
1448 : Vector<void **, 16, js::SystemAllocPolicy> freeVector;
1449 : void **freeCursor;
1450 : void **freeCursorEnd;
1451 :
1452 : Vector<js::gc::ArenaHeader *, 64, js::SystemAllocPolicy> finalizeVector;
1453 :
1454 : bool backgroundAllocation;
1455 :
1456 : friend struct js::gc::ArenaLists;
1457 :
1458 : JS_FRIEND_API(void)
1459 : replenishAndFreeLater(void *ptr);
1460 :
1461 22779 : static void freeElementsAndArray(void **array, void **end) {
1462 22779 : JS_ASSERT(array <= end);
1463 2851968 : for (void **p = array; p != end; ++p)
1464 2829189 : js::Foreground::free_(*p);
1465 22779 : js::Foreground::free_(array);
1466 22779 : }
1467 :
1468 : static void threadMain(void* arg);
1469 : void threadLoop();
1470 :
1471 : /* Must be called with the GC lock taken. */
1472 : void doSweep();
1473 :
1474 : public:
1475 19910 : GCHelperThread(JSRuntime *rt)
1476 : : rt(rt),
1477 : thread(NULL),
1478 : wakeup(NULL),
1479 : done(NULL),
1480 : state(IDLE),
1481 : finalizationContext(NULL),
1482 : shrinkFlag(false),
1483 : freeCursor(NULL),
1484 : freeCursorEnd(NULL),
1485 19910 : backgroundAllocation(true)
1486 19910 : { }
1487 :
1488 : bool init();
1489 : void finish();
1490 :
1491 : /* Must be called with the GC lock taken. */
1492 : void startBackgroundSweep(JSContext *cx, bool shouldShrink);
1493 :
1494 : /* Must be called with the GC lock taken. */
1495 : void startBackgroundShrink();
1496 :
1497 : /* Must be called with the GC lock taken. */
1498 : void waitBackgroundSweepEnd();
1499 :
1500 : /* Must be called with the GC lock taken. */
1501 : void waitBackgroundSweepOrAllocEnd();
1502 :
1503 : /* Must be called with the GC lock taken. */
1504 : inline void startBackgroundAllocationIfIdle();
1505 :
1506 46233 : bool canBackgroundAllocate() const {
1507 46233 : return backgroundAllocation;
1508 : }
1509 :
1510 : void disableBackgroundAllocation() {
1511 : backgroundAllocation = false;
1512 : }
1513 :
1514 : PRThread *getThread() const {
1515 : return thread;
1516 : }
1517 :
1518 : /*
1519 : * Outside the GC lock this may return true even when the sweeping has
1520 : * in fact already finished.
1521 : */
1522 7943653 : bool sweeping() const {
1523 7943653 : return state == SWEEPING;
1524 : }
1525 :
1526 : bool shouldShrink() const {
1527 : JS_ASSERT(sweeping());
1528 : return shrinkFlag;
1529 : }
1530 :
1531 2829189 : void freeLater(void *ptr) {
1532 2829189 : JS_ASSERT(!sweeping());
1533 2829189 : if (freeCursor != freeCursorEnd)
1534 2806410 : *freeCursor++ = ptr;
1535 : else
1536 22779 : replenishAndFreeLater(ptr);
1537 2829189 : }
1538 :
1539 : /* Must be called with the GC lock taken. */
1540 : bool prepareForBackgroundSweep();
1541 : };
1542 :
1543 : #endif /* JS_THREADSAFE */
1544 :
1545 : struct GCChunkHasher {
1546 : typedef gc::Chunk *Lookup;
1547 :
1548 : /*
1549 : * Strip zeros for better distribution after multiplying by the golden
1550 : * ratio.
1551 : */
1552 215119814 : static HashNumber hash(gc::Chunk *chunk) {
1553 215119814 : JS_ASSERT(!(uintptr_t(chunk) & gc::ChunkMask));
1554 215119814 : return HashNumber(uintptr_t(chunk) >> gc::ChunkShift);
1555 : }
1556 :
1557 11673595 : static bool match(gc::Chunk *k, gc::Chunk *l) {
1558 11673595 : JS_ASSERT(!(uintptr_t(k) & gc::ChunkMask));
1559 11673595 : JS_ASSERT(!(uintptr_t(l) & gc::ChunkMask));
1560 11673595 : return k == l;
1561 : }
1562 : };
1563 :
1564 : typedef HashSet<js::gc::Chunk *, GCChunkHasher, SystemAllocPolicy> GCChunkSet;
1565 :
1566 : template<class T>
1567 : struct MarkStack {
1568 : T *stack;
1569 : T *tos;
1570 : T *limit;
1571 :
1572 : T *ballast;
1573 : T *ballastLimit;
1574 :
1575 : size_t sizeLimit;
1576 :
1577 19910 : MarkStack(size_t sizeLimit)
1578 : : stack(NULL),
1579 : tos(NULL),
1580 : limit(NULL),
1581 : ballast(NULL),
1582 : ballastLimit(NULL),
1583 19910 : sizeLimit(sizeLimit) { }
1584 :
1585 19908 : ~MarkStack() {
1586 19908 : if (stack != ballast)
1587 0 : js_free(stack);
1588 19908 : js_free(ballast);
1589 19908 : }
1590 :
1591 19910 : bool init(size_t ballastcap) {
1592 19910 : JS_ASSERT(!stack);
1593 :
1594 19910 : if (ballastcap == 0)
1595 0 : return true;
1596 :
1597 19910 : ballast = (T *)js_malloc(sizeof(T) * ballastcap);
1598 19910 : if (!ballast)
1599 0 : return false;
1600 19910 : ballastLimit = ballast + ballastcap;
1601 19910 : initFromBallast();
1602 19910 : return true;
1603 : }
1604 :
1605 73800 : void initFromBallast() {
1606 73800 : stack = ballast;
1607 73800 : limit = ballastLimit;
1608 73800 : if (size_t(limit - stack) > sizeLimit)
1609 0 : limit = stack + sizeLimit;
1610 73800 : tos = stack;
1611 73800 : }
1612 :
1613 0 : void setSizeLimit(size_t size) {
1614 0 : JS_ASSERT(isEmpty());
1615 :
1616 0 : sizeLimit = size;
1617 0 : reset();
1618 0 : }
1619 :
1620 19320797 : bool push(T item) {
1621 19320797 : if (tos == limit) {
1622 18 : if (!enlarge())
1623 0 : return false;
1624 : }
1625 19320797 : JS_ASSERT(tos < limit);
1626 19320797 : *tos++ = item;
1627 19320797 : return true;
1628 : }
1629 :
1630 46072024 : bool push(T item1, T item2, T item3) {
1631 46072024 : T *nextTos = tos + 3;
1632 46072024 : if (nextTos > limit) {
1633 0 : if (!enlarge())
1634 0 : return false;
1635 0 : nextTos = tos + 3;
1636 : }
1637 46072024 : JS_ASSERT(nextTos <= limit);
1638 46072024 : tos[0] = item1;
1639 46072024 : tos[1] = item2;
1640 46072024 : tos[2] = item3;
1641 46072024 : tos = nextTos;
1642 46072024 : return true;
1643 : }
1644 :
1645 223488169 : bool isEmpty() const {
1646 223488169 : return tos == stack;
1647 : }
1648 :
1649 157532845 : T pop() {
1650 157532845 : JS_ASSERT(!isEmpty());
1651 157532845 : return *--tos;
1652 : }
1653 :
1654 8981314 : ptrdiff_t position() const {
1655 8981314 : return tos - stack;
1656 : }
1657 :
1658 53890 : void reset() {
1659 53890 : if (stack != ballast)
1660 18 : js_free(stack);
1661 53890 : initFromBallast();
1662 53890 : JS_ASSERT(stack == ballast);
1663 53890 : }
1664 :
1665 18 : bool enlarge() {
1666 18 : size_t tosIndex = tos - stack;
1667 18 : size_t cap = limit - stack;
1668 18 : if (cap == sizeLimit)
1669 0 : return false;
1670 18 : size_t newcap = cap * 2;
1671 18 : if (newcap == 0)
1672 0 : newcap = 32;
1673 18 : if (newcap > sizeLimit)
1674 0 : newcap = sizeLimit;
1675 :
1676 : T *newStack;
1677 18 : if (stack == ballast) {
1678 18 : newStack = (T *)js_malloc(sizeof(T) * newcap);
1679 18 : if (!newStack)
1680 0 : return false;
1681 589860 : for (T *src = stack, *dst = newStack; src < tos; )
1682 589824 : *dst++ = *src++;
1683 : } else {
1684 0 : newStack = (T *)js_realloc(stack, sizeof(T) * newcap);
1685 0 : if (!newStack)
1686 0 : return false;
1687 : }
1688 18 : stack = newStack;
1689 18 : tos = stack + tosIndex;
1690 18 : limit = newStack + newcap;
1691 18 : return true;
1692 : }
1693 :
1694 3 : size_t sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf) const {
1695 3 : size_t n = 0;
1696 3 : if (stack != ballast)
1697 0 : n += mallocSizeOf(stack);
1698 3 : n += mallocSizeOf(ballast);
1699 3 : return n;
1700 : }
1701 : };
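
/*
 * A minimal lifecycle sketch; the helper and its 64-entry ballast capacity
 * are illustrative, not part of the original header.
 */
static inline bool
MarkStackSmokeTestSketch()
{
    MarkStack<uintptr_t> stack(size_t(-1));   /* effectively no size limit */
    if (!stack.init(64))                      /* allocate the ballast buffer */
        return false;
    if (!stack.push(uintptr_t(1)))
        return false;
    uintptr_t top = stack.pop();              /* LIFO: returns 1 */
    stack.reset();                            /* drop back onto the ballast */
    return top == 1 && stack.isEmpty();
}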
1702 :
1703 : /*
1704 : * This class records how much work has been done in a given GC slice, so that
1705 : * we can return before pausing for too long. Some slices are allowed to run for
1706 : * unlimited time, and others are bounded. To reduce the number of gettimeofday
1707 : * calls, we only check the time every 1000 operations.
1708 : */
1709 : struct SliceBudget {
1710 : int64_t deadline; /* in microseconds */
1711 : intptr_t counter;
1712 :
1713 : static const intptr_t CounterReset = 1000;
1714 :
1715 : static const int64_t Unlimited = 0;
1716 : static int64_t TimeBudget(int64_t millis);
1717 : static int64_t WorkBudget(int64_t work);
1718 :
1719 :     /* Equivalent to SliceBudget(SliceBudget::Unlimited). */
1720 : SliceBudget();
1721 :
1722 : /* Instantiate as SliceBudget(Time/WorkBudget(n)). */
1723 : SliceBudget(int64_t budget);
1724 :
1725 0 : void reset() {
1726 0 : deadline = INT64_MAX;
1727 0 : counter = INTPTR_MAX;
1728 0 : }
1729 :
1730 54233629 : void step() {
1731 54233629 : counter--;
1732 54233629 : }
1733 :
1734 : bool checkOverBudget();
1735 :
1736 119724928 : bool isOverBudget() {
1737 119724928 : if (counter > 0)
1738 119724928 : return false;
1739 0 : return checkOverBudget();
1740 : }
1741 : };
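
/*
 * A sketch of how a bounded slice consumes its budget; this is not the
 * engine's actual marking loop. step() is called once per unit of work, and
 * isOverBudget() stays cheap until the counter runs out, at which point
 * checkOverBudget() consults the clock.
 */
static inline void
ConsumeBudgetSketch(SliceBudget &budget, size_t workItems)
{
    for (size_t i = 0; i < workItems; i++) {
        /* ... perform one unit of marking or sweeping work here ... */
        budget.step();
        if (budget.isOverBudget())
            return;   /* yield; a later slice resumes where this one stopped */
    }
}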
1742 :
1743 : static const size_t MARK_STACK_LENGTH = 32768;
1744 :
1745 19908 : struct GCMarker : public JSTracer {
1746 : private:
1747 : /*
1748 : * We use a common mark stack to mark GC things of different types and use
1749 :      * explicit tags to distinguish them when the type cannot be deduced from
1750 :      * the context of the push or pop operation.
1751 : */
1752 : enum StackTag {
1753 : ValueArrayTag,
1754 : ObjectTag,
1755 : TypeTag,
1756 : XmlTag,
1757 : SavedValueArrayTag,
1758 : LastTag = SavedValueArrayTag
1759 : };
1760 :
1761 : static const uintptr_t StackTagMask = 7;
1762 :
1763 : static void staticAsserts() {
1764 : JS_STATIC_ASSERT(StackTagMask >= uintptr_t(LastTag));
1765 : JS_STATIC_ASSERT(StackTagMask <= gc::Cell::CellMask);
1766 : }
1767 :
1768 : public:
1769 : explicit GCMarker();
1770 : bool init();
1771 :
1772 0 : void setSizeLimit(size_t size) { stack.setSizeLimit(size); }
1773 0 : size_t sizeLimit() const { return stack.sizeLimit; }
1774 :
1775 : void start(JSRuntime *rt);
1776 : void stop();
1777 : void reset();
1778 :
1779 17216759 : void pushObject(JSObject *obj) {
1780 17216759 : pushTaggedPtr(ObjectTag, obj);
1781 17216759 : }
1782 :
1783 2101514 : void pushType(types::TypeObject *type) {
1784 2101514 : pushTaggedPtr(TypeTag, type);
1785 2101514 : }
1786 :
1787 2522 : void pushXML(JSXML *xml) {
1788 2522 : pushTaggedPtr(XmlTag, xml);
1789 2522 : }
1790 :
1791 416657168 : uint32_t getMarkColor() const {
1792 416657168 : return color;
1793 : }
1794 :
1795 : /*
1796 : * The only valid color transition during a GC is from black to gray. It is
1797 : * wrong to switch the mark color from gray to black. The reason is that the
1798 : * cycle collector depends on the invariant that there are no black to gray
1799 :      * edges in the GC heap. This invariant lets the CC avoid tracing through
1800 :      * black objects. If it is violated, the cycle collector may free
1801 : * objects that are still reachable.
1802 : */
1803 51092 : void setMarkColorGray() {
1804 51092 : JS_ASSERT(isDrained());
1805 51092 : JS_ASSERT(color == gc::BLACK);
1806 51092 : color = gc::GRAY;
1807 51092 : }
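
    /*
     * Order-of-use sketch (derived from the assertions above, not new API):
     * finish and drain all black marking, call setMarkColorGray(), then push
     * and mark the gray roots; there is no way back to black within a GC.
     */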
1808 :
1809 : inline void delayMarkingArena(gc::ArenaHeader *aheader);
1810 : void delayMarkingChildren(const void *thing);
1811 : void markDelayedChildren(gc::ArenaHeader *aheader);
1812 : bool markDelayedChildren(SliceBudget &budget);
1813 102504 : bool hasDelayedChildren() const {
1814 102504 : return !!unmarkedArenaStackTop;
1815 : }
1816 :
1817 462626 : bool isDrained() {
1818 462626 : return isMarkStackEmpty() && !unmarkedArenaStackTop;
1819 : }
1820 :
1821 : bool drainMarkStack(SliceBudget &budget);
1822 :
1823 : /*
1824 : * Gray marking must be done after all black marking is complete. However,
1825 : * we do not have write barriers on XPConnect roots. Therefore, XPConnect
1826 : * roots must be accumulated in the first slice of incremental GC. We
1827 :      * buffer these roots in the marker's grayRoots vector and mark them later,
1828 : * after black marking is complete. This accumulation can fail, but in that
1829 : * case we switch to non-incremental GC.
1830 : */
1831 : bool hasBufferedGrayRoots() const;
1832 : void startBufferingGrayRoots();
1833 : void endBufferingGrayRoots();
1834 : void markBufferedGrayRoots();
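
    /*
     * Intended call order, sketched from the declarations above (no new API):
     * startBufferingGrayRoots() before roots are traced in the first slice,
     * endBufferingGrayRoots() once root gathering is done, and
     * markBufferedGrayRoots() after black marking has drained. If
     * hasBufferedGrayRoots() returns false, buffering failed and the
     * collector falls back to a non-incremental GC.
     */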
1835 :
1836 : static void GrayCallback(JSTracer *trc, void **thing, JSGCTraceKind kind);
1837 :
1838 : size_t sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf) const;
1839 :
1840 : MarkStack<uintptr_t> stack;
1841 :
1842 : private:
1843 : #ifdef DEBUG
1844 : void checkCompartment(void *p);
1845 : #else
1846 : void checkCompartment(void *p) {}
1847 : #endif
1848 :
1849 19320795 : void pushTaggedPtr(StackTag tag, void *ptr) {
1850 19320795 : checkCompartment(ptr);
1851 19320795 : uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
1852 19320795 : JS_ASSERT(!(addr & StackTagMask));
1853 19320795 : if (!stack.push(addr | uintptr_t(tag)))
1854 0 : delayMarkingChildren(ptr);
1855 19320795 : }
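
    /*
     * Decoding sketch: cells are aligned at least to CellMask + 1 bytes, so
     * the low three bits of the pushed word are free to carry the tag:
     *
     *   uintptr_t word = stack.pop();
     *   StackTag tag = StackTag(word & StackTagMask);
     *   void *ptr = reinterpret_cast<void *>(word & ~StackTagMask);
     */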
1856 :
1857 46072024 : void pushValueArray(JSObject *obj, void *start, void *end) {
1858 46072024 : checkCompartment(obj);
1859 :
1860 46072024 : JS_ASSERT(start <= end);
1861 46072024 : uintptr_t tagged = reinterpret_cast<uintptr_t>(obj) | GCMarker::ValueArrayTag;
1862 46072024 : uintptr_t startAddr = reinterpret_cast<uintptr_t>(start);
1863 46072024 : uintptr_t endAddr = reinterpret_cast<uintptr_t>(end);
1864 :
1865 : /*
1866 : * Push in the reverse order so obj will be on top. If we cannot push
1867 : * the array, we trigger delay marking for the whole object.
1868 : */
1869 46072024 : if (!stack.push(endAddr, startAddr, tagged))
1870 0 : delayMarkingChildren(obj);
1871 46072024 : }
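
    /*
     * Pop-order sketch: pop() returns the most recently pushed word first, so
     * a consumer sees the tagged |obj| word, then |start|, then |end|.
     */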
1872 :
1873 464025 : bool isMarkStackEmpty() {
1874 464025 : return stack.isEmpty();
1875 : }
1876 :
1877 : bool restoreValueArray(JSObject *obj, void **vpp, void **endp);
1878 : void saveValueRanges();
1879 : inline void processMarkStackTop(SliceBudget &budget);
1880 : void processMarkStackOther(uintptr_t tag, uintptr_t addr);
1881 :
1882 : void appendGrayRoot(void *thing, JSGCTraceKind kind);
1883 :
1884 : /* The color is only applied to objects, functions and xml. */
1885 : uint32_t color;
1886 :
1887 : DebugOnly<bool> started;
1888 :
1889 : /* Pointer to the top of the stack of arenas we are delaying marking on. */
1890 : js::gc::ArenaHeader *unmarkedArenaStackTop;
1891 : /* Count of arenas that are currently in the stack. */
1892 : DebugOnly<size_t> markLaterArenas;
1893 :
1894 467078 : struct GrayRoot {
1895 : void *thing;
1896 : JSGCTraceKind kind;
1897 : #ifdef DEBUG
1898 : JSTraceNamePrinter debugPrinter;
1899 : const void *debugPrintArg;
1900 : size_t debugPrintIndex;
1901 : #endif
1902 :
1903 199525 : GrayRoot(void *thing, JSGCTraceKind kind)
1904 199525 : : thing(thing), kind(kind) {}
1905 : };
1906 :
1907 : bool grayFailed;
1908 : Vector<GrayRoot, 0, SystemAllocPolicy> grayRoots;
1909 : };
1910 :
1911 : void
1912 : SetMarkStackLimit(JSRuntime *rt, size_t limit);
1913 :
1914 : void
1915 : MarkStackRangeConservatively(JSTracer *trc, Value *begin, Value *end);
1916 :
1917 : typedef void (*IterateChunkCallback)(JSRuntime *rt, void *data, gc::Chunk *chunk);
1918 : typedef void (*IterateArenaCallback)(JSRuntime *rt, void *data, gc::Arena *arena,
1919 : JSGCTraceKind traceKind, size_t thingSize);
1920 : typedef void (*IterateCellCallback)(JSRuntime *rt, void *data, void *thing,
1921 : JSGCTraceKind traceKind, size_t thingSize);
1922 :
1923 : /*
1924 : * This function calls |compartmentCallback| on every compartment,
1925 : * |arenaCallback| on every in-use arena, and |cellCallback| on every in-use
1926 : * cell in the GC heap.
1927 : */
1928 : extern JS_FRIEND_API(void)
1929 : IterateCompartmentsArenasCells(JSRuntime *rt, void *data,
1930 : JSIterateCompartmentCallback compartmentCallback,
1931 : IterateArenaCallback arenaCallback,
1932 : IterateCellCallback cellCallback);
1933 :
1934 : /*
1935 : * Invoke chunkCallback on every in-use chunk.
1936 : */
1937 : extern JS_FRIEND_API(void)
1938 : IterateChunks(JSRuntime *rt, void *data, IterateChunkCallback chunkCallback);
1939 :
1940 : /*
1941 :  * Invoke cellCallback on every in-use cell of the specified thing kind, either
1942 :  * for the given compartment or for all compartments if |compartment| is null.
1943 : */
1944 : extern JS_FRIEND_API(void)
1945 : IterateCells(JSRuntime *rt, JSCompartment *compartment, gc::AllocKind thingKind,
1946 : void *data, IterateCellCallback cellCallback);
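
/*
 * Usage sketch; the callback and helper below are illustrative, not part of
 * the original API. A counter is threaded through the opaque |data| pointer
 * to count the in-use cells of one kind across all compartments.
 */
static inline void
CountCellSketch(JSRuntime *, void *data, void *, JSGCTraceKind, size_t)
{
    *static_cast<size_t *>(data) += 1;
}

static inline size_t
CountCellsOfKindSketch(JSRuntime *rt, gc::AllocKind kind)
{
    size_t count = 0;
    IterateCells(rt, NULL, kind, &count, CountCellSketch);  /* NULL: all compartments */
    return count;
}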
1947 :
1948 : } /* namespace js */
1949 :
1950 : extern void
1951 : js_FinalizeStringRT(JSRuntime *rt, JSString *str);
1952 :
1953 : /*
1954 :  * Macro to test whether a traversal is part of the marking phase of the GC.
1955 : */
1956 : #define IS_GC_MARKING_TRACER(trc) \
1957 : ((trc)->callback == NULL || (trc)->callback == GCMarker::GrayCallback)
1958 :
1959 : namespace js {
1960 : namespace gc {
1961 :
1962 : JSCompartment *
1963 : NewCompartment(JSContext *cx, JSPrincipals *principals);
1964 :
1965 : /* Tries to run a GC no matter what (used for GC zeal). */
1966 : void
1967 : RunDebugGC(JSContext *cx);
1968 :
1969 : void
1970 : SetDeterministicGC(JSContext *cx, bool enabled);
1971 :
1972 : #if defined(JSGC_ROOT_ANALYSIS) && defined(DEBUG) && !defined(JS_THREADSAFE)
1973 : /* Overwrites stack references to GC things which have not been rooted. */
1974 : void CheckStackRoots(JSContext *cx);
1975 :
1976 : inline void MaybeCheckStackRoots(JSContext *cx) { CheckStackRoots(cx); }
1977 : #else
1978 245571019 : inline void MaybeCheckStackRoots(JSContext *cx) {}
1979 : #endif
1980 :
1981 : const int ZealPokeValue = 1;
1982 : const int ZealAllocValue = 2;
1983 : const int ZealFrameGCValue = 3;
1984 : const int ZealVerifierValue = 4;
1985 : const int ZealFrameVerifierValue = 5;
1986 :
1987 : #ifdef JS_GC_ZEAL
1988 :
1989 : /* Check that write barriers have been used correctly. See jsgc.cpp. */
1990 : void
1991 : VerifyBarriers(JSContext *cx);
1992 :
1993 : void
1994 : MaybeVerifyBarriers(JSContext *cx, bool always = false);
1995 :
1996 : #else
1997 :
1998 : static inline void
1999 : VerifyBarriers(JSContext *cx)
2000 : {
2001 : }
2002 :
2003 : static inline void
2004 : MaybeVerifyBarriers(JSContext *cx, bool always = false)
2005 : {
2006 : }
2007 :
2008 : #endif
2009 :
2010 : } /* namespace gc */
2011 :
2012 : static inline JSCompartment *
2013 : GetObjectCompartment(JSObject *obj) { return reinterpret_cast<js::gc::Cell *>(obj)->compartment(); }
2014 :
2015 : } /* namespace js */
2016 :
2017 : #endif /* jsgc_h___ */