Backed out changeset d89d4281fd8d (bug 933313) for bustage on a CLOSED TREE

This commit is contained in:
Carsten "Tomcat" Book 2014-06-06 10:18:04 +02:00
parent 032f277d67
commit 352f40fc4e
48 changed files with 204 additions and 2335 deletions

View File

@ -55,23 +55,11 @@ static const uint32_t BLACK = 0;
static const uint32_t GRAY = 1;
/*
* The "location" field in the Chunk trailer is a bit vector indicating various
* roles of the chunk.
*
* The value 0 for the "location" field is invalid, at least one bit must be
* set.
*
* Some bits preclude others, for example, any "nursery" bit precludes any
* "tenured" or "middle generation" bit.
* Constants used to indicate whether a chunk is part of the tenured heap or the
* nursery.
*/
const uintptr_t ChunkLocationBitNursery = 1; // Standard GGC nursery
const uintptr_t ChunkLocationBitTenuredHeap = 2; // Standard GGC tenured generation
const uintptr_t ChunkLocationBitPJSNewspace = 4; // The PJS generational GC's allocation space
const uintptr_t ChunkLocationBitPJSFromspace = 8; // The PJS generational GC's fromspace (during GC)
const uintptr_t ChunkLocationAnyNursery = ChunkLocationBitNursery |
ChunkLocationBitPJSNewspace |
ChunkLocationBitPJSFromspace;
const uint32_t ChunkLocationNursery = 0;
const uint32_t ChunkLocationTenuredHeap = 1;
#ifdef JS_DEBUG
/* When downcasting, ensure we are actually the right type. */
@ -237,8 +225,9 @@ IsInsideNursery(const js::gc::Cell *cell)
addr &= ~js::gc::ChunkMask;
addr |= js::gc::ChunkLocationOffset;
uint32_t location = *reinterpret_cast<uint32_t *>(addr);
JS_ASSERT(location != 0);
return location & ChunkLocationAnyNursery;
JS_ASSERT(location == gc::ChunkLocationNursery ||
location == gc::ChunkLocationTenuredHeap);
return location == gc::ChunkLocationNursery;
#else
return false;
#endif

View File

@ -47,7 +47,6 @@ namespace js {}
#define JS_ALLOCATED_TENURED_PATTERN 0x4D
#define JS_SWEPT_CODE_PATTERN 0x3b
#define JS_SWEPT_FRAME_PATTERN 0x5b
#define JS_POISONED_FORKJOIN_CHUNK 0xBD
#define JS_ASSERT(expr) MOZ_ASSERT(expr)
#define JS_ASSERT_IF(cond, expr) MOZ_ASSERT_IF(cond, expr)

View File

@ -692,7 +692,7 @@ function ArrayMapPar(func, mode) {
break parallel;
var slicesInfo = ComputeSlicesInfo(length);
ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode), buffer);
ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode));
return buffer;
}
@ -741,7 +741,7 @@ function ArrayReducePar(func, mode) {
var numSlices = slicesInfo.count;
var subreductions = NewDenseArray(numSlices);
ForkJoin(reduceThread, 0, numSlices, ForkJoinMode(mode), null);
ForkJoin(reduceThread, 0, numSlices, ForkJoinMode(mode));
var accumulator = subreductions[0];
for (var i = 1; i < numSlices; i++)
@ -800,7 +800,7 @@ function ArrayScanPar(func, mode) {
var numSlices = slicesInfo.count;
// Scan slices individually (see comment on phase1()).
ForkJoin(phase1, 0, numSlices, ForkJoinMode(mode), buffer);
ForkJoin(phase1, 0, numSlices, ForkJoinMode(mode));
// Compute intermediates array (see comment on phase2()).
var intermediates = [];
@ -816,7 +816,7 @@ function ArrayScanPar(func, mode) {
// We start from slice 1 instead of 0 since there is no work to be done
// for slice 0.
if (numSlices > 1)
ForkJoin(phase2, 1, numSlices, ForkJoinMode(mode), buffer);
ForkJoin(phase2, 1, numSlices, ForkJoinMode(mode));
return buffer;
}
@ -1030,7 +1030,7 @@ function ArrayFilterPar(func, mode) {
UnsafePutElements(counts, i, 0);
var survivors = new Uint8Array(length);
ForkJoin(findSurvivorsThread, 0, numSlices, ForkJoinMode(mode), survivors);
ForkJoin(findSurvivorsThread, 0, numSlices, ForkJoinMode(mode));
// Step 2. Compress the slices into one contiguous set.
var count = 0;
@ -1038,7 +1038,7 @@ function ArrayFilterPar(func, mode) {
count += counts[i];
var buffer = NewDenseArray(count);
if (count > 0)
ForkJoin(copySurvivorsThread, 0, numSlices, ForkJoinMode(mode), buffer);
ForkJoin(copySurvivorsThread, 0, numSlices, ForkJoinMode(mode));
return buffer;
}
@ -1148,7 +1148,7 @@ function ArrayStaticBuildPar(length, func, mode) {
break parallel;
var slicesInfo = ComputeSlicesInfo(length);
ForkJoin(constructThread, 0, slicesInfo.count, ForkJoinMode(mode), buffer);
ForkJoin(constructThread, 0, slicesInfo.count, ForkJoinMode(mode));
return buffer;
}

View File

@ -1195,7 +1195,7 @@ function MapTypedParImplDepth1(inArray, inArrayType, outArrayType, func) {
// relative to its owner (which is often but not always 0).
const inBaseOffset = TYPEDOBJ_BYTEOFFSET(inArray);
ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode), outArray);
ForkJoin(mapThread, 0, slicesInfo.count, ForkJoinMode(mode));
return outArray;
function mapThread(workerId, sliceStart, sliceEnd) {
@ -1251,17 +1251,11 @@ function MapTypedParImplDepth1(inArray, inArrayType, outArrayType, func) {
inOffset += inGrainTypeSize;
outOffset += outGrainTypeSize;
#ifndef JSGC_FJGENERATIONAL
// A transparent result type cannot contain references, and
// hence there is no way for a pointer to a thread-local object
// to escape.
//
// This has been disabled for the PJS generational collector
// as it probably has little effect in that setting and adds
// per-iteration cost.
if (outGrainTypeIsTransparent)
ClearThreadLocalArenas();
#endif
}
}

View File

@ -3199,8 +3199,6 @@ MOZ_ARG_DISABLE_BOOL(gcgenerational,
if test -n "$JSGC_GENERATIONAL"; then
AC_DEFINE(JSGC_GENERATIONAL)
fi
JSGC_GENERATIONAL_CONFIGURED=$JSGC_GENERATIONAL
AC_SUBST(JSGC_GENERATIONAL_CONFIGURED)
dnl ========================================================
dnl = Use exact stack rooting for GC

View File

@ -1,85 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef gc_ForkJoinNursery_inl_h
#define gc_ForkJoinNursery_inl_h
#ifdef JSGC_FJGENERATIONAL
#include "gc/ForkJoinNursery.h"
namespace js {
namespace gc {
// For the following two predicates we can't check the attributes on
// the chunk trailer because it's not known whether addr points into a
// chunk.
//
// A couple of optimizations are possible if performance is an issue:
//
// - The loop can be unrolled, and we can arrange for all array entries
// to be valid for this purpose so that the bound is constant.
// - The per-chunk test can be reduced to testing whether the high bits
// of the object pointer and the high bits of the chunk pointer are
// the same (and the latter value is essentially space[i]).
// Note, experiments with that do not show an improvement yet.
// - Taken together, those optimizations yield code that is one LOAD,
// one XOR, and one AND for each chunk, with the result being true
// iff the resulting value is zero.
// - We can have multiple versions of the predicates, and those that
// take known-good GCThing types can go directly to the attributes;
// it may be possible to ensure that more calls use GCThing types.
// Note, this requires the worker ID to be part of the chunk
// attribute bit vector.
//
// Performance may not be an issue as there may be few survivors of a
// collection in the ForkJoinNursery and few objects will be tested.
// If so then the bulk of the calls may come from the code that scans
// the roots. Behavior will be workload-dependent however.
MOZ_ALWAYS_INLINE bool
ForkJoinNursery::isInsideNewspace(const void *addr)
{
    // Linear scan over the chunks allocated so far; see the optimization
    // notes above if this ever becomes hot.
    const uintptr_t probe = reinterpret_cast<uintptr_t>(addr);
    bool found = false;
    for (unsigned i = 0; !found && i <= currentChunk_; i++)
        found = probe >= newspace[i]->start() && probe < newspace[i]->end();
    return found;
}
MOZ_ALWAYS_INLINE bool
ForkJoinNursery::isInsideFromspace(const void *addr)
{
    // Same chunk-by-chunk range test as isInsideNewspace, but against the
    // fromspace table, which is only populated during a collection.
    const uintptr_t probe = reinterpret_cast<uintptr_t>(addr);
    unsigned i = 0;
    while (i < numFromspaceChunks_) {
        if (probe >= fromspace[i]->start() && probe < fromspace[i]->end())
            return true;
        ++i;
    }
    return false;
}
template <typename T>
MOZ_ALWAYS_INLINE bool
ForkJoinNursery::getForwardedPointer(T **ref)
{
    // If *ref (which must point into fromspace) has already been copied,
    // update *ref to the tospace copy and report true; otherwise leave it
    // untouched and report false.
    JS_ASSERT(ref);
    JS_ASSERT(isInsideFromspace(*ref));
    const RelocationOverlay *overlay = reinterpret_cast<const RelocationOverlay *>(*ref);
    if (overlay->isForwarded()) {
        // This static_cast from Cell* restricts T to valid (GC thing) types.
        *ref = static_cast<T *>(overlay->forwardingAddress());
        return true;
    }
    return false;
}
} // namespace gc
} // namespace js
#endif // JSGC_FJGENERATIONAL
#endif // gc_ForkJoinNursery_inl_h

View File

@ -1,907 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifdef JSGC_FJGENERATIONAL
#include "gc/ForkJoinNursery-inl.h"
#include "mozilla/IntegerPrintfMacros.h"
#include "prmjtime.h"
#include "gc/Heap.h"
#include "jit/IonFrames.h"
#include "vm/ArrayObject.h"
#include "vm/ForkJoin.h"
#include "vm/TypedArrayObject.h"
#include "jsgcinlines.h"
#include "gc/Nursery-inl.h"
// The ForkJoinNursery provides an object nursery for movable object
// types for one ForkJoin worker thread. There is a one-to-one
// correspondence between ForkJoinNursery and ForkJoinContext.
//
// For a general overview of how the ForkJoinNursery fits into the
// overall PJS system, see the comment block in vm/ForkJoin.h.
//
//
// Invariants on the ForkJoinNursery:
//
// Let "the tenured area" from the point of view of one
// ForkJoinNursery comprise the global tenured area and the nursery's
// owning worker's private tenured area. Then:
//
// - There can be pointers from the tenured area into a ForkJoinNursery,
// and from the ForkJoinNursery into the tenured area
//
// - There *cannot* be a pointer from one ForkJoinNursery into
// another, or from one private tenured area into another, or from a
// ForkJoinNursery into another worker's private tenured area or vice
// versa, or from any ForkJoinNursery or private tenured area into
// the normal Nursery.
//
// For those invariants to hold the normal Nursery must be empty before
// a ForkJoin section.
//
//
// General description:
//
// The nursery maintains a space into which small, movable objects
// are allocated. Other objects are allocated directly in the private
// tenured area for the worker.
//
// If an allocation request can't be satisfied because the nursery is
// full then a /minor collection/ is triggered without bailouts. This
// collection copies nursery-allocated objects reachable from the
// worker's roots into a fresh space. Then the old space is
// discarded.
//
// Nurseries are maintained in 1MB chunks. If the live data in a
// nursery after a collection exceeds some set fraction (currently
// 1/3) then the nursery is grown, independently of other nurseries.
//
// There is an upper limit on the number of chunks in a nursery. If
// the live data in a nursery after a collection exceeds the set
// fraction and the nursery can't grow, then the next collection will
// be an /evacuating collection/.
//
// An evacuating collection copies nursery-allocated objects reachable
// from the worker's roots into the worker's private tenured area.
//
// If an allocation request in the tenured area - whether the request
// comes from the mutator or from the garbage collector during
// evacuation - can't be satisfied because the tenured area is full,
// then the worker bails out and triggers a full collection in the
// ForkJoin worker's zone. This is expected to happen very rarely in
// practice.
//
// The roots for a collection in the ForkJoinNursery are: the frames
// of the execution stack, any registered roots on the execution
// stack, any objects in the private tenured area, and the ForkJoin
// result object in the common tenured area.
//
// The entire private tenured area is considered to be rooted in order
// not to have to run write barriers during the ForkJoin section.
// During a minor or evacuating collection in a worker the GC will
// step through the worker's tenured area, examining each object for
// pointers into the nursery.
//
// The ForkJoinNursery contains its own object tracing machinery for
// most of the types that can be allocated in the nursery. But it
// does not handle all types, and there are two places where the code
// in ForkJoinNursery loses control of the tracing:
//
// - When calling clasp->trace() in traceObject()
// - When calling MarkForkJoinStack() in forwardFromStack()
//
// In both cases:
//
// - We pass a ForkJoinNurseryCollectionTracer object with a callback
// to ForkJoinNursery::MinorGCCallback
//
// - We should only ever end up in MarkInternal() in Marking.cpp, in
// the case in that code that calls back to trc->callback. We
// should /never/ end up in functions that trigger use of the mark
// stack internal to the general GC's marker.
//
// - Any function along the path to MarkInternal() that asks about
// whether something is in the nursery or is tenured /must/ be aware
// that there can be multiple nursery and tenured areas; assertions
// get this wrong a lot of the time and must be fixed when they do.
// In practice, such code either must have a case for each nursery
// kind or must use the IsInsideNursery(Cell*) method, which looks
// only at the chunk tag.
//
//
// Terminological note:
//
// - While the mutator is running it is allocating in what's known as
// the nursery's "newspace". The mutator may also allocate directly
// in the tenured space, but the tenured space is not part of the
// newspace.
//
// - While the gc is running, the previous "newspace" has been renamed
// as the gc's "fromspace", and the space that objects are copied
// into is known as the "tospace". The tospace may be a nursery
// space (during a minor collection), or it may be a tenured space
// (during an evacuation collection), but it's always one or the
// other, never a combination. After gc the fromspace is always
// discarded.
//
// - If the gc copies objects into a nursery tospace then this tospace
// becomes known as the "newspace" following gc. Otherwise, a new
// newspace won't be needed (if the parallel section is finished) or
// can be created empty (if the gc just needed to evacuate).
//
//
// Style note:
//
// - Use js_memcpy, malloc_, realloc_, and js_free uniformly, do not
// use PodCopy or pod_malloc: the type information for the latter is
// not always correct and surrounding code usually operates in terms
// of bytes, anyhow.
//
// With power comes responsibility, etc: code that used pod_malloc
// gets safe size computation built-in; here we must handle that
// manually.
namespace js {
namespace gc {
// Construct the nursery for one ForkJoin worker. |tenured| is the worker's
// private tenured-area allocator; |shared| provides chunk allocation, the
// runtime, and other services shared across workers. Crashes (does not
// return) on OOM during setup.
ForkJoinNursery::ForkJoinNursery(ForkJoinContext *cx, ForkJoinGCShared *shared, Allocator *tenured)
  : cx_(cx)
  , tenured_(tenured)
  , shared_(shared)
  , evacuationZone_(nullptr)
  , currentStart_(0)
  , currentEnd_(0)
  , position_(0)
  , currentChunk_(0)
  , numActiveChunks_(0)
  , numFromspaceChunks_(0)
  , mustEvacuate_(false)
  , isEvacuating_(false)
  , movedSize_(0)
  , head_(nullptr)
  , tail_(&head_)
  , hugeSlotsNew(0)
  , hugeSlotsFrom(1)
{
    // Chunk tables start out empty; chunks are allocated lazily by
    // setCurrentChunk().
    for ( size_t i=0 ; i < MaxNurseryChunks ; i++ ) {
        newspace[i] = nullptr;
        fromspace[i] = nullptr;
    }
    if (!hugeSlots[hugeSlotsNew].init() || !hugeSlots[hugeSlotsFrom].init())
        CrashAtUnhandlableOOM("Cannot initialize PJS nursery");
    initNewspace(); // This can fail to return
}
ForkJoinNursery::~ForkJoinNursery()
{
    // Return any allocated newspace chunks to the shared pool. Entries may
    // be null because chunks are allocated lazily (see setCurrentChunk).
    size_t i = 0;
    while (i < numActiveChunks_) {
        if (newspace[i])
            shared_->freeNurseryChunk(newspace[i]);
        ++i;
    }
}
void
ForkJoinNursery::minorGC()
{
    // Normally copy survivors into a fresh newspace; if the previous cycle
    // flagged the nursery as overfull (see computeNurserySizeAfterGC),
    // evacuate into the tenured area instead and clear the flag.
    if (!mustEvacuate_) {
        pjsCollection(Collect|Recreate);
    } else {
        mustEvacuate_ = false;
        pjsCollection(Evacuate|Recreate);
    }
}
// Copy all survivors into the tenured area without recreating a newspace
// (no Recreate bit) - used when the nursery is being shut down.
void
ForkJoinNursery::evacuatingGC()
{
    pjsCollection(Evacuate);
}
#define TIME_START(name) int64_t timstampStart_##name = PRMJ_Now()
#define TIME_END(name) int64_t timstampEnd_##name = PRMJ_Now()
#define TIME_TOTAL(name) (timstampEnd_##name - timstampStart_##name)
// Run one collection. |op| carries exactly one of Collect (copy survivors
// into a fresh nursery newspace) or Evacuate (copy survivors into the
// tenured area), optionally OR'ed with Recreate (set up a new newspace
// afterwards so the mutator can continue allocating). The phase order
// below - flip, root forwarding, fixed-point tracing, jit activation
// update, fromspace free, sweep - is significant.
void
ForkJoinNursery::pjsCollection(int op)
{
    JS_ASSERT((op & Collect) != (op & Evacuate));
    bool evacuate = op & Evacuate;
    bool recreate = op & Recreate;
    // No collection may already be in progress.
    JS_ASSERT(!isEvacuating_);
    JS_ASSERT(!evacuationZone_);
    JS_ASSERT(!head_);
    JS_ASSERT(tail_ == &head_);
    JSRuntime *const rt = shared_->runtime();
    // Remember the pre-GC nursery size so the recreated newspace can be
    // restored to at least that size below.
    const unsigned currentNumActiveChunks_ = numActiveChunks_;
    const char *msg = "";
    JS_ASSERT(!rt->needsBarrier());
    TIME_START(pjsCollection);
    rt->incFJMinorCollecting();
    if (evacuate) {
        isEvacuating_ = true;
        evacuationZone_ = shared_->zone();
    }
    flip();
    if (recreate) {
        initNewspace();
        // newspace must be at least as large as fromspace
        numActiveChunks_ = currentNumActiveChunks_;
    }
    ForkJoinNurseryCollectionTracer trc(rt, this);
    forwardFromRoots(&trc);
    collectToFixedPoint(&trc);
#ifdef JS_ION
    jit::UpdateJitActivationsForMinorGC(TlsPerThreadData.get(), &trc);
#endif
    freeFromspace();
    size_t live = movedSize_;
    computeNurserySizeAfterGC(live, &msg);
    sweepHugeSlots();
    JS_ASSERT(hugeSlots[hugeSlotsFrom].empty());
    JS_ASSERT_IF(isEvacuating_, hugeSlots[hugeSlotsNew].empty());
    // Reset per-collection state for the next cycle.
    isEvacuating_ = false;
    evacuationZone_ = nullptr;
    head_ = nullptr;
    tail_ = &head_;
    movedSize_ = 0;
    rt->decFJMinorCollecting();
    TIME_END(pjsCollection);
    // Note, the spew is awk-friendly, non-underlined words serve as markers:
    //   FJGC _tag_ us _value_ copied _value_ size _value_ _message-word_ ...
    shared_->spewGC("FJGC %s us %5" PRId64 " copied %7" PRIu64 " size %" PRIu64 " %s",
                    (evacuate ? "evacuate " : "collect "),
                    TIME_TOTAL(pjsCollection),
                    (uint64_t)live,
                    (uint64_t)numActiveChunks_*1024*1024,
                    msg);
}
#undef TIME_START
#undef TIME_END
#undef TIME_TOTAL
// Decide the nursery size for the next cycle from |live|, the number of
// bytes copied by the collection that just finished; *msg receives a note
// for the GC spew. May set mustEvacuate_ when the nursery cannot grow.
void
ForkJoinNursery::computeNurserySizeAfterGC(size_t live, const char **msg)
{
    // Grow the nursery if it is too full. Do not bother to shrink it - lazy
    // chunk allocation means that a too-large nursery will not really be a problem,
    // the entire nursery will be deallocated soon anyway.
    if (live * NurseryLoadFactor > numActiveChunks_ * ForkJoinNurseryChunk::UsableSize) {
        if (numActiveChunks_ < MaxNurseryChunks) {
            // Add chunks until the load factor is satisfied or we hit the cap.
            while (numActiveChunks_ < MaxNurseryChunks &&
                   live * NurseryLoadFactor > numActiveChunks_ * ForkJoinNurseryChunk::UsableSize)
            {
                ++numActiveChunks_;
            }
        } else {
            // Evacuation will tend to drive us toward the cliff of a bailout GC, which
            // is not good, probably worse than working within the thread at a higher load
            // than desirable.
            //
            // Thus it's possible to be more sophisticated than this:
            //
            // - evacuate only after several minor GCs in a row exceeded the set load
            // - evacuate only if significantly less space than required is available, eg,
            //   if only 1/2 the required free space is available
            *msg = "  Overfull, will evacuate next";
            mustEvacuate_ = true;
        }
    }
}
// Move every allocated newspace chunk into fromspace, retagging each chunk
// trailer, and swap the roles of the two huge-slots sets. Afterwards the
// nursery owns no newspace chunks.
void
ForkJoinNursery::flip()
{
    size_t i;
    for (i=0; i < numActiveChunks_; i++) {
        // Stop at the first chunk that was never lazily allocated.
        if (!newspace[i])
            break;
        fromspace[i] = newspace[i];
        newspace[i] = nullptr;
        fromspace[i]->trailer.location = gc::ChunkLocationBitPJSFromspace;
    }
    numFromspaceChunks_ = i;
    numActiveChunks_ = 0;
    // Swap the new/from indices into the hugeSlots array.
    int tmp = hugeSlotsNew;
    hugeSlotsNew = hugeSlotsFrom;
    hugeSlotsFrom = tmp;
    JS_ASSERT(hugeSlots[hugeSlotsNew].empty());
}
void
ForkJoinNursery::freeFromspace()
{
for (size_t i=0; i < numFromspaceChunks_; i++) {
shared_->freeNurseryChunk(fromspace[i]);
fromspace[i] = nullptr;
}
numFromspaceChunks_ = 0;
}
// Set up a one-chunk newspace. setCurrentChunk() allocates the chunk and
// crashes on OOM, so this can fail to return.
void
ForkJoinNursery::initNewspace()
{
    JS_ASSERT(newspace[0] == nullptr);
    JS_ASSERT(numActiveChunks_ == 0);
    numActiveChunks_ = 1;
    setCurrentChunk(0);
}
// Returns true iff *thingp is a fromspace object that has not yet been
// copied. NB: when the object HAS been forwarded, getForwardedPointer()
// repoints *thingp to the tospace copy as a side effect, so callers need
// not handle that case separately.
MOZ_ALWAYS_INLINE bool
ForkJoinNursery::shouldMoveObject(void **thingp)
{
    // Note that thingp must really be a T** where T is some GCThing,
    // ie, something that lives in a chunk (or nullptr).  This should
    // be the case because the MinorGCCallback is only called on exact
    // roots on the stack or slots within in tenured objects and not
    // on slot/element arrays that can be malloc'd; they are forwarded
    // using the forwardBufferPointer() mechanism.
    //
    // The main reason for that restriction is so that we can call a
    // method here that can check the chunk trailer for the cell (a
    // future optimization).
    Cell *cell = static_cast<Cell *>(*thingp);
    return isInsideFromspace(cell) && !getForwardedPointer(thingp);
}
// JSTracer callback used for all root and fixed-point tracing: copy the
// referent to tospace if needed and repoint *thingp at the copy.
/* static */ void
ForkJoinNursery::MinorGCCallback(JSTracer *trcArg, void **thingp, JSGCTraceKind traceKind)
{
    // traceKind can be all sorts of things, when we're marking from stack roots
    ForkJoinNursery *nursery = static_cast<ForkJoinNurseryCollectionTracer *>(trcArg)->nursery_;
    if (nursery->shouldMoveObject(thingp)) {
        // When other types of objects become nursery-allocable then the static_cast
        // to JSObject * will no longer be valid.
        JS_ASSERT(traceKind == JSTRACE_OBJECT);
        *thingp = nursery->moveObjectToTospace(static_cast<JSObject *>(*thingp));
    }
}
// Trace all roots of this nursery: the shared updatable object, the
// execution stack, and the whole private tenured area.
void
ForkJoinNursery::forwardFromRoots(ForkJoinNurseryCollectionTracer *trc)
{
    // There should be no other roots as a result of effect-freedom.
    forwardFromUpdatable(trc);
    forwardFromStack(trc);
    forwardFromTenured(trc);
}
void
ForkJoinNursery::forwardFromUpdatable(ForkJoinNurseryCollectionTracer *trc)
{
    // The shared "updatable" object, when present, is a root.
    if (JSObject *updatable = shared_->updatable())
        traceObject(trc, updatable);
}
// Delegate stack marking to the generic machinery; control returns to this
// nursery via MinorGCCallback (see the file comment).
void
ForkJoinNursery::forwardFromStack(ForkJoinNurseryCollectionTracer *trc)
{
    MarkForkJoinStack(trc);
}
// The entire private tenured area is treated as a root (see the file
// comment): walk every nursery-allocable arena in |tenured_| and trace each
// object in it for pointers into the nursery.
void
ForkJoinNursery::forwardFromTenured(ForkJoinNurseryCollectionTracer *trc)
{
    JSObject *objs[ArenaCellCount];
    for (size_t k=0; k < FINALIZE_LIMIT; k++) {
        AllocKind kind = (AllocKind)k;
        if (!IsFJNurseryAllocable(kind))
            continue;
        // When non-JSObject types become nursery-allocable the assumptions in the
        // loops below will no longer hold; other types than JSObject must be
        // handled.
        JS_ASSERT(kind <= FINALIZE_OBJECT_LAST);
        ArenaIter ai;
        ai.init(const_cast<Allocator *>(tenured_), kind);
        for (; !ai.done(); ai.next()) {
            // Do the walk in two steps to avoid problems resulting from allocating
            // into the arena that's being walked: ArenaCellIter is not safe for that.
            // It can happen during evacuation.
            //
            // ArenaCellIterUnderFinalize requires any free list to be flushed into
            // its arena, and since we may allocate within traceObject() we must
            // purge before each arena scan.  This is probably not very expensive,
            // it's constant work, and inlined.
            //
            // Use ArenaCellIterUnderFinalize, not ...UnderGC, because that side-steps
            // some assertions in the latter that are wrong for PJS collection.
            size_t numObjs = 0;
            tenured_->arenas.purge(kind);
            for (ArenaCellIterUnderFinalize i(ai.get()); !i.done(); i.next())
                objs[numObjs++] = i.get<JSObject>();
            for (size_t i=0; i < numObjs; i++)
                traceObject(trc, objs[i]);
        }
    }
}
// Tracer callback for slot/element buffer pointers: if *pSlotsElems points
// into fromspace, replace it with the forwarding address stored in the old
// buffer's first word.
/*static*/ void
ForkJoinNursery::forwardBufferPointer(JSTracer *trc, HeapSlot **pSlotsElems)
{
    ForkJoinNursery *nursery = static_cast<ForkJoinNurseryCollectionTracer *>(trc)->nursery_;
    HeapSlot *old = *pSlotsElems;
    if (!nursery->isInsideFromspace(old))
        return;
    // If the elements buffer is zero length, the "first" item could be inside
    // of the next object or past the end of the allocable area.  However,
    // since we always store the runtime as the last word in a nursery chunk,
    // isInsideFromspace will still be true, even if this zero-size allocation
    // abuts the end of the allocable area. Thus, it is always safe to read the
    // first word of |old| here.
    *pSlotsElems = *reinterpret_cast<HeapSlot **>(old);
    JS_ASSERT(!nursery->isInsideFromspace(*pSlotsElems));
}
// Trace every object already copied to tospace. traceObject() can copy more
// objects, which insertIntoFixupList() appends to this same list, so the
// loop continues until the list stops growing - the fixed point.
void
ForkJoinNursery::collectToFixedPoint(ForkJoinNurseryCollectionTracer *trc)
{
    for (RelocationOverlay *p = head_; p; p = p->next())
        traceObject(trc, static_cast<JSObject *>(p->forwardingAddress()));
}
// Make chunk |index| the current allocation chunk, allocating it on demand.
// Crashes (does not return) if a chunk cannot be obtained.
inline void
ForkJoinNursery::setCurrentChunk(int index)
{
    JS_ASSERT((size_t)index < numActiveChunks_);
    JS_ASSERT(!newspace[index]);
    currentChunk_ = index;
    ForkJoinNurseryChunk *c = shared_->allocateNurseryChunk();
    if (!c)
        CrashAtUnhandlableOOM("Cannot expand PJS nursery");
    // Tag the chunk trailer so pointer-classification code (eg
    // IsInsideNursery) can recognize this chunk as PJS newspace.
    c->trailer.runtime = shared_->runtime();
    c->trailer.location = gc::ChunkLocationBitPJSNewspace;
    c->trailer.storeBuffer = nullptr;
    currentStart_ = c->start();
    currentEnd_ = c->end();
    position_ = currentStart_;
    newspace[index] = c;
}
// Bump-allocate |size| bytes in newspace. Returns nullptr when all active
// chunks are exhausted; callers decide how to recover (GC or fallback).
void *
ForkJoinNursery::allocate(size_t size)
{
    JS_ASSERT(position_ >= currentStart_);
    if (currentEnd_ - position_ < size) {
        // Current chunk is full; move to the next active chunk, if any.
        // The tail of the current chunk is abandoned, not reused.
        if (currentChunk_ + 1 == numActiveChunks_)
            return nullptr;
        setCurrentChunk(currentChunk_ + 1);
    }
    void *thing = reinterpret_cast<void *>(position_);
    position_ += size;
    JS_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size);
    return thing;
}
// Allocate an object of |baseSize| bytes plus |numDynamic| contiguous
// dynamic slots. On failure returns nullptr with |tooLarge| distinguishing
// "slot array too big for the nursery" from "nursery full".
JSObject *
ForkJoinNursery::allocateObject(size_t baseSize, size_t numDynamic, bool& tooLarge)
{
    // Ensure there's enough space to replace the contents with a RelocationOverlay.
    JS_ASSERT(baseSize >= sizeof(js::gc::RelocationOverlay));
    // Too-large slot arrays cannot be accommodated.
    if (numDynamic > MaxNurserySlots) {
        tooLarge = true;
        return nullptr;
    }
    // Allocate slots contiguously after the object.
    size_t totalSize = baseSize + sizeof(HeapSlot) * numDynamic;
    JSObject *obj = static_cast<JSObject *>(allocate(totalSize));
    if (!obj) {
        // Nursery is full - a size-independent failure.
        tooLarge = false;
        return nullptr;
    }
    obj->setInitialSlots(numDynamic
                         ? reinterpret_cast<HeapSlot *>(size_t(obj) + baseSize)
                         : nullptr);
    return obj;
}
// Allocate a dynamic slot array of |nslots| slots for |obj|. Returns
// nullptr on overflow or OOM.
HeapSlot *
ForkJoinNursery::allocateSlots(JSObject *obj, uint32_t nslots)
{
    JS_ASSERT(obj);
    JS_ASSERT(nslots > 0);
    // Guard the nslots * sizeof(HeapSlot) multiplication below.
    if (nslots & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
        return nullptr;
    size_t size = nslots * sizeof(HeapSlot);
    // Slots for objects outside the nursery are plain malloc'd.
    if (!isInsideNewspace(obj))
        return reinterpret_cast<HeapSlot *>(cx_->malloc_(size));
    // Over-large arrays are malloc'd too, but tracked in hugeSlots.
    if (nslots > MaxNurserySlots)
        return allocateHugeSlots(nslots);
    HeapSlot *slots = static_cast<HeapSlot *>(allocate(size));
    if (slots)
        return slots;
    // Nursery space exhausted: fall back to a tracked malloc allocation.
    return allocateHugeSlots(nslots);
}
// Grow or shrink |obj|'s dynamic slot array from |oldCount| to |newCount|
// slots. Returns the (possibly moved) array, or nullptr on overflow/OOM.
HeapSlot *
ForkJoinNursery::reallocateSlots(JSObject *obj, HeapSlot *oldSlots,
                                 uint32_t oldCount, uint32_t newCount)
{
    // Guard the size multiplications below.
    if (newCount & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
        return nullptr;
    size_t oldSize = oldCount * sizeof(HeapSlot);
    size_t newSize = newCount * sizeof(HeapSlot);
    // Objects outside the nursery use plain realloc on the context heap.
    if (!isInsideNewspace(obj)) {
        JS_ASSERT_IF(oldSlots, !isInsideNewspace(oldSlots));
        return static_cast<HeapSlot *>(cx_->realloc_(oldSlots, oldSize, newSize));
    }
    // Nursery object whose slots were malloc'd (huge).
    if (!isInsideNewspace(oldSlots))
        return reallocateHugeSlots(oldSlots, oldSize, newSize);
    // No-op if we're shrinking, we can't make use of the freed portion.
    if (newCount < oldCount)
        return oldSlots;
    HeapSlot *newSlots = allocateSlots(obj, newCount);
    if (!newSlots)
        return nullptr;
    // The old nursery-internal array is simply abandoned.
    js_memcpy(newSlots, oldSlots, oldSize);
    return newSlots;
}
ObjectElements *
ForkJoinNursery::allocateElements(JSObject *obj, uint32_t nelems)
{
    // Elements share the slot-allocation machinery; the header occupies the
    // first VALUES_PER_HEADER slots.
    JS_ASSERT(nelems >= ObjectElements::VALUES_PER_HEADER);
    HeapSlot *storage = allocateSlots(obj, nelems);
    return reinterpret_cast<ObjectElements *>(storage);
}
ObjectElements *
ForkJoinNursery::reallocateElements(JSObject *obj, ObjectElements *oldHeader,
                                    uint32_t oldCount, uint32_t newCount)
{
    // Forward to reallocateSlots, viewing the element storage as slots.
    return reinterpret_cast<ObjectElements *>(
        reallocateSlots(obj, reinterpret_cast<HeapSlot *>(oldHeader), oldCount, newCount));
}
void
ForkJoinNursery::freeSlots(HeapSlot *slots)
{
    // Nursery-internal storage is reclaimed wholesale when the space is
    // discarded; only malloc'd arrays are freed eagerly, and they must be
    // removed from the huge-slots tracking set first.
    if (isInsideNewspace(slots))
        return;
    hugeSlots[hugeSlotsNew].remove(slots);
    js_free(slots);
}
// malloc a slot array on behalf of a nursery object and register it in the
// active hugeSlots set so it can be swept after the next collection.
HeapSlot *
ForkJoinNursery::allocateHugeSlots(size_t nslots)
{
    // Guard the multiplication below.
    if (nslots & mozilla::tl::MulOverflowMask<sizeof(HeapSlot)>::value)
        return nullptr;
    size_t size = nslots * sizeof(HeapSlot);
    HeapSlot *slots = reinterpret_cast<HeapSlot *>(cx_->malloc_(size));
    if (!slots)
        return slots;
    // If this put fails, we will only leak the slots.
    (void)hugeSlots[hugeSlotsNew].put(slots);
    return slots;
}
// realloc a tracked huge slot array, keeping the hugeSlots set in sync if
// the array moves.
HeapSlot *
ForkJoinNursery::reallocateHugeSlots(HeapSlot *oldSlots, uint32_t oldSize, uint32_t newSize)
{
    HeapSlot *newSlots = static_cast<HeapSlot *>(cx_->realloc_(oldSlots, oldSize, newSize));
    if (!newSlots)
        return newSlots;
    if (oldSlots != newSlots) {
        hugeSlots[hugeSlotsNew].remove(oldSlots);
        // If this put fails, we will only leak the slots.
        (void)hugeSlots[hugeSlotsNew].put(newSlots);
    }
    return newSlots;
}
// Free every malloc'd slot array still registered to fromspace - ie, those
// whose owners died or were evacuated (see copySlotsToTospace).
void
ForkJoinNursery::sweepHugeSlots()
{
    for (HugeSlotsSet::Range r = hugeSlots[hugeSlotsFrom].all(); !r.empty(); r.popFront())
        js_free(r.front());
    hugeSlots[hugeSlotsFrom].clear();
}
// Trace one object's outgoing edges: its class trace hook, then (for native
// objects) its dense elements and all of its slots.
MOZ_ALWAYS_INLINE void
ForkJoinNursery::traceObject(ForkJoinNurseryCollectionTracer *trc, JSObject *obj)
{
    // NB: clasp->trace may escape this file's tracing machinery; see the
    // file comment about MinorGCCallback.
    const Class *clasp = obj->getClass();
    if (clasp->trace)
        clasp->trace(trc, obj);
    if (!obj->isNative())
        return;
    if (!obj->hasEmptyElements())
        markSlots(obj->getDenseElements(), obj->getDenseInitializedLength());
    HeapSlot *fixedStart, *fixedEnd, *dynStart, *dynEnd;
    obj->getSlotRange(0, obj->slotSpan(), &fixedStart, &fixedEnd, &dynStart, &dynEnd);
    markSlots(fixedStart, fixedEnd);
    markSlots(dynStart, dynEnd);
}
// Convenience overload: mark |nslots| slots starting at |vp|.
MOZ_ALWAYS_INLINE void
ForkJoinNursery::markSlots(HeapSlot *vp, uint32_t nslots)
{
    markSlots(vp, vp + nslots);
}
// Mark every slot in the half-open range [vp, end).
MOZ_ALWAYS_INLINE void
ForkJoinNursery::markSlots(HeapSlot *vp, HeapSlot *end)
{
    for (; vp != end; ++vp)
        markSlot(vp);
}
// If *slotp holds a fromspace object, copy the object to tospace (or find
// its existing copy) and repoint the slot at the copy.
MOZ_ALWAYS_INLINE void
ForkJoinNursery::markSlot(HeapSlot *slotp)
{
    // Only object values can point into the nursery.
    if (!slotp->isObject())
        return;
    JSObject *obj = &slotp->toObject();
    if (!isInsideFromspace(obj))
        return;
    // Already copied: getForwardedPointer repointed |obj| at the copy.
    if (getForwardedPointer(&obj)) {
        slotp->unsafeGet()->setObject(*obj);
        return;
    }
    JSObject *moved = static_cast<JSObject *>(moveObjectToTospace(obj));
    slotp->unsafeGet()->setObject(*moved);
}
// Compute the AllocKind to use for the tospace copy of |obj|.
AllocKind
ForkJoinNursery::getObjectAllocKind(JSObject *obj)
{
    if (obj->is<ArrayObject>()) {
        JS_ASSERT(obj->numFixedSlots() == 0);
        // Use minimal size object if we are just going to copy the pointer.
        if (!isInsideFromspace((void *)obj->getElementsHeader()))
            return FINALIZE_OBJECT0_BACKGROUND;
        size_t nelements = obj->getDenseCapacity();
        return GetBackgroundAllocKind(GetGCArrayKind(nelements));
    }
    if (obj->is<JSFunction>())
        return obj->as<JSFunction>().getAllocKind();
    AllocKind kind = GetGCObjectFixedSlotsKind(obj->numFixedSlots());
    JS_ASSERT(!IsBackgroundFinalized(kind));
    JS_ASSERT(CanBeFinalizedInBackground(kind, obj->getClass()));
    return GetBackgroundAllocKind(kind);
}
// Allocate a cell of kind |thingKind| in tospace: the worker's tenured area
// when evacuating, otherwise the nursery's newspace.
void *
ForkJoinNursery::allocateInTospace(gc::AllocKind thingKind)
{
    size_t thingSize = Arena::thingSize(thingKind);
    if (isEvacuating_) {
        void *t = tenured_->arenas.allocateFromFreeList(thingKind, thingSize);
        if (t)
            return t;
        tenured_->arenas.checkEmptyFreeList(thingKind);
        // This call may return NULL but should do so only if memory
        // is truly exhausted.  However, allocateFromArena() can fail
        // either because memory is exhausted or if the allocation
        // budget is used up.  There is a guard in
        // Chunk::allocateArena() against the latter case.
        return tenured_->arenas.allocateFromArena(evacuationZone_, thingKind);
    } else {
        // Nursery allocation will never fail during GC - apart from
        // true OOM - since newspace is at least as large as
        // fromspace; true OOM is caught and signaled within
        // ForkJoinNursery::setCurrentChunk().
        return allocate(thingSize);
    }
}
void *
ForkJoinNursery::allocateInTospace(size_t nelem, size_t elemSize)
{
    // NOTE(review): the nelem*elemSize multiplication is not overflow-checked
    // here - presumably callers validate sizes upstream (cf. the
    // MulOverflowMask guards in the slot allocators); confirm.
    const size_t nbytes = nelem * elemSize;
    return isEvacuating_ ? evacuationZone_->malloc_(nbytes) : allocate(nbytes);
}
// Append |entry| to the singly-linked fixup list consumed by
// collectToFixedPoint(); tail_ always points at the last next-pointer.
MOZ_ALWAYS_INLINE void
ForkJoinNursery::insertIntoFixupList(RelocationOverlay *entry)
{
    *tail_ = entry;
    tail_ = &entry->next_;
    *tail_ = nullptr;
}
// Copy |src| to tospace, leave a forwarding pointer in the old cell, and
// queue the cell on the fixup list for later tracing. Returns the new
// address; crashes (does not return) on OOM.
void *
ForkJoinNursery::moveObjectToTospace(JSObject *src)
{
    AllocKind dstKind = getObjectAllocKind(src);
    JSObject *dst = static_cast<JSObject *>(allocateInTospace(dstKind));
    if (!dst)
        CrashAtUnhandlableOOM("Failed to allocate object while moving object.");
    movedSize_ += copyObjectToTospace(dst, src, dstKind);
    // Overwrite the old cell with a forwarding pointer to the copy.
    RelocationOverlay *overlay = reinterpret_cast<RelocationOverlay *>(src);
    overlay->forwardTo(dst);
    insertIntoFixupList(overlay);
    return static_cast<void *>(dst);
}
// Copy the body, slots, and elements of |src| into |dst|. Returns the
// number of bytes the collector is credited with moving.
size_t
ForkJoinNursery::copyObjectToTospace(JSObject *dst, JSObject *src, AllocKind dstKind)
{
    size_t srcSize = Arena::thingSize(dstKind);
    size_t movedSize = srcSize;
    // Arrays do not necessarily have the same AllocKind between src and dst.
    // We deal with this by copying elements manually, possibly re-inlining
    // them if there is adequate room inline in dst.
    if (src->is<ArrayObject>())
        srcSize = movedSize = sizeof(ObjectImpl);
    js_memcpy(dst, src, srcSize);
    movedSize += copySlotsToTospace(dst, src, dstKind);
    movedSize += copyElementsToTospace(dst, src, dstKind);
    // Typed arrays keep their data inline; repoint the private data pointer
    // at dst's own fixed storage.
    if (src->is<TypedArrayObject>())
        dst->setPrivate(dst->fixedData(TypedArrayObject::FIXED_DATA_START));
    // The shape's list head may point into the old object.
    if (&src->shape_ == dst->shape_->listp) {
        JS_ASSERT(cx_->isThreadLocal(dst->shape_.get()));
        dst->shape_->listp = &dst->shape_;
    }
    return movedSize;
}
// Copy the dynamic slot array of |src| into tospace and install it on |dst|.
// Returns the number of bytes moved (0 when nothing had to be copied).
size_t
ForkJoinNursery::copySlotsToTospace(JSObject *dst, JSObject *src, AllocKind dstKind)
{
    // Fixed slots have already been copied over.
    if (!src->hasDynamicSlots())
        return 0;

    // A slot array outside fromspace is a "huge" (separately allocated)
    // array: nothing to copy, just move its registration from the fromspace
    // set to the newspace set. During evacuation it is only removed —
    // presumably the tenured allocation takes over ownership; TODO confirm.
    if (!isInsideFromspace(src->slots)) {
        hugeSlots[hugeSlotsFrom].remove(src->slots);
        if (!isEvacuating_)
            hugeSlots[hugeSlotsNew].put(src->slots);
        return 0;
    }

    size_t count = src->numDynamicSlots();
    dst->slots = reinterpret_cast<HeapSlot *>(allocateInTospace(count, sizeof(HeapSlot)));
    if (!dst->slots)
        CrashAtUnhandlableOOM("Failed to allocate slots while moving object.");
    js_memcpy(dst->slots, src->slots, count * sizeof(HeapSlot));
    // Leave a forwarding pointer in the old array so stack/JIT references
    // to it can be updated later.
    setSlotsForwardingPointer(src->slots, dst->slots, count);
    return count * sizeof(HeapSlot);
}
// Copy the element storage (header + values) of |src| into tospace and
// install it on |dst|. Returns the number of bytes moved (0 when nothing
// had to be copied).
size_t
ForkJoinNursery::copyElementsToTospace(JSObject *dst, JSObject *src, AllocKind dstKind)
{
    if (src->hasEmptyElements())
        return 0;

    ObjectElements *srcHeader = src->getElementsHeader();
    ObjectElements *dstHeader;

    // TODO Bug 874151: Prefer to put element data inline if we have space.
    // (Note, not a correctness issue.)

    // Elements outside fromspace are a "huge" separately allocated array:
    // transfer the registration between the huge-slots sets instead of
    // copying. (Same scheme as copySlotsToTospace.)
    if (!isInsideFromspace(srcHeader)) {
        JS_ASSERT(src->elements == dst->elements);
        hugeSlots[hugeSlotsFrom].remove(reinterpret_cast<HeapSlot*>(srcHeader));
        if (!isEvacuating_)
            hugeSlots[hugeSlotsNew].put(reinterpret_cast<HeapSlot*>(srcHeader));
        return 0;
    }

    size_t nslots = ObjectElements::VALUES_PER_HEADER + srcHeader->capacity;

    // Unlike other objects, Arrays can have fixed elements.
    if (src->is<ArrayObject>() && nslots <= GetGCKindSlots(dstKind)) {
        dst->setFixedElements();
        dstHeader = dst->getElementsHeader();
        js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
        setElementsForwardingPointer(srcHeader, dstHeader, nslots);
        return nslots * sizeof(HeapSlot);
    }

    // Out-of-line copy: header plus at least one value slot.
    JS_ASSERT(nslots >= 2);
    dstHeader = reinterpret_cast<ObjectElements *>(allocateInTospace(nslots, sizeof(HeapSlot)));
    if (!dstHeader)
        CrashAtUnhandlableOOM("Failed to allocate elements while moving object.");
    js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
    setElementsForwardingPointer(srcHeader, dstHeader, nslots);
    dst->elements = dstHeader->elements();
    return nslots * sizeof(HeapSlot);
}
// Record, in the first word of the abandoned fromspace slot array, the new
// tospace location of that array, so pointers into it can be forwarded.
void
ForkJoinNursery::setSlotsForwardingPointer(HeapSlot *oldSlots, HeapSlot *newSlots, uint32_t nslots)
{
    // Need at least one slot's worth of space to store the pointer.
    JS_ASSERT(nslots > 0);
    JS_ASSERT(isInsideFromspace(oldSlots));
    JS_ASSERT(!isInsideFromspace(newSlots));
    *reinterpret_cast<HeapSlot **>(oldSlots) = newSlots;
}
// Record, in the first value slot of the abandoned fromspace elements
// allocation, the new tospace location of the element data.
void
ForkJoinNursery::setElementsForwardingPointer(ObjectElements *oldHeader, ObjectElements *newHeader,
                                              uint32_t nelems)
{
    // If the JIT has hoisted a zero length pointer, then we do not need to
    // relocate it because reads and writes to/from this pointer are invalid.
    //
    // NOTE(review): the subtraction is unsigned; this guard behaves as
    // "nelems == VALUES_PER_HEADER" only under the (apparent) invariant
    // nelems >= VALUES_PER_HEADER — confirm against callers.
    if (nelems - ObjectElements::VALUES_PER_HEADER < 1)
        return;
    JS_ASSERT(isInsideFromspace(oldHeader));
    JS_ASSERT(!isInsideFromspace(newHeader));
    // Store the forwarding pointer past the header, in the first value slot.
    *reinterpret_cast<HeapSlot **>(oldHeader->elements()) = newHeader->elements();
}
// Construct a tracer whose callback is ForkJoinNursery::MinorGCCallback,
// bound to the given |nursery|; weak map keys and values are traced.
ForkJoinNurseryCollectionTracer::ForkJoinNurseryCollectionTracer(JSRuntime *rt,
                                                                 ForkJoinNursery *nursery)
  : JSTracer(rt, ForkJoinNursery::MinorGCCallback, TraceWeakMapKeysValues)
  , nursery_(nursery)
{
    JS_ASSERT(rt);
    JS_ASSERT(nursery);
}
} // namespace gc
} // namespace js
#endif /* JSGC_FJGENERATIONAL */

View File

@ -1,297 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef gc_ForkJoinNursery_h
#define gc_ForkJoinNursery_h
#ifdef JSGC_FJGENERATIONAL
#ifndef JSGC_GENERATIONAL
#error "JSGC_GENERATIONAL is required for the ForkJoinNursery"
#endif
#ifndef JS_THREADSAFE
#error "JS_THREADSAFE is required for the ForkJoinNursery"
#endif
#ifndef JS_ION
#error "JS_ION is required for the ForkJoinNursery"
#endif
#include "jsalloc.h"
#include "jspubtd.h"
#include "gc/Heap.h"
#include "gc/Memory.h"
#include "gc/Nursery.h"
#include "js/HashTable.h"
#include "js/TracingAPI.h"
namespace js {
class ObjectElements;
class HeapSlot;
class ForkJoinShared;
}
namespace js {
namespace gc {
class ForkJoinGCShared;
class ForkJoinNursery;
class ForkJoinNurseryCollectionTracer;
// This tracer comes into play when a class has a tracer function, but
// is otherwise unused and has no other functionality.
//
// It could look like this could be merged into ForkJoinNursery by
// making the latter derive from JSTracer; I've decided to keep them
// separate for now, since it allows for multiple instantiations of
// this class with different parameters, for different purposes. That
// may change.
class ForkJoinNurseryCollectionTracer : public JSTracer
{
    friend class ForkJoinNursery;

  public:
    // The constructor installs ForkJoinNursery::MinorGCCallback as the
    // trace callback (see the definition in ForkJoinNursery.cpp).
    ForkJoinNurseryCollectionTracer(JSRuntime *rt, ForkJoinNursery *nursery);

  private:
    // The nursery being collected; never null.
    ForkJoinNursery *const nursery_;
};
// The layout for a chunk used by the ForkJoinNursery.
struct ForkJoinNurseryChunk
{
    // The amount of space in the mapped nursery available to allocations
    static const size_t UsableSize = ChunkSize - sizeof(ChunkTrailer);

    // Allocation payload, followed by the trailer that identifies the chunk
    // (store buffer pointer, location, runtime — see Nursery's initChunk).
    char data[UsableSize];
    ChunkTrailer trailer;

    // First usable address of the chunk.
    uintptr_t start() { return uintptr_t(&data); }
    // One past the last usable address; the trailer itself is off-limits.
    uintptr_t end() { return uintptr_t(&trailer); }
};
// A GC adapter to ForkJoinShared, which is a complex class hidden
// inside ForkJoin.cpp.
class ForkJoinGCShared
{
  public:
    explicit ForkJoinGCShared(ForkJoinShared *shared) : shared_(shared) {}

    JSRuntime *runtime();
    JS::Zone *zone();

    // The updatable object (the ForkJoin result array), or nullptr.
    JSObject *updatable();

    // allocateNurseryChunk() returns nullptr on oom.
    ForkJoinNurseryChunk *allocateNurseryChunk();

    // p must have been obtained through allocateNurseryChunk.
    void freeNurseryChunk(ForkJoinNurseryChunk *p);

    // GC statistics output.  printf-style format string and arguments.
    void spewGC(const char *fmt, ...);

  private:
    ForkJoinShared *const shared_;
};
// There is one ForkJoinNursery per ForkJoin worker.
//
// See the comment in ForkJoinNursery.cpp about how it works.
class ForkJoinNursery
{
    friend class ForkJoinNurseryCollectionTracer;
    friend class RelocationOverlay;

    // The chunk layout (data + trailer) must tile exactly onto GC chunks.
    static_assert(sizeof(ForkJoinNurseryChunk) == ChunkSize,
                  "ForkJoinNursery chunk size must match Chunk size.");
  public:
    // cx: the owning ForkJoin worker context; shared: the per-ForkJoin GC
    // adapter; tenured: the worker's private tenured-area allocator.
    ForkJoinNursery(ForkJoinContext *cx, ForkJoinGCShared *shared, Allocator *tenured);
    ~ForkJoinNursery();

    // Perform a collection within the nursery, and if that for some reason
    // cannot be done then perform an evacuating collection.
    void minorGC();

    // Evacuate the live data from the nursery into the tenured area;
    // do not recreate the nursery.
    void evacuatingGC();

    // Allocate an object with a number of dynamic slots.  Returns an
    // object, or nullptr in one of two circumstances:
    //
    //  - The nursery was full, the collector must be run, and the
    //    allocation must be retried.  tooLarge is set to 'false'.
    //  - The number of dynamic slots requested is too large and
    //    the object should be allocated in the tenured area.
    //    tooLarge is set to 'true'.
    //
    // This method will never run the garbage collector.
    JSObject *allocateObject(size_t size, size_t numDynamic, bool& tooLarge);

    // Allocate and reallocate slot and element arrays for existing
    // objects.  These will create or maintain the arrays within the
    // nursery if possible and appropriate, and otherwise will fall
    // back to allocating in the tenured area.  They will return
    // nullptr only if memory is exhausted.  If the reallocate methods
    // return nullptr then the old array is still live.
    //
    // These methods will never run the garbage collector.
    HeapSlot *allocateSlots(JSObject *obj, uint32_t nslots);
    HeapSlot *reallocateSlots(JSObject *obj, HeapSlot *oldSlots,
                              uint32_t oldCount, uint32_t newCount);
    ObjectElements *allocateElements(JSObject *obj, uint32_t nelems);
    ObjectElements *reallocateElements(JSObject *obj, ObjectElements *oldHeader,
                                       uint32_t oldCount, uint32_t newCount);

    // Free a slots array.
    void freeSlots(HeapSlot *slots);

    // The method embedded in a ForkJoinNurseryCollectionTracer
    static void MinorGCCallback(JSTracer *trcArg, void **thingp, JSGCTraceKind kind);

    // A method called from the JIT frame updater
    static void forwardBufferPointer(JSTracer *trc, HeapSlot **pSlotsElems);

    // Return true iff obj is inside the current newspace.
    MOZ_ALWAYS_INLINE bool isInsideNewspace(const void *obj);

    // Return true iff collection is ongoing and obj is inside the current fromspace.
    MOZ_ALWAYS_INLINE bool isInsideFromspace(const void *obj);

    // If *ref has been forwarded, update it to its new location and return
    // true; presumably false otherwise — confirm against the definition.
    template <typename T>
    MOZ_ALWAYS_INLINE bool getForwardedPointer(T **ref);

    // Offsets of the bump-allocation fields, for use by JIT-generated
    // inline allocation code.
    static size_t offsetOfPosition() {
        return offsetof(ForkJoinNursery, position_);
    }

    static size_t offsetOfCurrentEnd() {
        return offsetof(ForkJoinNursery, currentEnd_);
    }

  private:
    // The largest slot arrays that will be allocated in the nursery.
    // On the one hand we want this limit to be large, to avoid
    // managing many hugeSlots.  On the other hand, slot arrays have
    // to be copied during GC and will induce some external
    // fragmentation in the nursery at chunk boundaries.
    static const size_t MaxNurserySlots = 2048;

    // The fixed limit on the per-worker nursery, in chunks.
    //
    // For production runs, 16 may be good - programs that need it,
    // really need it, and as allocation is lazy programs that don't
    // need it won't suck up a lot of resources.
    //
    // For debugging runs, 1 or 2 may sometimes be good, because it
    // will more easily provoke bugs in the evacuation paths.
    static const size_t MaxNurseryChunks = 16;

    // The inverse load factor in the per-worker nursery.  Grow the nursery
    // or schedule an evacuation if more than 1/NurseryLoadFactor of the
    // current nursery size is live after minor GC.
    static const int NurseryLoadFactor = 3;

    // Allocate an object in the nursery's newspace.  Return nullptr
    // when allocation fails (ie the object can't fit in the current
    // chunk and the number of chunks it at its maximum).
    void *allocate(size_t size);

    // Allocate an external slot array and register it with this nursery.
    HeapSlot *allocateHugeSlots(size_t nslots);

    // Reallocate an external slot array, unregister the old array and
    // register the new array.  If the allocation fails then leave
    // everything unchanged.
    HeapSlot *reallocateHugeSlots(HeapSlot *oldSlots, uint32_t oldSize, uint32_t newSize);

    // Walk the list of registered slot arrays and free them all.
    void sweepHugeSlots();

    // Set the position/end pointers to correspond to the numbered
    // chunk.
    void setCurrentChunk(int index);

    // Bit flags combined into the argument of pjsCollection().
    enum PJSCollectionOp {
        Evacuate = 1,   // Move live data into the tenured area
        Collect = 2,    // Copy live data within the nursery
        Recreate = 4    // Rebuild the nursery afterwards
    };

    // Misc GC internals.
    void pjsCollection(int op /* A combination of PJSCollectionOp bits */);
    void initNewspace();
    void flip();
    void forwardFromRoots(ForkJoinNurseryCollectionTracer *trc);
    void forwardFromUpdatable(ForkJoinNurseryCollectionTracer *trc);
    void forwardFromStack(ForkJoinNurseryCollectionTracer *trc);
    void forwardFromTenured(ForkJoinNurseryCollectionTracer *trc);
    void collectToFixedPoint(ForkJoinNurseryCollectionTracer *trc);
    void freeFromspace();
    void computeNurserySizeAfterGC(size_t live, const char **msg);

    // Copying machinery; definitions in ForkJoinNursery.cpp.
    AllocKind getObjectAllocKind(JSObject *src);
    void *allocateInTospace(AllocKind thingKind);
    void *allocateInTospace(size_t nelem, size_t elemSize);
    MOZ_ALWAYS_INLINE bool shouldMoveObject(void **thingp);
    void *moveObjectToTospace(JSObject *src);
    size_t copyObjectToTospace(JSObject *dst, JSObject *src, gc::AllocKind dstKind);
    size_t copyElementsToTospace(JSObject *dst, JSObject *src, gc::AllocKind dstKind);
    size_t copySlotsToTospace(JSObject *dst, JSObject *src, gc::AllocKind dstKind);
    MOZ_ALWAYS_INLINE void insertIntoFixupList(RelocationOverlay *entry);

    void setSlotsForwardingPointer(HeapSlot *oldSlots, HeapSlot *newSlots, uint32_t nslots);
    void setElementsForwardingPointer(ObjectElements *oldHeader, ObjectElements *newHeader,
                                      uint32_t nelems);

    // Marking helpers used during the fixup scan.
    MOZ_ALWAYS_INLINE void traceObject(ForkJoinNurseryCollectionTracer *trc, JSObject *obj);
    MOZ_ALWAYS_INLINE void markSlots(HeapSlot *vp, uint32_t nslots);
    MOZ_ALWAYS_INLINE void markSlots(HeapSlot *vp, HeapSlot *end);
    MOZ_ALWAYS_INLINE void markSlot(HeapSlot *slotp);

    ForkJoinContext *const cx_;     // The context that owns this nursery
    Allocator *const tenured_;      // Private tenured area
    ForkJoinGCShared *const shared_; // Common to all nurseries belonging to a ForkJoin instance
    JS::Zone *evacuationZone_;      // During evacuating GC this is non-NULL: the Zone we
                                    // allocate into

    uintptr_t currentStart_;        // Start of current area in newspace
    uintptr_t currentEnd_;          // End of current area in newspace (last byte + 1)
    uintptr_t position_;            // Next free byte in current newspace chunk
    unsigned currentChunk_;         // Index of current / highest numbered chunk in newspace
    unsigned numActiveChunks_;      // Number of active chunks in newspace, not all may be allocated
    unsigned numFromspaceChunks_;   // Number of active chunks in fromspace, all are allocated
    bool mustEvacuate_;             // Set to true after GC when the /next/ minor GC must evacuate
    bool isEvacuating_;             // Set to true when the current minor GC is evacuating
    size_t movedSize_;              // Bytes copied during the current minor GC
    RelocationOverlay *head_;       // First node of relocation list
    RelocationOverlay **tail_;      // Pointer to 'next_' field of last node of relocation list

    // Sets of separately-allocated ("huge") slot arrays owned by nursery
    // objects; double-buffered across a collection.
    typedef HashSet<HeapSlot *, PointerHasher<HeapSlot *, 3>, SystemAllocPolicy> HugeSlotsSet;

    HugeSlotsSet hugeSlots[2];      // Hash sets for huge slots

    int hugeSlotsNew;               // Huge slot arrays in the newspace (index in hugeSlots)
    int hugeSlotsFrom;              // Huge slot arrays in the fromspace (index in hugeSlots)

    ForkJoinNurseryChunk *newspace[MaxNurseryChunks];  // All allocation happens here
    ForkJoinNurseryChunk *fromspace[MaxNurseryChunks]; // Meaningful during GC: the previous newspace
};
} // namespace gc
} // namespace js
#endif // JSGC_FJGENERATIONAL
#endif // gc_ForkJoinNursery_h

View File

@ -19,13 +19,6 @@ namespace gc {
void
MarkPersistentRootedChains(JSTracer *trc);
#ifdef JSGC_FJGENERATIONAL
class ForkJoinNurseryCollectionTracer;
void
MarkForkJoinStack(ForkJoinNurseryCollectionTracer *trc);
#endif
class AutoCopyFreeListToArenas
{
JSRuntime *runtime;

View File

@ -466,16 +466,6 @@ class GCRuntime
js::gc::StoreBuffer storeBuffer;
#endif
/*
* ForkJoin workers enter and leave GC independently; this counter
* tracks the number that are currently in GC.
*
* Technically this should be #ifdef JSGC_FJGENERATIONAL but that
* affects the observed size of JSRuntime in problematic ways, see
* note in vm/ThreadPool.h.
*/
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> fjCollectionCounter;
/*
* These options control the zealousness of the GC. The fundamental values
* are nextScheduled and gcDebugCompartmentGC. At every allocation,

View File

@ -171,16 +171,6 @@ CheckMarkedThing(JSTracer *trc, T **thingp)
JS_ASSERT(*thingp);
#ifdef DEBUG
#ifdef JSGC_FJGENERATIONAL
/*
* The code below (runtimeFromMainThread(), etc) makes assumptions
* not valid for the ForkJoin worker threads during ForkJoin GGC,
* so just bail.
*/
if (ForkJoinContext::current())
return;
#endif
/* This function uses data that's not available in the nursery. */
if (IsInsideNursery(thing))
return;
@ -263,16 +253,6 @@ MarkInternal(JSTracer *trc, T **thingp)
T *thing = *thingp;
if (!trc->callback) {
#ifdef JSGC_FJGENERATIONAL
/*
* This case should never be reached from PJS collections as
* those should all be using a ForkJoinNurseryCollectionTracer
* that carries a callback.
*/
JS_ASSERT(!ForkJoinContext::current());
JS_ASSERT(!trc->runtime()->isFJMinorCollecting());
#endif
/*
* We may mark a Nursery thing outside the context of the
* MinorCollectionTracer because of a pre-barrier. The pre-barrier is
@ -401,25 +381,11 @@ IsMarked(T **thingp)
JS_ASSERT(thingp);
JS_ASSERT(*thingp);
#ifdef JSGC_GENERATIONAL
JSRuntime* rt = (*thingp)->runtimeFromAnyThread();
#ifdef JSGC_FJGENERATIONAL
// Must precede the case for JSGC_GENERATIONAL because IsInsideNursery()
// will also be true for the ForkJoinNursery.
if (rt->isFJMinorCollecting()) {
ForkJoinContext *ctx = ForkJoinContext::current();
ForkJoinNursery &fjNursery = ctx->fjNursery();
if (fjNursery.isInsideFromspace(*thingp))
return fjNursery.getForwardedPointer(thingp);
if (IsInsideNursery(*thingp)) {
Nursery &nursery = (*thingp)->runtimeFromMainThread()->gc.nursery;
return nursery.getForwardedPointer(thingp);
}
else
#endif
{
if (IsInsideNursery(*thingp)) {
Nursery &nursery = rt->gc.nursery;
return nursery.getForwardedPointer(thingp);
}
}
#endif // JSGC_GENERATIONAL
Zone *zone = (*thingp)->tenuredZone();
if (!zone->isCollecting() || zone->isGCFinished())
return true;
@ -441,25 +407,14 @@ IsAboutToBeFinalized(T **thingp)
return false;
#ifdef JSGC_GENERATIONAL
#ifdef JSGC_FJGENERATIONAL
if (rt->isFJMinorCollecting()) {
ForkJoinContext *ctx = ForkJoinContext::current();
ForkJoinNursery &fjNursery = ctx->fjNursery();
if (fjNursery.isInsideFromspace(thing))
return !fjNursery.getForwardedPointer(thingp);
Nursery &nursery = rt->gc.nursery;
JS_ASSERT_IF(!rt->isHeapMinorCollecting(), !IsInsideNursery(thing));
if (rt->isHeapMinorCollecting()) {
if (IsInsideNursery(thing))
return !nursery.getForwardedPointer(thingp);
return false;
}
else
#endif
{
Nursery &nursery = rt->gc.nursery;
JS_ASSERT_IF(!rt->isHeapMinorCollecting(), !IsInsideNursery(thing));
if (rt->isHeapMinorCollecting()) {
if (IsInsideNursery(thing))
return !nursery.getForwardedPointer(thingp);
return false;
}
}
#endif // JSGC_GENERATIONAL
if (!thing->tenuredZone()->isGCSweeping())
return false;
@ -482,20 +437,9 @@ UpdateIfRelocated(JSRuntime *rt, T **thingp)
{
JS_ASSERT(thingp);
#ifdef JSGC_GENERATIONAL
#ifdef JSGC_FJGENERATIONAL
if (*thingp && rt->isFJMinorCollecting()) {
ForkJoinContext *ctx = ForkJoinContext::current();
ForkJoinNursery &fjNursery = ctx->fjNursery();
if (fjNursery.isInsideFromspace(*thingp))
fjNursery.getForwardedPointer(thingp);
}
else
if (*thingp && rt->isHeapMinorCollecting() && IsInsideNursery(*thingp))
rt->gc.nursery.getForwardedPointer(thingp);
#endif
{
if (*thingp && rt->isHeapMinorCollecting() && IsInsideNursery(*thingp))
rt->gc.nursery.getForwardedPointer(thingp);
}
#endif // JSGC_GENERATIONAL
return *thingp;
}

View File

@ -17,41 +17,52 @@
namespace js {
namespace gc {
/* static */
inline RelocationOverlay *
RelocationOverlay::fromCell(Cell *cell)
/*
* This structure overlays a Cell in the Nursery and re-purposes its memory
* for managing the Nursery collection process.
*/
class RelocationOverlay
{
JS_ASSERT(!cell->isTenured());
return reinterpret_cast<RelocationOverlay *>(cell);
}
friend class MinorCollectionTracer;
inline bool
RelocationOverlay::isForwarded() const
{
return magic_ == Relocated;
}
/* The low bit is set so this should never equal a normal pointer. */
static const uintptr_t Relocated = uintptr_t(0xbad0bad1);
inline Cell *
RelocationOverlay::forwardingAddress() const
{
JS_ASSERT(isForwarded());
return newLocation_;
}
/* Set to Relocated when moved. */
uintptr_t magic_;
inline void
RelocationOverlay::forwardTo(Cell *cell)
{
JS_ASSERT(!isForwarded());
magic_ = Relocated;
newLocation_ = cell;
next_ = nullptr;
}
/* The location |this| was moved to. */
Cell *newLocation_;
inline RelocationOverlay *
RelocationOverlay::next() const
{
return next_;
}
/* A list entry to track all relocated things. */
RelocationOverlay *next_;
public:
static RelocationOverlay *fromCell(Cell *cell) {
JS_ASSERT(!cell->isTenured());
return reinterpret_cast<RelocationOverlay *>(cell);
}
bool isForwarded() const {
return magic_ == Relocated;
}
Cell *forwardingAddress() const {
JS_ASSERT(isForwarded());
return newLocation_;
}
void forwardTo(Cell *cell) {
JS_ASSERT(!isForwarded());
magic_ = Relocated;
newLocation_ = cell;
next_ = nullptr;
}
RelocationOverlay *next() const {
return next_;
}
};
} /* namespace gc */
} /* namespace js */

View File

@ -912,10 +912,6 @@ js::Nursery::collect(JSRuntime *rt, JS::gcreason::Reason reason, TypeObjectList
#endif
}
#undef TIME_START
#undef TIME_END
#undef TIME_TOTAL
void
js::Nursery::freeHugeSlots()
{

View File

@ -36,7 +36,6 @@ namespace gc {
class Cell;
class Collector;
class MinorCollectionTracer;
class ForkJoinNursery;
} /* namespace gc */
namespace types {
@ -50,39 +49,6 @@ class ICStubCompiler;
class BaselineCompiler;
}
namespace gc {
/*
* This structure overlays a Cell in the Nursery and re-purposes its memory
* for managing the Nursery collection process.
*/
class RelocationOverlay
{
friend class MinorCollectionTracer;
friend class ForkJoinNursery;
/* The low bit is set so this should never equal a normal pointer. */
static const uintptr_t Relocated = uintptr_t(0xbad0bad1);
/* Set to Relocated when moved. */
uintptr_t magic_;
/* The location |this| was moved to. */
Cell *newLocation_;
/* A list entry to track all relocated things. */
RelocationOverlay *next_;
public:
static inline RelocationOverlay *fromCell(Cell *cell);
inline bool isForwarded() const;
inline Cell *forwardingAddress() const;
inline void forwardTo(Cell *cell);
inline RelocationOverlay *next() const;
};
} /* namespace gc */
class Nursery
{
public:
@ -249,7 +215,7 @@ class Nursery
MOZ_ALWAYS_INLINE void initChunk(int chunkno) {
NurseryChunkLayout &c = chunk(chunkno);
c.trailer.storeBuffer = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr();
c.trailer.location = gc::ChunkLocationBitNursery;
c.trailer.location = gc::ChunkLocationNursery;
c.trailer.runtime = runtime();
}

View File

@ -19,7 +19,6 @@
#include "builtin/MapObject.h"
#include "frontend/BytecodeCompiler.h"
#include "gc/ForkJoinNursery.h"
#include "gc/GCInternals.h"
#include "gc/Marking.h"
#ifdef JS_ION
@ -126,14 +125,6 @@ MarkExactStackRoots(JSTracer *trc)
MarkExactStackRootsForType<JSPropertyDescriptor, MarkPropertyDescriptorRoot>(trc);
MarkExactStackRootsForType<PropDesc, MarkPropDescRoot>(trc);
}
static void
MarkExactStackRoots(ThreadSafeContext *cx, JSTracer *trc)
{
for (unsigned i = 0; i < THING_ROOT_LIMIT; i++)
MarkExactStackRootList(trc, cx->thingGCRooters[i], ThingRootKind(i));
}
#endif /* JSGC_USE_EXACT_ROOTING */
enum ConservativeGCTest
@ -590,15 +581,17 @@ AutoGCRooter::trace(JSTracer *trc)
/* static */ void
AutoGCRooter::traceAll(JSTracer *trc)
{
for (ContextIter cx(trc->runtime()); !cx.done(); cx.next())
traceAllInContext(&*cx, trc);
for (ContextIter cx(trc->runtime()); !cx.done(); cx.next()) {
for (js::AutoGCRooter *gcr = cx->autoGCRooters; gcr; gcr = gcr->down)
gcr->trace(trc);
}
}
/* static */ void
AutoGCRooter::traceAllWrappers(JSTracer *trc)
{
for (ContextIter cx(trc->runtime()); !cx.done(); cx.next()) {
for (AutoGCRooter *gcr = cx->autoGCRooters; gcr; gcr = gcr->down) {
for (js::AutoGCRooter *gcr = cx->autoGCRooters; gcr; gcr = gcr->down) {
if (gcr->tag_ == WRAPVECTOR || gcr->tag_ == WRAPPER)
gcr->trace(trc);
}
@ -690,27 +683,6 @@ js::gc::MarkPersistentRootedChains(JSTracer *trc)
"PersistentRooted<Value>");
}
#ifdef JSGC_FJGENERATIONAL
void
js::gc::MarkForkJoinStack(ForkJoinNurseryCollectionTracer *trc)
{
ForkJoinContext *cx = ForkJoinContext::current();
PerThreadData *ptd = cx->perThreadData;
AutoGCRooter::traceAllInContext(cx, trc);
MarkExactStackRoots(cx, trc);
jit::MarkJitActivations(ptd, trc);
#ifdef DEBUG
// There should be only JIT activations on the stack
for (ActivationIterator iter(ptd); !iter.done(); ++iter) {
Activation *act = iter.activation();
JS_ASSERT(act->isJit());
}
#endif
}
#endif // JSGC_FJGENERATIONAL
void
js::gc::GCRuntime::markRuntime(JSTracer *trc, bool useSavedRoots)
{
@ -812,10 +784,10 @@ js::gc::GCRuntime::markRuntime(JSTracer *trc, bool useSavedRoots)
c->debugScopes->mark(trc);
}
MarkInterpreterActivations(&rt->mainThread, trc);
MarkInterpreterActivations(rt, trc);
#ifdef JS_ION
jit::MarkJitActivations(&rt->mainThread, trc);
jit::MarkJitActivations(rt, trc);
#endif
if (!isHeapMinorCollecting()) {

View File

@ -1149,8 +1149,7 @@ CodeGenerator::visitLambdaPar(LLambdaPar *lir)
JS_ASSERT(scopeChainReg != resultReg);
if (!emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, info.fun))
return false;
emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, info.fun);
emitLambdaInit(resultReg, scopeChainReg, info);
return true;
}
@ -3899,13 +3898,10 @@ CodeGenerator::visitNewCallObjectPar(LNewCallObjectPar *lir)
Register tempReg2 = ToRegister(lir->getTemp1());
JSObject *templateObj = lir->mir()->templateObj();
return emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, templateObj);
emitAllocateGCThingPar(lir, resultReg, cxReg, tempReg1, tempReg2, templateObj);
return true;
}
typedef JSObject *(*ExtendArrayParFn)(ForkJoinContext*, JSObject*, uint32_t);
static const VMFunction ExtendArrayParInfo =
FunctionInfo<ExtendArrayParFn>(ExtendArrayPar);
bool
CodeGenerator::visitNewDenseArrayPar(LNewDenseArrayPar *lir)
{
@ -3916,23 +3912,26 @@ CodeGenerator::visitNewDenseArrayPar(LNewDenseArrayPar *lir)
Register tempReg2 = ToRegister(lir->getTemp2());
JSObject *templateObj = lir->mir()->templateObject();
masm.push(lengthReg);
if (!emitAllocateGCThingPar(lir, tempReg2, cxReg, tempReg0, tempReg1, templateObj))
return false;
masm.pop(lengthReg);
// Allocate the array into tempReg2. Don't use resultReg because it
// may alias cxReg etc.
emitAllocateGCThingPar(lir, tempReg2, cxReg, tempReg0, tempReg1, templateObj);
// Invoke a C helper to allocate the elements. The helper returns
// nullptr on allocation error or the array object.
saveLive(lir);
pushArg(lengthReg);
pushArg(tempReg2);
if (!callVM(ExtendArrayParInfo, lir))
return false;
storeResultTo(ToRegister(lir->output()));
restoreLive(lir);
// Invoke a C helper to allocate the elements. For convenience,
// this helper also returns the array back to us, or nullptr, which
// obviates the need to preserve the register across the call. In
// reality, we should probably just have the C helper also
// *allocate* the array, but that would require that it initialize
// the various fields of the object, and I didn't want to
// duplicate the code in initGCThing() that already does such an
// admirable job.
masm.setupUnalignedABICall(3, tempReg0);
masm.passABIArg(cxReg);
masm.passABIArg(tempReg2);
masm.passABIArg(lengthReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ExtendArrayPar));
Register resultReg = ToRegister(lir->output());
JS_ASSERT(resultReg == ReturnReg);
OutOfLineAbortPar *bail = oolAbortPar(ParallelBailoutOutOfMemory, lir);
if (!bail)
return false;
@ -3977,10 +3976,10 @@ CodeGenerator::visitNewPar(LNewPar *lir)
Register tempReg1 = ToRegister(lir->getTemp0());
Register tempReg2 = ToRegister(lir->getTemp1());
JSObject *templateObject = lir->mir()->templateObject();
return emitAllocateGCThingPar(lir, objReg, cxReg, tempReg1, tempReg2, templateObject);
emitAllocateGCThingPar(lir, objReg, cxReg, tempReg1, tempReg2, templateObject);
return true;
}
#ifndef JSGC_FJGENERATIONAL
class OutOfLineNewGCThingPar : public OutOfLineCodeBase<CodeGenerator>
{
public:
@ -3998,27 +3997,15 @@ public:
return codegen->visitOutOfLineNewGCThingPar(this);
}
};
#endif // JSGC_FJGENERATIONAL
typedef JSObject *(*NewGCThingParFn)(ForkJoinContext *, js::gc::AllocKind allocKind);
static const VMFunction NewGCThingParInfo =
FunctionInfo<NewGCThingParFn>(NewGCThingPar);
bool
CodeGenerator::emitAllocateGCThingPar(LInstruction *lir, Register objReg, Register cxReg,
Register tempReg1, Register tempReg2, JSObject *templateObj)
{
gc::AllocKind allocKind = templateObj->tenuredGetAllocKind();
#ifdef JSGC_FJGENERATIONAL
OutOfLineCode *ool = oolCallVM(NewGCThingParInfo, lir,
(ArgList(), Imm32(allocKind)), StoreRegisterTo(objReg));
if (!ool)
return false;
#else
OutOfLineNewGCThingPar *ool = new(alloc()) OutOfLineNewGCThingPar(lir, allocKind, objReg, cxReg);
if (!ool || !addOutOfLineCode(ool))
return false;
#endif
masm.newGCThingPar(objReg, cxReg, tempReg1, tempReg2, templateObj, ool->entry());
masm.bind(ool->rejoin());
@ -4026,7 +4013,6 @@ CodeGenerator::emitAllocateGCThingPar(LInstruction *lir, Register objReg, Regist
return true;
}
#ifndef JSGC_FJGENERATIONAL
bool
CodeGenerator::visitOutOfLineNewGCThingPar(OutOfLineNewGCThingPar *ool)
{
@ -4052,7 +4038,6 @@ CodeGenerator::visitOutOfLineNewGCThingPar(OutOfLineNewGCThingPar *ool)
masm.jump(ool->rejoin());
return true;
}
#endif // JSGC_FJGENERATIONAL
bool
CodeGenerator::visitAbortPar(LAbortPar *lir)
@ -6506,7 +6491,7 @@ static const VMFunctionsModal InitRestParameterInfo = VMFunctionsModal(
bool
CodeGenerator::emitRest(LInstruction *lir, Register array, Register numActuals,
Register temp0, Register temp1, unsigned numFormals,
JSObject *templateObject, bool saveAndRestore, Register resultreg)
JSObject *templateObject)
{
// Compute actuals() + numFormals.
size_t actualsOffset = frameSize() + IonJSFrameLayout::offsetOfActualArgs();
@ -6525,22 +6510,12 @@ CodeGenerator::emitRest(LInstruction *lir, Register array, Register numActuals,
}
masm.bind(&joinLength);
if (saveAndRestore)
saveLive(lir);
pushArg(array);
pushArg(ImmGCPtr(templateObject));
pushArg(temp1);
pushArg(temp0);
bool result = callVM(InitRestParameterInfo, lir);
if (saveAndRestore) {
storeResultTo(resultreg);
restoreLive(lir);
}
return result;
return callVM(InitRestParameterInfo, lir);
}
bool
@ -6562,12 +6537,9 @@ CodeGenerator::visitRest(LRest *lir)
}
masm.bind(&joinAlloc);
return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject, false, ToRegister(lir->output()));
return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject);
}
// LRestPar cannot derive from LCallInstructionHelper because emitAllocateGCThingPar may
// itself contain a VM call. Thus there's some manual work here and in emitRest().
bool
CodeGenerator::visitRestPar(LRestPar *lir)
{
@ -6579,12 +6551,10 @@ CodeGenerator::visitRestPar(LRestPar *lir)
unsigned numFormals = lir->mir()->numFormals();
JSObject *templateObject = lir->mir()->templateObject();
masm.push(numActuals);
if (!emitAllocateGCThingPar(lir, temp2, cx, temp0, temp1, templateObject))
return false;
masm.pop(numActuals);
return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject, true, ToRegister(lir->output()));
return emitRest(lir, temp2, numActuals, temp0, temp1, numFormals, templateObject);
}
bool

View File

@ -268,7 +268,7 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitRunOncePrologue(LRunOncePrologue *lir);
bool emitRest(LInstruction *lir, Register array, Register numActuals,
Register temp0, Register temp1, unsigned numFormals,
JSObject *templateObject, bool saveAndRestore, Register resultreg);
JSObject *templateObject);
bool visitRest(LRest *lir);
bool visitRestPar(LRestPar *lir);
bool visitCallSetProperty(LCallSetProperty *ins);

View File

@ -10,7 +10,6 @@
#include "jsobj.h"
#include "jsscript.h"
#include "gc/ForkJoinNursery.h"
#include "gc/Marking.h"
#include "jit/BaselineDebugModeOSR.h"
#include "jit/BaselineFrame.h"
@ -868,8 +867,10 @@ MarkIonJSFrame(JSTracer *trc, const JitFrameIterator &frame)
// longer reachable through the callee token (JSFunction/JSScript->ion
// is now nullptr or recompiled). Manually trace it here.
IonScript::Trace(trc, ionScript);
} else if (CalleeTokenIsFunction(layout->calleeToken())) {
ionScript = CalleeTokenToFunction(layout->calleeToken())->nonLazyScript()->ionScript();
} else {
ionScript = frame.ionScriptFromCalleeToken();
ionScript = CalleeTokenToScript(layout->calleeToken())->ionScript();
}
if (CalleeTokenIsFunction(layout->calleeToken()))
@ -936,8 +937,10 @@ UpdateIonJSFrameForMinorGC(JSTracer *trc, const JitFrameIterator &frame)
// This frame has been invalidated, meaning that its IonScript is no
// longer reachable through the callee token (JSFunction/JSScript->ion
// is now nullptr or recompiled).
} else if (CalleeTokenIsFunction(layout->calleeToken())) {
ionScript = CalleeTokenToFunction(layout->calleeToken())->nonLazyScript()->ionScript();
} else {
ionScript = frame.ionScriptFromCalleeToken();
ionScript = CalleeTokenToScript(layout->calleeToken())->ionScript();
}
const SafepointIndex *si = ionScript->getSafepointIndex(frame.returnAddressToFp());
@ -947,16 +950,8 @@ UpdateIonJSFrameForMinorGC(JSTracer *trc, const JitFrameIterator &frame)
uintptr_t *spill = frame.spillBase();
for (GeneralRegisterBackwardIterator iter(safepoint.allGprSpills()); iter.more(); iter++) {
--spill;
if (slotsRegs.has(*iter)) {
#ifdef JSGC_FJGENERATIONAL
if (trc->callback == gc::ForkJoinNursery::MinorGCCallback) {
gc::ForkJoinNursery::forwardBufferPointer(trc,
reinterpret_cast<HeapSlot **>(spill));
continue;
}
#endif
if (slotsRegs.has(*iter))
trc->runtime()->gc.nursery.forwardBufferPointer(reinterpret_cast<HeapSlot **>(spill));
}
}
// Skip to the right place in the safepoint
@ -970,12 +965,6 @@ UpdateIonJSFrameForMinorGC(JSTracer *trc, const JitFrameIterator &frame)
while (safepoint.getSlotsOrElementsSlot(&slot)) {
HeapSlot **slots = reinterpret_cast<HeapSlot **>(layout->slotRef(slot));
#ifdef JSGC_FJGENERATIONAL
if (trc->callback == gc::ForkJoinNursery::MinorGCCallback) {
gc::ForkJoinNursery::forwardBufferPointer(trc, slots);
continue;
}
#endif
trc->runtime()->gc.nursery.forwardBufferPointer(slots);
}
}
@ -1237,9 +1226,9 @@ MarkJitActivation(JSTracer *trc, const JitActivationIterator &activations)
}
void
MarkJitActivations(PerThreadData *ptd, JSTracer *trc)
MarkJitActivations(JSRuntime *rt, JSTracer *trc)
{
for (JitActivationIterator activations(ptd); !activations.done(); ++activations)
for (JitActivationIterator activations(rt); !activations.done(); ++activations)
MarkJitActivation(trc, activations);
}
@ -1267,22 +1256,6 @@ UpdateJitActivationsForMinorGC(JSRuntime *rt, JSTracer *trc)
}
}
}
void
UpdateJitActivationsForMinorGC(PerThreadData *ptd, JSTracer *trc)
{
#ifdef JSGC_FJGENERATIONAL
JS_ASSERT(trc->runtime()->isHeapMinorCollecting() || trc->runtime()->isFJMinorCollecting());
#else
JS_ASSERT(trc->runtime()->isHeapMinorCollecting());
#endif
for (JitActivationIterator activations(ptd); !activations.done(); ++activations) {
for (JitFrameIterator frames(activations); !frames.done(); ++frames) {
if (frames.type() == JitFrame_IonJS)
UpdateIonJSFrameForMinorGC(trc, frames);
}
}
}
#endif
void
@ -1677,15 +1650,6 @@ JitFrameIterator::ionScript() const
IonScript *ionScript = nullptr;
if (checkInvalidation(&ionScript))
return ionScript;
return ionScriptFromCalleeToken();
}
IonScript *
JitFrameIterator::ionScriptFromCalleeToken() const
{
JS_ASSERT(type() == JitFrame_IonJS);
JS_ASSERT(!checkInvalidation());
switch (GetCalleeTokenTag(calleeToken())) {
case CalleeToken_Function:
case CalleeToken_Script:

View File

@ -264,7 +264,7 @@ void HandleParallelFailure(ResumeFromException *rfe);
void EnsureExitFrame(IonCommonFrameLayout *frame);
void MarkJitActivations(PerThreadData *ptd, JSTracer *trc);
void MarkJitActivations(JSRuntime *rt, JSTracer *trc);
void MarkIonCompilerRoots(JSTracer *trc);
JSCompartment *
@ -272,7 +272,6 @@ TopmostIonActivationCompartment(JSRuntime *rt);
#ifdef JSGC_GENERATIONAL
void UpdateJitActivationsForMinorGC(JSRuntime *rt, JSTracer *trc);
void UpdateJitActivationsForMinorGC(PerThreadData *ptd, JSTracer *trc);
#endif
static inline uint32_t

View File

@ -627,54 +627,11 @@ MacroAssembler::newGCFatInlineString(Register result, Register temp, Label *fail
void
MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
gc::AllocKind allocKind, Label *fail)
{
#ifdef JSGC_FJGENERATIONAL
if (IsNurseryAllocable(allocKind))
return newGCNurseryThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
#endif
return newGCTenuredThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
}
#ifdef JSGC_FJGENERATIONAL
void
MacroAssembler::newGCNurseryThingPar(Register result, Register cx,
Register tempReg1, Register tempReg2,
gc::AllocKind allocKind, Label *fail)
{
JS_ASSERT(IsNurseryAllocable(allocKind));
uint32_t thingSize = uint32_t(gc::Arena::thingSize(allocKind));
// Correctness depends on thingSize being smaller than a chunk
// (not a problem) and the last chunk of the nursery not being
// located at the very top of the address space. The regular
// Nursery makes the same assumption, see nurseryAllocate() above.
// The ForkJoinNursery is a member variable of the ForkJoinContext.
size_t offsetOfPosition =
ForkJoinContext::offsetOfFJNursery() + gc::ForkJoinNursery::offsetOfPosition();
size_t offsetOfEnd =
ForkJoinContext::offsetOfFJNursery() + gc::ForkJoinNursery::offsetOfCurrentEnd();
loadPtr(Address(cx, offsetOfPosition), result);
loadPtr(Address(cx, offsetOfEnd), tempReg2);
computeEffectiveAddress(Address(result, thingSize), tempReg1);
branchPtr(Assembler::Below, tempReg2, tempReg1, fail);
storePtr(tempReg1, Address(cx, offsetOfPosition));
}
#endif
void
MacroAssembler::newGCTenuredThingPar(Register result, Register cx,
Register tempReg1, Register tempReg2,
gc::AllocKind allocKind, Label *fail)
{
// Similar to ::newGCThing(), except that it allocates from a custom
// Allocator in the ForkJoinContext*, rather than being hardcoded to the
// compartment allocator. This requires two temporary registers.
//
// When the ForkJoin generational collector is enabled this is only used
// for those object types that cannot be allocated in the ForkJoinNursery.
//
// Subtle: I wanted to reuse `result` for one of the temporaries, but the
// register allocator was assigning it to the same register as `cx`.
// Then we overwrite that register which messed up the OOL code.
@ -729,14 +686,14 @@ void
MacroAssembler::newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
Label *fail)
{
newGCTenuredThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_STRING, fail);
newGCThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_STRING, fail);
}
void
MacroAssembler::newGCFatInlineStringPar(Register result, Register cx, Register tempReg1,
Register tempReg2, Label *fail)
{
newGCTenuredThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
newGCThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
}
void

View File

@ -827,12 +827,6 @@ class MacroAssembler : public MacroAssemblerSpecific
void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
gc::AllocKind allocKind, Label *fail);
#ifdef JSGC_FJGENERATIONAL
void newGCNurseryThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
gc::AllocKind allocKind, Label *fail);
#endif
void newGCTenuredThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
gc::AllocKind allocKind, Label *fail);
void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
JSObject *templateObject, Label *fail);
void newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,

View File

@ -201,10 +201,6 @@ class JitFrameIterator
// Returns the IonScript associated with this JS frame.
IonScript *ionScript() const;
// Returns the IonScript associated with this JS frame; the frame must
// not be invalidated.
IonScript *ionScriptFromCalleeToken() const;
// Returns the Safepoint associated with this JS frame. Incurs a lookup
// overhead.
const SafepointIndex *safepoint() const;

View File

@ -374,7 +374,7 @@ class LNewPar : public LInstructionHelper<1, 1, 2>
}
};
class LNewDenseArrayPar : public LInstructionHelper<1, 2, 3>
class LNewDenseArrayPar : public LCallInstructionHelper<1, 2, 3>
{
public:
LIR_HEADER(NewDenseArrayPar);
@ -5361,7 +5361,7 @@ class LRest : public LCallInstructionHelper<1, 1, 3>
}
};
class LRestPar : public LInstructionHelper<1, 2, 3>
class LRestPar : public LCallInstructionHelper<1, 2, 3>
{
public:
LIR_HEADER(RestPar);

View File

@ -132,7 +132,11 @@ LIRGenerator::visitCheckOverRecursedPar(MCheckOverRecursedPar *ins)
{
LCheckOverRecursedPar *lir =
new(alloc()) LCheckOverRecursedPar(useRegister(ins->forkJoinContext()), temp());
return add(lir, ins) && assignSafepoint(lir, ins);
if (!add(lir, ins))
return false;
if (!assignSafepoint(lir, ins))
return false;
return true;
}
bool
@ -225,7 +229,7 @@ LIRGenerator::visitNewCallObjectPar(MNewCallObjectPar *ins)
{
const LAllocation &parThreadContext = useRegister(ins->forkJoinContext());
LNewCallObjectPar *lir = LNewCallObjectPar::New(alloc(), parThreadContext, temp(), temp());
return define(lir, ins) && assignSafepoint(lir, ins);
return define(lir, ins);
}
bool
@ -2090,7 +2094,7 @@ LIRGenerator::visitLambdaPar(MLambdaPar *ins)
LLambdaPar *lir = new(alloc()) LLambdaPar(useRegister(ins->forkJoinContext()),
useRegister(ins->scopeChain()),
temp(), temp());
return define(lir, ins) && assignSafepoint(lir, ins);
return define(lir, ins);
}
bool
@ -2196,30 +2200,30 @@ LIRGenerator::visitInterruptCheckPar(MInterruptCheckPar *ins)
{
LInterruptCheckPar *lir =
new(alloc()) LInterruptCheckPar(useRegister(ins->forkJoinContext()), temp());
return add(lir, ins) && assignSafepoint(lir, ins);
if (!add(lir, ins))
return false;
if (!assignSafepoint(lir, ins))
return false;
return true;
}
bool
LIRGenerator::visitNewPar(MNewPar *ins)
{
LNewPar *lir = new(alloc()) LNewPar(useRegister(ins->forkJoinContext()), temp(), temp());
return define(lir, ins) && assignSafepoint(lir, ins);
return define(lir, ins);
}
bool
LIRGenerator::visitNewDenseArrayPar(MNewDenseArrayPar *ins)
{
JS_ASSERT(ins->forkJoinContext()->type() == MIRType_ForkJoinContext);
JS_ASSERT(ins->length()->type() == MIRType_Int32);
JS_ASSERT(ins->type() == MIRType_Object);
LNewDenseArrayPar *lir =
new(alloc()) LNewDenseArrayPar(useRegister(ins->forkJoinContext()),
useRegister(ins->length()),
temp(),
temp(),
temp());
return define(lir, ins) && assignSafepoint(lir, ins);
new(alloc()) LNewDenseArrayPar(useFixed(ins->forkJoinContext(), CallTempReg0),
useFixed(ins->length(), CallTempReg1),
tempFixed(CallTempReg2),
tempFixed(CallTempReg3),
tempFixed(CallTempReg4));
return defineReturn(lir, ins);
}
bool
@ -3315,12 +3319,12 @@ LIRGenerator::visitRestPar(MRestPar *ins)
{
JS_ASSERT(ins->numActuals()->type() == MIRType_Int32);
LRestPar *lir = new(alloc()) LRestPar(useRegister(ins->forkJoinContext()),
useRegister(ins->numActuals()),
temp(),
temp(),
temp());
return define(lir, ins) && assignSafepoint(lir, ins);
LRestPar *lir = new(alloc()) LRestPar(useFixed(ins->forkJoinContext(), CallTempReg0),
useFixed(ins->numActuals(), CallTempReg1),
tempFixed(CallTempReg2),
tempFixed(CallTempReg3),
tempFixed(CallTempReg4));
return defineReturn(lir, ins) && assignSafepoint(lir, ins);
}
bool

View File

@ -1692,10 +1692,6 @@ class MNewPar : public MUnaryInstruction
JSObject *templateObject() const {
return templateObject_;
}
AliasSet getAliasSet() const {
return AliasSet::None();
}
};
// Creates a new derived type object. At runtime, this is just a call
@ -9832,10 +9828,6 @@ class MNewDenseArrayPar : public MBinaryInstruction
bool possiblyCalls() const {
return true;
}
AliasSet getAliasSet() const {
return AliasSet::None();
}
};
// A resume point contains the information needed to reconstruct the Baseline

View File

@ -36,11 +36,7 @@ JSObject *
jit::NewGCThingPar(ForkJoinContext *cx, gc::AllocKind allocKind)
{
JS_ASSERT(ForkJoinContext::current() == cx);
#ifdef JSGC_FJGENERATIONAL
return js::NewGCObject<CanGC>(cx, allocKind, 0, gc::DefaultHeap);
#else
return js::NewGCObject<NoGC>(cx, allocKind, 0, gc::TenuredHeap);
#endif
}
bool

View File

@ -608,11 +608,6 @@ ParallelSafetyVisitor::replace(MInstruction *oldInstruction,
MBasicBlock *block = oldInstruction->block();
block->insertBefore(oldInstruction, replacementInstruction);
oldInstruction->replaceAllUsesWith(replacementInstruction);
MResumePoint *rp = oldInstruction->resumePoint();
if (rp && rp->instruction() == oldInstruction) {
rp->setInstruction(replacementInstruction);
replacementInstruction->setResumePoint(rp);
}
block->discard(oldInstruction);
// We may have replaced a specialized Float32 instruction by its

View File

@ -89,13 +89,6 @@ class JS_PUBLIC_API(AutoGCRooter) {
static void traceAll(JSTracer *trc);
static void traceAllWrappers(JSTracer *trc);
/* T must be a context type */
template<typename T>
static void traceAllInContext(T* cx, JSTracer *trc) {
for (AutoGCRooter *gcr = cx->autoGCRooters; gcr; gcr = gcr->down)
gcr->trace(trc);
}
protected:
AutoGCRooter * const down;

View File

@ -19,8 +19,6 @@
#include "vm/Interpreter.h"
#include "vm/ProxyObject.h"
#include "gc/ForkJoinNursery-inl.h"
namespace js {
#ifdef JS_CRASH_DIAGNOSTICS

View File

@ -769,7 +769,7 @@ Chunk::init(JSRuntime *rt)
/* Initialize the chunk info. */
info.age = 0;
info.trailer.storeBuffer = nullptr;
info.trailer.location = ChunkLocationBitTenuredHeap;
info.trailer.location = ChunkLocationTenuredHeap;
info.trailer.runtime = rt;
/* The rest of info fields are initialized in pickChunk. */
@ -878,17 +878,8 @@ Chunk::allocateArena(Zone *zone, AllocKind thingKind)
JS_ASSERT(hasAvailableArenas());
JSRuntime *rt = zone->runtimeFromAnyThread();
if (!rt->isHeapMinorCollecting() && rt->gc.bytes >= rt->gc.maxBytes) {
#ifdef JSGC_FJGENERATIONAL
// This is an approximation to the best test, which would check that
// this thread is currently promoting into the tenured area. I doubt
// the better test would make much difference.
if (!rt->isFJMinorCollecting())
return nullptr;
#else
if (!rt->isHeapMinorCollecting() && rt->gc.bytes >= rt->gc.maxBytes)
return nullptr;
#endif
}
ArenaHeader *aheader = MOZ_LIKELY(info.numArenasFreeCommitted > 0)
? fetchNextFreeArena(rt)
@ -1621,7 +1612,7 @@ ArenaLists::allocateFromArenaInline(Zone *zone, AllocKind thingKind,
/*
* While we still hold the GC lock get an arena from some chunk, mark it
* as full as its single free span is moved to the free lists, and insert
* as full as its single free span is moved to the free lits, and insert
* it to the list as a fully allocated arena.
*
* We add the arena before the the head, so that after the GC the most
@ -2074,7 +2065,7 @@ GCRuntime::triggerGC(JS::gcreason::Reason reason)
bool
js::TriggerZoneGC(Zone *zone, JS::gcreason::Reason reason)
{
return zone->runtimeFromAnyThread()->gc.triggerZoneGC(zone, reason);
return zone->runtimeFromAnyThread()->gc.triggerZoneGC(zone,reason);
}
bool
@ -2294,10 +2285,6 @@ DecommitArenas(JSRuntime *rt)
static void
ExpireChunksAndArenas(JSRuntime *rt, bool shouldShrink)
{
#ifdef JSGC_FJGENERATIONAL
rt->threadPool.pruneChunkCache();
#endif
if (Chunk *toFree = rt->gc.chunkPool.expire(rt, shouldShrink)) {
AutoUnlockGC unlock(rt);
FreeChunkList(rt, toFree);

View File

@ -41,10 +41,6 @@ class ScopeObject;
class Shape;
class UnownedBaseShape;
namespace gc {
class ForkJoinNursery;
}
unsigned GetCPUCount();
enum HeapState {
@ -200,42 +196,6 @@ IsNurseryAllocable(AllocKind kind)
}
#endif
#if defined(JSGC_FJGENERATIONAL)
// This is separate from IsNurseryAllocable() so that the latter can evolve
// without worrying about what the ForkJoinNursery's needs are, and vice
// versa to some extent.
static inline bool
IsFJNurseryAllocable(AllocKind kind)
{
JS_ASSERT(kind >= 0 && unsigned(kind) < FINALIZE_LIMIT);
static const bool map[] = {
false, /* FINALIZE_OBJECT0 */
true, /* FINALIZE_OBJECT0_BACKGROUND */
false, /* FINALIZE_OBJECT2 */
true, /* FINALIZE_OBJECT2_BACKGROUND */
false, /* FINALIZE_OBJECT4 */
true, /* FINALIZE_OBJECT4_BACKGROUND */
false, /* FINALIZE_OBJECT8 */
true, /* FINALIZE_OBJECT8_BACKGROUND */
false, /* FINALIZE_OBJECT12 */
true, /* FINALIZE_OBJECT12_BACKGROUND */
false, /* FINALIZE_OBJECT16 */
true, /* FINALIZE_OBJECT16_BACKGROUND */
false, /* FINALIZE_SCRIPT */
false, /* FINALIZE_LAZY_SCRIPT */
false, /* FINALIZE_SHAPE */
false, /* FINALIZE_BASE_SHAPE */
false, /* FINALIZE_TYPE_OBJECT */
false, /* FINALIZE_FAT_INLINE_STRING */
false, /* FINALIZE_STRING */
false, /* FINALIZE_EXTERNAL_STRING */
false, /* FINALIZE_JITCODE */
};
JS_STATIC_ASSERT(JS_ARRAY_LENGTH(map) == FINALIZE_LIMIT);
return map[kind];
}
#endif
static inline bool
IsBackgroundFinalized(AllocKind kind)
{
@ -822,7 +782,6 @@ class ArenaLists
inline void normalizeBackgroundFinalizeState(AllocKind thingKind);
friend class js::Nursery;
friend class js::gc::ForkJoinNursery;
};
/*

View File

@ -10,7 +10,6 @@
#include "jsgc.h"
#include "gc/Zone.h"
#include "vm/ForkJoin.h"
namespace js {
@ -30,17 +29,8 @@ ThreadSafeContext::isThreadLocal(T thing) const
if (!isForkJoinContext())
return true;
#ifdef JSGC_FJGENERATIONAL
ForkJoinContext *cx = static_cast<ForkJoinContext*>(const_cast<ThreadSafeContext*>(this));
if (cx->fjNursery().isInsideNewspace(thing))
return true;
#endif
// Global invariant
JS_ASSERT(!IsInsideNursery(thing));
// The thing is not in the nursery, but is it in the private tenured area?
if (allocator_->arenas.containsArena(runtime_, thing->arenaHeader()))
if (!IsInsideNursery(thing) &&
allocator_->arenas.containsArena(runtime_, thing->arenaHeader()))
{
// GC should be suppressed in preparation for mutating thread local
// objects, as we don't want to trip any barriers.
@ -74,14 +64,6 @@ ShouldNurseryAllocate(const Nursery &nursery, AllocKind kind, InitialHeap heap)
}
#endif
#ifdef JSGC_FJGENERATIONAL
inline bool
ShouldFJNurseryAllocate(const ForkJoinNursery &nursery, AllocKind kind, InitialHeap heap)
{
return IsFJNurseryAllocable(kind) && heap != TenuredHeap;
}
#endif
inline JSGCTraceKind
GetGCThingTraceKind(const void *thing)
{
@ -121,19 +103,15 @@ class ArenaIter
init(zone, kind);
}
void init(Allocator *allocator, AllocKind kind) {
aheader = allocator->arenas.getFirstArena(kind);
remainingHeader = allocator->arenas.getFirstArenaToSweep(kind);
void init(JS::Zone *zone, AllocKind kind) {
aheader = zone->allocator.arenas.getFirstArena(kind);
remainingHeader = zone->allocator.arenas.getFirstArenaToSweep(kind);
if (!aheader) {
aheader = remainingHeader;
remainingHeader = nullptr;
}
}
void init(JS::Zone *zone, AllocKind kind) {
init(&zone->allocator, kind);
}
bool done() const {
return !aheader;
}
@ -182,11 +160,7 @@ class ArenaCellIterImpl
}
public:
ArenaCellIterImpl()
: firstThingOffset(0) // Squelch
, thingSize(0) // warnings
{
}
ArenaCellIterImpl() {}
void initUnsynchronized(ArenaHeader *aheader) {
AllocKind kind = aheader->getAllocKind();
@ -478,28 +452,6 @@ TryNewNurseryObject(ThreadSafeContext *cxArg, size_t thingSize, size_t nDynamicS
}
#endif /* JSGC_GENERATIONAL */
#ifdef JSGC_FJGENERATIONAL
template <AllowGC allowGC>
inline JSObject *
TryNewFJNurseryObject(ForkJoinContext *cx, size_t thingSize, size_t nDynamicSlots)
{
ForkJoinNursery &nursery = cx->fjNursery();
bool tooLarge = false;
JSObject *obj = nursery.allocateObject(thingSize, nDynamicSlots, tooLarge);
if (obj)
return obj;
if (!tooLarge && allowGC) {
nursery.minorGC();
obj = nursery.allocateObject(thingSize, nDynamicSlots, tooLarge);
if (obj)
return obj;
}
return nullptr;
}
#endif /* JSGC_FJGENERATIONAL */
static inline bool
PossiblyFail()
{
@ -589,16 +541,6 @@ AllocateObject(ThreadSafeContext *cx, AllocKind kind, size_t nDynamicSlots, Init
return obj;
}
#endif
#ifdef JSGC_FJGENERATIONAL
if (cx->isForkJoinContext() &&
ShouldFJNurseryAllocate(cx->asForkJoinContext()->fjNursery(), kind, heap))
{
JSObject *obj =
TryNewFJNurseryObject<allowGC>(cx->asForkJoinContext(), thingSize, nDynamicSlots);
if (obj)
return obj;
}
#endif
HeapSlot *slots = nullptr;
if (nDynamicSlots) {
@ -646,8 +588,6 @@ AllocateNonObject(ThreadSafeContext *cx)
* other hand, since these allocations are extremely common, we don't want to
* delay GC from these allocation sites. Instead we allow the GC, but still
* fail the allocation, forcing the non-cached path.
*
* Observe this won't be used for ForkJoin allocation, as it takes a JSContext*
*/
template <AllowGC allowGC>
inline JSObject *

View File

@ -2837,26 +2837,16 @@ JSObject::setSlotSpan(ThreadSafeContext *cx, HandleObject obj, uint32_t span)
return true;
}
// This will not run the garbage collector. If a nursery cannot accomodate the slot array
// an attempt will be made to place the array in the tenured area.
static HeapSlot *
AllocateSlots(ThreadSafeContext *cx, JSObject *obj, uint32_t nslots)
{
#ifdef JSGC_GENERATIONAL
if (cx->isJSContext())
return cx->asJSContext()->runtime()->gc.nursery.allocateSlots(cx->asJSContext(), obj, nslots);
#endif
#ifdef JSGC_FJGENERATIONAL
if (cx->isForkJoinContext())
return cx->asForkJoinContext()->fjNursery().allocateSlots(obj, nslots);
#endif
return cx->pod_malloc<HeapSlot>(nslots);
}
// This will not run the garbage collector. If a nursery cannot accomodate the slot array
// an attempt will be made to place the array in the tenured area.
//
// If this returns null then the old slots will be left alone.
static HeapSlot *
ReallocateSlots(ThreadSafeContext *cx, JSObject *obj, HeapSlot *oldSlots,
uint32_t oldCount, uint32_t newCount)
@ -2864,14 +2854,8 @@ ReallocateSlots(ThreadSafeContext *cx, JSObject *obj, HeapSlot *oldSlots,
#ifdef JSGC_GENERATIONAL
if (cx->isJSContext()) {
return cx->asJSContext()->runtime()->gc.nursery.reallocateSlots(cx->asJSContext(),
obj, oldSlots,
oldCount, newCount);
}
#endif
#ifdef JSGC_FJGENERATIONAL
if (cx->isForkJoinContext()) {
return cx->asForkJoinContext()->fjNursery().reallocateSlots(obj, oldSlots,
oldCount, newCount);
obj, oldSlots,
oldCount, newCount);
}
#endif
return (HeapSlot *)cx->realloc_(oldSlots, oldCount * sizeof(HeapSlot),
@ -2943,14 +2927,10 @@ JSObject::growSlots(ThreadSafeContext *cx, HandleObject obj, uint32_t oldCount,
static void
FreeSlots(ThreadSafeContext *cx, HeapSlot *slots)
{
// Note: threads without a JSContext do not have access to nursery allocated things.
#ifdef JSGC_GENERATIONAL
// Note: threads without a JSContext do not have access to GGC nursery allocated things.
if (cx->isJSContext())
return cx->asJSContext()->runtime()->gc.nursery.freeSlots(cx->asJSContext(), slots);
#endif
#ifdef JSGC_FJGENERATIONAL
if (cx->isForkJoinContext())
return cx->asForkJoinContext()->fjNursery().freeSlots(slots);
#endif
js_free(slots);
}
@ -3167,8 +3147,6 @@ JSObject::maybeDensifySparseElements(js::ExclusiveContext *cx, HandleObject obj)
return ED_OK;
}
// This will not run the garbage collector. If a nursery cannot accomodate the element array
// an attempt will be made to place the array in the tenured area.
static ObjectElements *
AllocateElements(ThreadSafeContext *cx, JSObject *obj, uint32_t nelems)
{
@ -3176,16 +3154,10 @@ AllocateElements(ThreadSafeContext *cx, JSObject *obj, uint32_t nelems)
if (cx->isJSContext())
return cx->asJSContext()->runtime()->gc.nursery.allocateElements(cx->asJSContext(), obj, nelems);
#endif
#ifdef JSGC_FJGENERATIONAL
if (cx->isForkJoinContext())
return cx->asForkJoinContext()->fjNursery().allocateElements(obj, nelems);
#endif
return static_cast<js::ObjectElements *>(cx->malloc_(nelems * sizeof(HeapValue)));
}
// This will not run the garbage collector. If a nursery cannot accomodate the element array
// an attempt will be made to place the array in the tenured area.
static ObjectElements *
ReallocateElements(ThreadSafeContext *cx, JSObject *obj, ObjectElements *oldHeader,
uint32_t oldCount, uint32_t newCount)
@ -3193,14 +3165,8 @@ ReallocateElements(ThreadSafeContext *cx, JSObject *obj, ObjectElements *oldHead
#ifdef JSGC_GENERATIONAL
if (cx->isJSContext()) {
return cx->asJSContext()->runtime()->gc.nursery.reallocateElements(cx->asJSContext(), obj,
oldHeader, oldCount,
newCount);
}
#endif
#ifdef JSGC_FJGENERATIONAL
if (cx->isForkJoinContext()) {
return cx->asForkJoinContext()->fjNursery().reallocateElements(obj, oldHeader,
oldCount, newCount);
oldHeader, oldCount,
newCount);
}
#endif

View File

@ -193,10 +193,6 @@ DenseRangeWriteBarrierPost(JSRuntime *rt, JSObject *obj, uint32_t start, uint32_
#endif
}
namespace gc {
class ForkJoinNursery;
}
} /* namespace js */
/*
@ -216,7 +212,6 @@ class JSObject : public js::ObjectImpl
friend struct js::GCMarker;
friend class js::NewObjectCache;
friend class js::Nursery;
friend class js::gc::ForkJoinNursery;
/* Make the type object to use for LAZY_TYPE objects. */
static js::types::TypeObject *makeLazyType(JSContext *cx, js::HandleObject obj);

View File

@ -21,7 +21,6 @@
#include "jsgcinlines.h"
#include "jsinferinlines.h"
#include "gc/ForkJoinNursery-inl.h"
#include "vm/ObjectImpl-inl.h"
/* static */ inline bool

View File

@ -112,7 +112,6 @@ UNIFIED_SOURCES += [
'frontend/ParseNode.cpp',
'frontend/TokenStream.cpp',
'gc/Barrier.cpp',
'gc/ForkJoinNursery.cpp',
'gc/Iteration.cpp',
'gc/Marking.cpp',
'gc/Memory.cpp',
@ -458,8 +457,6 @@ if CONFIG['NIGHTLY_BUILD']:
DEFINES['ENABLE_PARALLEL_JS'] = True
DEFINES['ENABLE_BINARYDATA'] = True
DEFINES['ENABLE_SHARED_ARRAY_BUFFER'] = True
if CONFIG['JSGC_GENERATIONAL_CONFIGURED']:
DEFINES['JSGC_FJGENERATIONAL'] = True
DEFINES['EXPORT_JS_API'] = True

View File

@ -799,14 +799,8 @@ ArrayBufferObject::finalize(FreeOp *fop, JSObject *obj)
/* static */ void
ArrayBufferObject::obj_trace(JSTracer *trc, JSObject *obj)
{
if (!IS_GC_MARKING_TRACER(trc) && !trc->runtime()->isHeapMinorCollecting()
#ifdef JSGC_FJGENERATIONAL
&& !trc->runtime()->isFJMinorCollecting()
#endif
)
{
if (!IS_GC_MARKING_TRACER(trc) && !trc->runtime()->isHeapMinorCollecting())
return;
}
// ArrayBufferObjects need to maintain a list of possibly-weak pointers to
// their views. The straightforward way to update the weak pointers would

View File

@ -27,7 +27,7 @@
#if defined(JS_THREADSAFE) && defined(JS_ION)
# include "jit/JitCommon.h"
# ifdef FORKJOIN_SPEW
# ifdef DEBUG
# include "jit/Ion.h"
# include "jit/JitCompartment.h"
# include "jit/MIR.h"
@ -35,7 +35,6 @@
# endif
#endif // THREADSAFE && ION
#include "gc/ForkJoinNursery-inl.h"
#include "vm/Interpreter-inl.h"
using namespace js;
@ -280,7 +279,7 @@ class ForkJoinOperation
jsbytecode *bailoutBytecode;
ForkJoinOperation(JSContext *cx, HandleFunction fun, uint16_t sliceStart,
uint16_t sliceEnd, ForkJoinMode mode, HandleObject updatable);
uint16_t sliceEnd, ForkJoinMode mode);
ExecutionStatus apply();
private:
@ -319,7 +318,6 @@ class ForkJoinOperation
JSContext *cx_;
HandleFunction fun_;
HandleObject updatable_;
uint16_t sliceStart_;
uint16_t sliceEnd_;
Vector<ParallelBailoutRecord, 16> bailoutRecords_;
@ -347,17 +345,12 @@ class ForkJoinOperation
class ForkJoinShared : public ParallelJob, public Monitor
{
#ifdef JSGC_FJGENERATIONAL
friend class gc::ForkJoinGCShared;
#endif
/////////////////////////////////////////////////////////////////////////
// Constant fields
JSContext *const cx_; // Current context
ThreadPool *const threadPool_; // The thread pool
HandleFunction fun_; // The JavaScript function to execute
HandleObject updatable_; // Pre-existing object that might be updated
uint16_t sliceStart_; // The starting slice id.
uint16_t sliceEnd_; // The ending slice id + 1.
PRLock *cxLock_; // Locks cx_ for parallel VM calls
@ -394,7 +387,6 @@ class ForkJoinShared : public ParallelJob, public Monitor
ForkJoinShared(JSContext *cx,
ThreadPool *threadPool,
HandleFunction fun,
HandleObject updatable,
uint16_t sliceStart,
uint16_t sliceEnd,
ParallelBailoutRecord *records);
@ -436,8 +428,6 @@ class ForkJoinShared : public ParallelJob, public Monitor
JSContext *acquireJSContext() { PR_Lock(cxLock_); return cx_; }
void releaseJSContext() { PR_Unlock(cxLock_); }
HandleObject updatable() { return updatable_; }
};
class AutoEnterWarmup
@ -512,26 +502,24 @@ static const char *ForkJoinModeString(ForkJoinMode mode);
bool
js::ForkJoin(JSContext *cx, CallArgs &args)
{
JS_ASSERT(args.length() == 5); // else the self-hosted code is wrong
JS_ASSERT(args.length() == 4); // else the self-hosted code is wrong
JS_ASSERT(args[0].isObject());
JS_ASSERT(args[0].toObject().is<JSFunction>());
JS_ASSERT(args[1].isInt32());
JS_ASSERT(args[2].isInt32());
JS_ASSERT(args[3].isInt32());
JS_ASSERT(args[3].toInt32() < NumForkJoinModes);
JS_ASSERT(args[4].isObjectOrNull());
RootedFunction fun(cx, &args[0].toObject().as<JSFunction>());
uint16_t sliceStart = (uint16_t)(args[1].toInt32());
uint16_t sliceEnd = (uint16_t)(args[2].toInt32());
ForkJoinMode mode = (ForkJoinMode)(args[3].toInt32());
RootedObject updatable(cx, args[4].toObjectOrNull());
MOZ_ASSERT(sliceStart == args[1].toInt32());
MOZ_ASSERT(sliceEnd == args[2].toInt32());
MOZ_ASSERT(sliceStart <= sliceEnd);
ForkJoinOperation op(cx, fun, sliceStart, sliceEnd, mode, updatable);
ForkJoinOperation op(cx, fun, sliceStart, sliceEnd, mode);
ExecutionStatus status = op.apply();
if (status == ExecutionFatal)
return false;
@ -590,14 +578,13 @@ ForkJoinModeString(ForkJoinMode mode) {
}
ForkJoinOperation::ForkJoinOperation(JSContext *cx, HandleFunction fun, uint16_t sliceStart,
uint16_t sliceEnd, ForkJoinMode mode, HandleObject updatable)
uint16_t sliceEnd, ForkJoinMode mode)
: bailouts(0),
bailoutCause(ParallelBailoutNone),
bailoutScript(cx),
bailoutBytecode(nullptr),
cx_(cx),
fun_(fun),
updatable_(updatable),
sliceStart_(sliceStart),
sliceEnd_(sliceEnd),
bailoutRecords_(cx),
@ -1250,8 +1237,7 @@ ForkJoinOperation::parallelExecution(ExecutionStatus *status)
ForkJoinActivation activation(cx_);
ThreadPool *threadPool = &cx_->runtime()->threadPool;
ForkJoinShared shared(cx_, threadPool, fun_, updatable_, sliceStart_, sliceEnd_,
&bailoutRecords_[0]);
ForkJoinShared shared(cx_, threadPool, fun_, sliceStart_, sliceEnd_, &bailoutRecords_[0]);
if (!shared.init()) {
*status = ExecutionFatal;
return RedLight;
@ -1347,8 +1333,7 @@ class ParallelIonInvoke
bool invoke(ForkJoinContext *cx) {
JitActivation activation(cx);
// In-out parameter: on input it denotes the number of values to preserve after the call.
Value result = Int32Value(0);
Value result;
CALL_GENERATED_CODE(enter_, jitcode_, argc_ + 1, argv_ + 1, nullptr, calleeToken_,
nullptr, 0, &result);
return !result.isMagic();
@ -1362,14 +1347,12 @@ class ParallelIonInvoke
ForkJoinShared::ForkJoinShared(JSContext *cx,
ThreadPool *threadPool,
HandleFunction fun,
HandleObject updatable,
uint16_t sliceStart,
uint16_t sliceEnd,
ParallelBailoutRecord *records)
: cx_(cx),
threadPool_(threadPool),
fun_(fun),
updatable_(updatable),
sliceStart_(sliceStart),
sliceEnd_(sliceEnd),
cxLock_(nullptr),
@ -1442,15 +1425,12 @@ ForkJoinShared::execute()
// Push parallel tasks and wait until they're all done.
jobResult = threadPool_->executeJob(cx_, this, sliceStart_, sliceEnd_);
if (jobResult == TP_FATAL)
return TP_FATAL;
}
// Arenas must be transfered unconditionally until we have the means
// to clear the ForkJoin result array, see bug 993347.
transferArenasToCompartmentAndProcessGCRequests();
if (jobResult == TP_FATAL)
return TP_FATAL;
// Check if any of the workers failed.
if (abort_) {
if (fatal_)
@ -1458,15 +1438,11 @@ ForkJoinShared::execute()
return TP_RETRY_SEQUENTIALLY;
}
#ifdef FORKJOIN_SPEW
#ifdef DEBUG
Spew(SpewOps, "Completed parallel job [slices: %d, threads: %d, stolen: %d (work stealing:%s)]",
sliceEnd_ - sliceStart_,
threadPool_->numWorkers(),
#ifdef DEBUG
threadPool_->stolenSlices(),
#else
0,
#endif
threadPool_->workStealing() ? "ON" : "OFF");
#endif
@ -1482,7 +1458,6 @@ ForkJoinShared::transferArenasToCompartmentAndProcessGCRequests()
comp->adoptWorkerAllocator(allocators_[i]);
if (gcRequested_) {
Spew(SpewGC, "Triggering garbage collection in SpiderMonkey heap");
if (!gcZone_)
TriggerGC(cx_->runtime(), gcReason_);
else
@ -1518,22 +1493,7 @@ ForkJoinShared::executeFromWorker(ThreadPoolWorker *worker, uintptr_t stackLimit
bool
ForkJoinShared::executeFromMainThread(ThreadPoolWorker *worker)
{
// Note that we need new PerThreadData on the main thread as well,
// so that PJS GC does not walk up the old mainThread stack.
PerThreadData *oldData = TlsPerThreadData.get();
PerThreadData thisThread(cx_->runtime());
if (!thisThread.init()) {
setAbortFlagAndRequestInterrupt(true);
return false;
}
TlsPerThreadData.set(&thisThread);
// Don't use setIonStackLimit() because that acquires the ionStackLimitLock, and the
// lock has not been initialized in these cases.
thisThread.jitStackLimit = oldData->jitStackLimit;
executePortion(&thisThread, worker);
TlsPerThreadData.set(oldData);
executePortion(&cx_->mainThread(), worker);
return !abort_;
}
@ -1552,7 +1512,7 @@ ForkJoinShared::executePortion(PerThreadData *perThread, ThreadPoolWorker *worke
// assertion here for maximum clarity.
JS::AutoSuppressGCAnalysis nogc;
#ifdef FORKJOIN_SPEW
#ifdef DEBUG
// Set the maximum worker and slice number for prettier spewing.
cx.maxWorkerId = threadPool_->numWorkers();
#endif
@ -1584,36 +1544,8 @@ ForkJoinShared::executePortion(PerThreadData *perThread, ThreadPoolWorker *worke
bool ok = fii.invoke(&cx);
JS_ASSERT(ok == !cx.bailoutRecord->topScript);
if (!ok) {
if (!ok)
setAbortFlagAndRequestInterrupt(false);
#ifdef JSGC_FJGENERATIONAL
// TODO: See bugs 1010169, 993347.
//
// It is not desirable to promote here, but if we don't do
// this then we can't unconditionally transfer arenas to
// the compartment, since the arenas can contain objects
// that point into the nurseries. If those objects are
// touched at all by the GC, eg as part of a prebarrier,
// then chaos ensues.
//
// The proper fix might appear to be to note the abort and
// not transfer, but instead clear, the arenas. However,
// the result array will remain live and unless it is
// cleared immediately and without running barriers then
// it will have pointers into the now-cleared areas, which
// is also wrong.
//
// For the moment, until we figure out how to clear the
// result array properly and implement that, it may be
// that the best thing we can do here is to evacuate and
// then let the GC run its course.
cx.evacuateLiveData();
#endif
} else {
#ifdef JSGC_FJGENERATIONAL
cx.evacuateLiveData();
#endif
}
}
Spew(SpewOps, "Down");
@ -1676,49 +1608,6 @@ ForkJoinShared::requestZoneGC(JS::Zone *zone, JS::gcreason::Reason reason)
}
}
#ifdef JSGC_FJGENERATIONAL
JSRuntime*
js::gc::ForkJoinGCShared::runtime()
{
return shared_->runtime();
}
JS::Zone*
js::gc::ForkJoinGCShared::zone()
{
return shared_->zone();
}
JSObject*
js::gc::ForkJoinGCShared::updatable()
{
return shared_->updatable();
}
js::gc::ForkJoinNurseryChunk *
js::gc::ForkJoinGCShared::allocateNurseryChunk()
{
return shared_->threadPool_->getChunk();
}
void
js::gc::ForkJoinGCShared::freeNurseryChunk(js::gc::ForkJoinNurseryChunk *p)
{
shared_->threadPool_->putFreeChunk(p);
}
void
js::gc::ForkJoinGCShared::spewGC(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
SpewVA(SpewGC, fmt, ap);
va_end(ap);
}
#endif // JSGC_FJGENERATIONAL
/////////////////////////////////////////////////////////////////////////////
// ForkJoinContext
//
@ -1731,10 +1620,6 @@ ForkJoinContext::ForkJoinContext(PerThreadData *perThreadData, ThreadPoolWorker
targetRegionStart(nullptr),
targetRegionEnd(nullptr),
shared_(shared),
#ifdef JSGC_FJGENERATIONAL
gcShared_(shared),
fjNursery_(const_cast<ForkJoinContext*>(this), &this->gcShared_, allocator),
#endif
worker_(worker),
acquiredJSContext_(false),
nogc_()
@ -1894,7 +1779,7 @@ js::ParallelBailoutRecord::addTrace(JSScript *script,
// Debug spew
//
#ifdef FORKJOIN_SPEW
#ifdef DEBUG
static const char *
ExecutionStatusToString(ExecutionStatus status)
@ -1988,8 +1873,6 @@ class ParallelSpewer
active[SpewCompile] = true;
if (strstr(env, "bailouts"))
active[SpewBailouts] = true;
if (strstr(env, "gc"))
active[SpewGC] = true;
if (strstr(env, "full")) {
for (uint32_t i = 0; i < NumSpewChannels; i++)
active[i] = true;
@ -2194,12 +2077,6 @@ parallel::Spew(SpewChannel channel, const char *fmt, ...)
va_end(ap);
}
void
parallel::SpewVA(SpewChannel channel, const char *fmt, va_list ap)
{
spewer.spewVA(channel, fmt, ap);
}
void
parallel::SpewBeginOp(JSContext *cx, const char *name)
{
@ -2248,7 +2125,7 @@ parallel::SpewBailoutIR(IonLIRTraceData *data)
spewer.spewBailoutIR(data);
}
#endif // FORKJOIN_SPEW
#endif // DEBUG
bool
js::InExclusiveParallelSection()

View File

@ -9,19 +9,12 @@
#include "mozilla/ThreadLocal.h"
#include <stdarg.h>
#include "jscntxt.h"
#include "gc/ForkJoinNursery.h"
#include "gc/GCInternals.h"
#include "jit/Ion.h"
#ifdef DEBUG
#define FORKJOIN_SPEW
#endif
///////////////////////////////////////////////////////////////////////////
// Read Me First
//
@ -37,7 +30,7 @@
// to enable parallel execution. At the top-level, it consists of a native
// function (exposed as the ForkJoin intrinsic) that is used like so:
//
// ForkJoin(func, sliceStart, sliceEnd, mode, updatable)
// ForkJoin(func, sliceStart, sliceEnd, mode)
//
// The intention of this statement is to start some number (usually the
// number of hardware threads) of copies of |func()| running in parallel. Each
@ -54,13 +47,6 @@
// The fourth argument, |mode|, is an internal mode integer giving finer
// control over the behavior of ForkJoin. See the |ForkJoinMode| enum.
//
// The fifth argument, |updatable|, if not null, is an object that may
// be updated in a race-free manner by |func()| or its callees.
// Typically this is some sort of pre-sized array. Only this object
// may be updated by |func()|, and updates must not race. (A more
// general approach is perhaps desirable, eg passing an Array of
// objects that may be updated, but that is not presently needed.)
//
// func() should expect the following arguments:
//
// func(workerId, sliceStart, sliceEnd)
@ -178,7 +164,7 @@
// the error location might not be in the same JSScript as the one
// which was executing due to inlining.
//
// Garbage collection, allocation, and write barriers:
// Garbage collection and allocation:
//
// Code which executes on these parallel threads must be very careful
// with respect to garbage collection and allocation. The typical
@ -187,49 +173,24 @@
// any synchronization. They can also trigger GC in an ad-hoc way.
//
// To deal with this, the forkjoin code creates a distinct |Allocator|
// object for each worker, which is used as follows.
//
// In a non-generational setting you can access the appropriate
// allocator via the |ForkJoinContext| object that is provided to the
// callbacks. Once the parallel execution is complete, all the
// objects found in these distinct |Allocator| are merged back into
// the main compartment lists and things proceed normally. (If it is
// known that the result array contains no references then no merging
// is necessary.)
//
// In a generational setting there is a per-thread |ForkJoinNursery|
// in addition to the per-thread Allocator. All "simple" objects
// (meaning they are reasonably small, can be copied, and have no
// complicated finalization semantics) are allocated in the nurseries;
// other objects are allocated directly in the threads' Allocators,
// which serve as the tenured areas for the threads.
//
// When a thread's nursery fills up it can be collected independently
// of the other threads' nurseries, and does not require any of the
// threads to bail out of the parallel section. The nursery is
// copy-collected, and the expectation is that the survival rate will
// be very low and the collection will be very cheap.
//
// When the parallel execution is complete, and only if merging of the
// Allocators into the main compartment is necessary, then the live
// objects of the nurseries are copied into the respective Allocators,
// in parallel, before the merging takes place.
// object for each slice. You can access the appropriate object via
// the |ForkJoinContext| object that is provided to the callbacks. Once
// the execution is complete, all the objects found in these distinct
// |Allocator| is merged back into the main compartment lists and
// things proceed normally.
//
// In Ion-generated code, we will do allocation through the
// |ForkJoinNursery| or |Allocator| found in |ForkJoinContext| (which
// is obtained via TLS).
//
// No write barriers are emitted. We permit writes to thread-local
// objects, and such writes can create cross-generational pointers or
// pointers that may interact with incremental GC. However, the
// per-thread generational collector scans its entire tenured area on
// each minor collection, and we block upon entering a parallel
// section to ensure that any concurrent marking or incremental GC has
// completed.
// |Allocator| found in |ForkJoinContext| (which is obtained via TLS).
// Also, no write barriers are emitted. Conceptually, we should never
// need a write barrier because we only permit writes to objects that
// are newly allocated, and such objects are always black (to use
// incremental GC terminology). However, to be safe, we also block
// upon entering a parallel section to ensure that any concurrent
// marking or incremental GC has completed.
//
// In the future, it should be possible to lift the restriction that
// we must block until incremental GC has completed. But we're not
// there yet.
// we must block until inc. GC has completed and also to permit GC
// during parallel execution. But we're not there yet.
//
// Load balancing (work stealing):
//
@ -355,7 +316,7 @@ class ForkJoinContext : public ThreadSafeContext
// Bailout record used to record the reason this thread stopped executing
ParallelBailoutRecord *const bailoutRecord;
#ifdef FORKJOIN_SPEW
#ifdef DEBUG
// Records the last instr. to execute on this thread.
IonLIRTraceData traceData;
@ -451,21 +412,6 @@ class ForkJoinContext : public ThreadSafeContext
return offsetof(ForkJoinContext, worker_);
}
#ifdef JSGC_FJGENERATIONAL
// There is already a nursery() method in ThreadSafeContext.
gc::ForkJoinNursery &fjNursery() { return fjNursery_; }
// Evacuate live data from the per-thread nursery into the per-thread
// tenured area.
void evacuateLiveData() { fjNursery_.evacuatingGC(); }
// Used in inlining nursery allocation. Note the nursery is a
// member of the ForkJoinContext (a substructure), not a pointer.
static size_t offsetOfFJNursery() {
return offsetof(ForkJoinContext, fjNursery_);
}
#endif
private:
friend class AutoSetForkJoinContext;
@ -474,11 +420,6 @@ class ForkJoinContext : public ThreadSafeContext
ForkJoinShared *const shared_;
#ifdef JSGC_FJGENERATIONAL
gc::ForkJoinGCShared gcShared_;
gc::ForkJoinNursery fjNursery_;
#endif
ThreadPoolWorker *worker_;
bool acquiredJSContext_;
@ -563,15 +504,13 @@ enum SpewChannel {
SpewOps,
SpewCompile,
SpewBailouts,
SpewGC,
NumSpewChannels
};
#if defined(FORKJOIN_SPEW) && defined(JS_THREADSAFE) && defined(JS_ION)
#if defined(DEBUG) && defined(JS_THREADSAFE) && defined(JS_ION)
bool SpewEnabled(SpewChannel channel);
void Spew(SpewChannel channel, const char *fmt, ...);
void SpewVA(SpewChannel channel, const char *fmt, va_list args);
void SpewBeginOp(JSContext *cx, const char *name);
void SpewBailout(uint32_t count, HandleScript script, jsbytecode *pc,
ParallelBailoutCause cause);
@ -585,7 +524,6 @@ void SpewBailoutIR(IonLIRTraceData *data);
static inline bool SpewEnabled(SpewChannel channel) { return false; }
static inline void Spew(SpewChannel channel, const char *fmt, ...) { }
static inline void SpewVA(SpewChannel channel, const char *fmt, va_list args) { }
static inline void SpewBeginOp(JSContext *cx, const char *name) { }
static inline void SpewBailout(uint32_t count, HandleScript script,
jsbytecode *pc, ParallelBailoutCause cause) {}
@ -597,7 +535,7 @@ static inline void SpewMIR(jit::MDefinition *mir, const char *fmt, ...) { }
#endif
static inline void SpewBailoutIR(IonLIRTraceData *data) { }
#endif // FORKJOIN_SPEW && JS_THREADSAFE && JS_ION
#endif // DEBUG && JS_THREADSAFE && JS_ION
} // namespace parallel
} // namespace js

View File

@ -30,10 +30,6 @@ class ObjectImpl;
class Nursery;
class Shape;
namespace gc {
class ForkJoinNursery;
}
/*
* To really poison a set of values, using 'magic' or 'undefined' isn't good
* enough since often these will just be ignored by buggy code (see bug 629974)
@ -181,7 +177,6 @@ class ObjectElements
friend class ObjectImpl;
friend class ArrayObject;
friend class Nursery;
friend class gc::ForkJoinNursery;
template <ExecutionMode mode>
friend bool
@ -450,7 +445,6 @@ class ObjectImpl : public gc::BarrieredCell<ObjectImpl>
private:
friend class Nursery;
friend class gc::ForkJoinNursery;
/*
* Get internal pointers to the range of values starting at start and

View File

@ -953,15 +953,6 @@ struct JSRuntime : public JS::shadow::Runtime,
bool isHeapMinorCollecting() { return gc.isHeapMinorCollecting(); }
bool isHeapCollecting() { return gc.isHeapCollecting(); }
// Performance note: if isFJMinorCollecting turns out to be slow
// because reading the counter is slow then we may be able to
// augment the counter with a volatile flag that is set iff the
// counter is greater than zero. (It will require some care to
// make sure the two variables stay in sync.)
bool isFJMinorCollecting() { return gc.fjCollectionCounter > 0; }
void incFJMinorCollecting() { gc.fjCollectionCounter++; }
void decFJMinorCollecting() { gc.fjCollectionCounter--; }
#ifdef JS_GC_ZEAL
int gcZeal() { return gc.zealMode; }

View File

@ -296,11 +296,7 @@ JS_JITINFO_NATIVE_PARALLEL_THREADSAFE(intrinsic_ParallelSpew_jitInfo, intrinsic_
#endif
/*
* ForkJoin(func, sliceStart, sliceEnd, mode, updatable): Invokes |func| many times in parallel.
*
* If "func" will update a pre-existing object then that object /must/ be passed
* as the object "updatable". It is /not/ correct to pass an object that
* references the updatable objects indirectly.
* ForkJoin(func, feedback): Invokes |func| many times in parallel.
*
* See ForkJoin.cpp for details and ParallelArray.js for examples.
*/
@ -783,7 +779,7 @@ static const JSFunctionSpec intrinsic_functions[] = {
JS_FN("NewStringIterator", intrinsic_NewStringIterator, 0,0),
JS_FN("IsStringIterator", intrinsic_IsStringIterator, 1,0),
JS_FN("ForkJoin", intrinsic_ForkJoin, 5,0),
JS_FN("ForkJoin", intrinsic_ForkJoin, 2,0),
JS_FN("ForkJoinNumWorkers", intrinsic_ForkJoinNumWorkers, 0,0),
JS_FN("NewDenseArray", intrinsic_NewDenseArray, 1,0),
JS_FN("ShouldForceSequential", intrinsic_ShouldForceSequential, 0,0),

View File

@ -22,7 +22,6 @@
#include "jscntxtinlines.h"
#include "jsobjinlines.h"
#include "gc/ForkJoinNursery-inl.h"
#include "vm/ObjectImpl-inl.h"
#include "vm/Runtime-inl.h"

View File

@ -111,10 +111,6 @@ class Nursery;
class ObjectImpl;
class StaticBlockObject;
namespace gc {
class ForkJoinNursery;
}
typedef JSPropertyOp PropertyOp;
typedef JSStrictPropertyOp StrictPropertyOp;
typedef JSPropertyDescriptor PropertyDescriptor;
@ -616,7 +612,6 @@ class Shape : public gc::BarrieredCell<Shape>
friend class ::JSFunction;
friend class js::Bindings;
friend class js::Nursery;
friend class js::gc::ForkJoinNursery;
friend class js::ObjectImpl;
friend class js::PropertyTree;
friend class js::StaticBlockObject;

View File

@ -437,13 +437,14 @@ MarkInterpreterActivation(JSTracer *trc, InterpreterActivation *act)
}
void
js::MarkInterpreterActivations(PerThreadData *ptd, JSTracer *trc)
js::MarkInterpreterActivations(JSRuntime *rt, JSTracer *trc)
{
for (ActivationIterator iter(ptd); !iter.done(); ++iter) {
for (ActivationIterator iter(rt); !iter.done(); ++iter) {
Activation *act = iter.activation();
if (act->isInterpreter())
MarkInterpreterActivation(trc, act->asInterpreter());
}
}
/*****************************************************************************/

View File

@ -1093,7 +1093,7 @@ class InterpreterStack
}
};
void MarkInterpreterActivations(PerThreadData *ptd, JSTracer *trc);
void MarkInterpreterActivations(JSRuntime *rt, JSTracer *trc);
/*****************************************************************************/

View File

@ -10,15 +10,10 @@
#include "jslock.h"
#include "js/Utility.h"
#include "vm/ForkJoin.h"
#include "vm/Monitor.h"
#include "vm/Runtime.h"
#ifdef JSGC_FJGENERATIONAL
#include "prmjtime.h"
#endif
using namespace js;
const size_t WORKER_THREAD_STACK_SIZE = 1*1024*1024;
@ -261,24 +256,18 @@ ThreadPool::ThreadPool(JSRuntime *rt)
: activeWorkers_(0),
joinBarrier_(nullptr),
job_(nullptr),
runtime_(rt),
#ifdef DEBUG
runtime_(rt),
stolenSlices_(0),
#endif
pendingSlices_(0),
isMainThreadActive_(false),
chunkLock_(nullptr),
timeOfLastAllocation_(0),
freeChunks_(nullptr)
isMainThreadActive_(false)
{ }
ThreadPool::~ThreadPool()
{
terminateWorkers();
clearChunkCache();
#ifdef JS_THREADSAFE
if (chunkLock_)
PR_DestroyLock(chunkLock_);
if (joinBarrier_)
PR_DestroyCondVar(joinBarrier_);
#endif
@ -291,13 +280,10 @@ ThreadPool::init()
if (!Monitor::init())
return false;
joinBarrier_ = PR_NewCondVar(lock_);
if (!joinBarrier_)
return false;
chunkLock_ = PR_NewLock();
if (!chunkLock_)
return false;
#endif
return !!joinBarrier_;
#else
return true;
#endif
}
uint32_t
@ -496,92 +482,3 @@ ThreadPool::abortJob()
// the thread pool having more work.
while (hasWork());
}
// We are not using the markPagesUnused() / markPagesInUse() APIs here
// for two reasons. One, the free list is threaded through the
// chunks, so some pages are actually in use. Two, the expectation is
// that a small number of chunks will be used intensively for a short
// while and then be abandoned at the next GC.
//
// It's an open question whether it's best to go directly to the
// pageAllocator, as now, or go via the GC's chunk pool. Either way
// there's a need to manage a predictable chunk cache here as we don't
// want chunks to be deallocated during a parallel section.
gc::ForkJoinNurseryChunk *
ThreadPool::getChunk()
{
#ifdef JSGC_FJGENERATIONAL
PR_Lock(chunkLock_);
timeOfLastAllocation_ = PRMJ_Now()/1000000;
ChunkFreeList *p = freeChunks_;
if (p)
freeChunks_ = p->next;
PR_Unlock(chunkLock_);
if (p) {
// Already poisoned.
return reinterpret_cast<gc::ForkJoinNurseryChunk *>(p);
}
gc::ForkJoinNurseryChunk *c =
reinterpret_cast<gc::ForkJoinNurseryChunk *>(
runtime_->gc.pageAllocator.mapAlignedPages(gc::ChunkSize, gc::ChunkSize));
if (!c)
return c;
poisonChunk(c);
return c;
#else
return nullptr;
#endif
}
void
ThreadPool::putFreeChunk(gc::ForkJoinNurseryChunk *c)
{
#ifdef JSGC_FJGENERATIONAL
poisonChunk(c);
PR_Lock(chunkLock_);
ChunkFreeList *p = reinterpret_cast<ChunkFreeList *>(c);
p->next = freeChunks_;
freeChunks_ = p;
PR_Unlock(chunkLock_);
#endif
}
void
ThreadPool::poisonChunk(gc::ForkJoinNurseryChunk *c)
{
#ifdef JSGC_FJGENERATIONAL
#ifdef DEBUG
memset(c, JS_POISONED_FORKJOIN_CHUNK, gc::ChunkSize);
#endif
c->trailer.runtime = nullptr;
#endif
}
void
ThreadPool::pruneChunkCache()
{
#ifdef JSGC_FJGENERATIONAL
if (PRMJ_Now()/1000000 - timeOfLastAllocation_ >= secondsBeforePrune)
clearChunkCache();
#endif
}
void
ThreadPool::clearChunkCache()
{
#ifdef JSGC_FJGENERATIONAL
PR_Lock(chunkLock_);
ChunkFreeList *p = freeChunks_;
freeChunks_ = nullptr;
PR_Unlock(chunkLock_);
while (p) {
ChunkFreeList *victim = p;
p = p->next;
runtime_->gc.pageAllocator.unmapPages(victim, gc::ChunkSize);
}
#endif
}

View File

@ -24,10 +24,6 @@ namespace js {
class ThreadPool;
namespace gc {
struct ForkJoinNurseryChunk;
}
/////////////////////////////////////////////////////////////////////////////
// ThreadPoolWorker
//
@ -178,9 +174,10 @@ class ThreadPool : public Monitor
// The current job.
ParallelJob *job_;
#ifdef DEBUG
// Initialized at startup only.
JSRuntime *const runtime_;
#ifdef DEBUG
// Number of stolen slices in the last parallel job.
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> stolenSlices_;
#endif
@ -253,80 +250,6 @@ class ThreadPool : public Monitor
// Abort the current job.
void abortJob();
// Chunk pool for the PJS parallel nurseries. The nurseries need
// to have a useful pool of cheap chunks, they cannot map/unmap
// chunks as needed, as that slows down collection much too much.
//
// Technically the following should be #ifdef JSGC_FJGENERATIONAL
// but that affects the observed size of JSRuntime, of which
// ThreadPool is a member. JSGC_FJGENERATIONAL can only be set if
// PJS is enabled, but the latter is enabled in js/src/moz.build;
// meanwhile, JSGC_FJGENERATIONAL must be enabled globally if it
// is enabled at all, since plenty of Firefox code includes files
// to make JSRuntime visible. JSGC_FJGENERATIONAL will go away
// soon, in the mean time the problem is resolved by not making
// definitions exported from SpiderMonkey dependent on it.
// Obtain chunk memory from the cache, or allocate new. In debug
// mode poison the memory, see poisonChunk().
//
// Returns nullptr on OOM.
gc::ForkJoinNurseryChunk *getChunk();
// Free chunk memory to the cache. In debug mode poison it, see
// poisionChunk().
void putFreeChunk(gc::ForkJoinNurseryChunk *mem);
// If enough time has passed since any allocation activity on the
// chunk pool then release any free chunks. It's meaningful to
// call this from the main GC's chunk expiry mechanism; it has low
// cost if it does not do anything.
//
// This must be called with the GC lock taken.
void pruneChunkCache();
private:
// Ignore requests to prune the pool until this number of seconds
// has passed since the last allocation request.
static const int32_t secondsBeforePrune = 10;
// This lock controls access to the following variables and to the
// 'next' field of any ChunkFreeList object reachable from freeChunks_.
//
// You will be tempted to remove this lock and instead introduce a
// lock-free push/pop data structure using Atomic.compareExchange.
// Before you do that, consider that such a data structure
// implemented naively is vulnerable to the ABA problem in a way
// that leads to a corrupt free list; the problem occurs in
// practice during very heavily loaded runs where preemption
// windows can be long (eg, running the parallel jit_tests on all
// cores means having a number of runnable threads quadratic in
// the number of cores). To do better some ABA-defeating scheme
// is needed additionally.
PRLock *chunkLock_;
// Timestamp of last allocation from the chunk pool, in seconds.
int32_t timeOfLastAllocation_;
// This structure overlays the beginning of the chunk when the
// chunk is on the free list; the rest of the chunk is unused.
struct ChunkFreeList {
ChunkFreeList *next;
};
// List of free chunks.
ChunkFreeList *freeChunks_;
// Poison a free chunk by filling with JS_POISONED_FORKJOIN_CHUNK
// and setting the runtime pointer to null.
void poisonChunk(gc::ForkJoinNurseryChunk *c);
// Release the memory of the chunks that are on the free list.
//
// This should be called only from the ThreadPool's destructor or
// from pruneChunkCache().
void clearChunkCache();
};
} // namespace js