Mirror of https://github.com/darlinghq/darling-openjdk.git
Synced 2025-02-03 06:51:56 +00:00

Merge commit 059e07a458
@@ -1638,7 +1638,7 @@ public:
     AbstractGangTask("Shenandoah Evacuate/Update Concurrent Roots Task"),
     _vm_roots(phase),
     _weak_roots(phase),
-    _cld_roots(phase) {}
+    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()) {}
 
   void work(uint worker_id) {
     ShenandoahEvacOOMScope oom;
@@ -161,7 +161,7 @@ ShenandoahRootEvacuator::ShenandoahRootEvacuator(uint n_workers,
   ShenandoahRootProcessor(phase),
   _serial_roots(phase),
   _vm_roots(phase),
-  _cld_roots(phase),
+  _cld_roots(phase, n_workers),
   _thread_roots(phase, n_workers > 1),
   _serial_weak_roots(phase),
   _weak_roots(phase),
@@ -179,30 +179,35 @@ void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) {
       static_cast<CodeBlobToOopClosure*>(&blobsCl);
   AlwaysTrueClosure always_true;
 
+  // Process serial-claiming roots first
   _serial_roots.oops_do(oops, worker_id);
   _serial_weak_roots.weak_oops_do(oops, worker_id);
+
+  // Process light-weight/limited parallel roots then
   if (_stw_roots_processing) {
     _vm_roots.oops_do<OopClosure>(oops, worker_id);
     _weak_roots.oops_do<OopClosure>(oops, worker_id);
   }
+  _dedup_roots.oops_do(&always_true, oops, worker_id);
   if (_stw_class_unloading) {
     CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
     _cld_roots.cld_do(&clds, worker_id);
   }
 
+  // Process heavy-weight/fully parallel roots the last
   if (_stw_class_unloading) {
     _code_roots.code_blobs_do(codes_cl, worker_id);
     _thread_roots.oops_do(oops, NULL, worker_id);
   } else {
     _thread_roots.oops_do(oops, codes_cl, worker_id);
   }
-
-  _dedup_roots.oops_do(&always_true, oops, worker_id);
 }
 
 ShenandoahRootUpdater::ShenandoahRootUpdater(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
   ShenandoahRootProcessor(phase),
   _serial_roots(phase),
   _vm_roots(phase),
-  _cld_roots(phase),
+  _cld_roots(phase, n_workers),
   _thread_roots(phase, n_workers > 1),
   _serial_weak_roots(phase),
   _weak_roots(phase),
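The reordering above (repeated in the other roots_do() methods below) sorts root scans into three comment-labeled groups: serial-claiming roots, light-weight/limited-parallel roots, and heavy-weight/fully-parallel roots. Front-loading the serial-claiming roots matters because each such root can be processed by only one worker; when the claim is raced early, the losing workers still have the parallel groups to fall through to instead of idling at the tail of the phase. A minimal standalone sketch of that claiming idea, using plain C++ threads rather than HotSpot's task framework (all names here are illustrative, not from the patch):

    // Sketch: serial-claiming work first, parallel work after.
    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    static std::atomic<bool> serial_claimed{false};

    static void worker(unsigned id) {
      // Serial-claiming root: exactly one worker wins the claim...
      if (!serial_claimed.exchange(true)) {
        std::printf("worker %u processed the serial root\n", id);
      }
      // ...and every loser immediately proceeds to parallel work,
      // so nobody waits behind the single claim holder.
      std::printf("worker %u processed its share of parallel roots\n", id);
    }

    int main() {
      std::vector<std::thread> pool;
      for (unsigned i = 0; i < 4; i++) pool.emplace_back(worker, i);
      for (auto& t : pool) t.join();
      return 0;
    }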
@@ -214,7 +219,7 @@ ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
   ShenandoahRootProcessor(phase),
   _serial_roots(phase),
   _vm_roots(phase),
-  _cld_roots(phase),
+  _cld_roots(phase, n_workers),
   _thread_roots(phase, n_workers > 1),
   _serial_weak_roots(phase),
   _weak_roots(phase),
@@ -232,16 +237,19 @@ void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
   CLDToOopClosure adjust_cld_closure(oops, ClassLoaderData::_claim_strong);
   AlwaysTrueClosure always_true;
 
+  // Process serial-claiming roots first
   _serial_roots.oops_do(oops, worker_id);
-  _vm_roots.oops_do(oops, worker_id);
-
-  _thread_roots.oops_do(oops, NULL, worker_id);
-  _cld_roots.cld_do(&adjust_cld_closure, worker_id);
-  _code_roots.code_blobs_do(adjust_code_closure, worker_id);
-
   _serial_weak_roots.weak_oops_do(oops, worker_id);
 
+  // Process light-weight/limited parallel roots then
+  _vm_roots.oops_do(oops, worker_id);
   _weak_roots.oops_do<OopClosure>(oops, worker_id);
   _dedup_roots.oops_do(&always_true, oops, worker_id);
+  _cld_roots.cld_do(&adjust_cld_closure, worker_id);
+
+  // Process heavy-weight/fully parallel roots the last
+  _code_roots.code_blobs_do(adjust_code_closure, worker_id);
+  _thread_roots.oops_do(oops, NULL, worker_id);
 }
 
 ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
@@ -249,7 +257,7 @@ ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
   _serial_roots(ShenandoahPhaseTimings::heap_iteration_roots),
   _thread_roots(ShenandoahPhaseTimings::heap_iteration_roots, false /*is par*/),
   _vm_roots(ShenandoahPhaseTimings::heap_iteration_roots),
-  _cld_roots(ShenandoahPhaseTimings::heap_iteration_roots),
+  _cld_roots(ShenandoahPhaseTimings::heap_iteration_roots, 1),
   _serial_weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
   _weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
   _dedup_roots(ShenandoahPhaseTimings::heap_iteration_roots),
@@ -263,15 +271,20 @@ ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
   MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations);
   ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL);
   AlwaysTrueClosure always_true;
 
   ResourceMark rm;
 
+  // Process serial-claiming roots first
   _serial_roots.oops_do(oops, 0);
-  _vm_roots.oops_do(oops, 0);
-  _cld_roots.cld_do(&clds, 0);
-  _thread_roots.threads_do(&tc_cl, 0);
-  _code_roots.code_blobs_do(&code, 0);
-
   _serial_weak_roots.weak_oops_do(oops, 0);
 
+  // Process light-weight/limited parallel roots then
+  _vm_roots.oops_do(oops, 0);
   _weak_roots.oops_do<OopClosure>(oops, 0);
   _dedup_roots.oops_do(&always_true, oops, 0);
+  _cld_roots.cld_do(&clds, 0);
+
+  // Process heavy-weight/fully parallel roots the last
+  _code_roots.code_blobs_do(&code, 0);
+  _thread_roots.threads_do(&tc_cl, 0);
 }
@@ -218,9 +218,18 @@ public:
 template <bool CONCURRENT, bool SINGLE_THREADED>
 class ShenandoahClassLoaderDataRoots {
 private:
+  ShenandoahSharedSemaphore _semaphore;
   ShenandoahPhaseTimings::Phase _phase;
 
+  static uint worker_count(uint n_workers) {
+    // Limit concurrency a bit, otherwise it wastes resources when workers are tripping
+    // over each other. This also leaves free workers to process other parts of the root
+    // set, while admitted workers are busy with doing the CLDG walk.
+    return MAX2(1u, MIN2(ShenandoahSharedSemaphore::max_tokens(), n_workers / 2));
+  }
+
 public:
-  ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase);
+  ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase, uint n_workers);
   ~ShenandoahClassLoaderDataRoots();
 
   void always_strong_cld_do(CLDClosure* clds, uint worker_id);
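worker_count() bounds CLDG concurrency: at most half of the available workers, never fewer than one, and never more than the semaphore can represent in tokens. A standalone sketch of just that arithmetic, assuming MAX2/MIN2 behave like std::max/std::min and a 127-token budget (which is what max_tokens() evaluates to if ShenandoahSharedValue is a one-byte type; both are assumptions for illustration):

    #include <algorithm>
    #include <cstdio>

    // Mirror of the capping formula, outside HotSpot.
    static unsigned worker_count(unsigned n_workers, unsigned max_tokens = 127) {
      // Admit at most half the workers (but never zero) into the CLDG walk,
      // capped by the number of semaphore tokens available.
      return std::max(1u, std::min(max_tokens, n_workers / 2));
    }

    int main() {
      for (unsigned n : {1u, 2u, 4u, 8u, 16u, 512u}) {
        std::printf("n_workers=%3u -> CLDG workers=%u\n", n, worker_count(n));
      }
      // Prints 1, 1, 2, 4, 8, 127: half the workers, clamped to [1, 127].
      return 0;
    }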
@@ -121,7 +121,8 @@ void ShenandoahVMRoots<CONCURRENT>::oops_do(T* cl, uint worker_id) {
 }
 
 template <bool CONCURRENT, bool SINGLE_THREADED>
-ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase) :
+ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase, uint n_workers) :
+  _semaphore(worker_count(n_workers)),
   _phase(phase) {
   if (!SINGLE_THREADED) {
     ClassLoaderDataGraph::clear_claimed_marks();
@@ -145,9 +146,10 @@ void ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::always_strong_cld_do(CLDClosure* clds, uint worker_id) {
     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
     assert(Thread::current()->is_VM_thread(), "Single threaded CLDG iteration can only be done by VM thread");
     ClassLoaderDataGraph::always_strong_cld_do(clds);
-  } else {
+  } else if (_semaphore.try_acquire()) {
     ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CLDGRoots, worker_id);
     ClassLoaderDataGraph::always_strong_cld_do(clds);
+    _semaphore.claim_all();
   }
 }
@@ -157,9 +159,10 @@ void ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::cld_do(CLDClosure* clds, uint worker_id) {
     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
     assert(Thread::current()->is_VM_thread(), "Single threaded CLDG iteration can only be done by VM thread");
     ClassLoaderDataGraph::cld_do(clds);
-  } else {
+  } else if (_semaphore.try_acquire()) {
     ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CLDGRoots, worker_id);
     ClassLoaderDataGraph::cld_do(clds);
+    _semaphore.claim_all();
   }
 }
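Both cld_do() and always_strong_cld_do() now use the same gate: a worker joins the CLDG walk only if it can take a token, and whichever admitted worker finishes first calls claim_all() to drain the remaining tokens, so late arrivals skip a graph that has already been claimed. A portable sketch of that try_acquire/claim_all idiom, with std::atomic standing in for the ShenandoahSharedValue machinery (illustrative only, not the HotSpot types):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    struct SharedSemaphore {
      std::atomic<unsigned> tokens;
      explicit SharedSemaphore(unsigned n) : tokens(n) {}

      // Take one token; fail once the pool is empty.
      bool try_acquire() {
        unsigned ov = tokens.load(std::memory_order_acquire);
        while (ov != 0) {
          if (tokens.compare_exchange_weak(ov, ov - 1,
                                           std::memory_order_acq_rel)) {
            return true;
          }
          // ov was reloaded by compare_exchange_weak; retry.
        }
        return false;
      }

      // Drain all tokens so no further worker is admitted.
      void claim_all() { tokens.store(0, std::memory_order_release); }
    };

    static SharedSemaphore gate(2);  // admit at most two workers

    static void walk(unsigned id) {
      if (gate.try_acquire()) {
        std::printf("worker %u admitted to the shared walk\n", id);
        // ... internally-claimed graph walk would happen here ...
        gate.claim_all();  // first finisher shuts the gate for late arrivals
      } else {
        std::printf("worker %u skips; walk already covered\n", id);
      }
    }

    int main() {
      std::vector<std::thread> ts;
      for (unsigned i = 0; i < 8; i++) ts.emplace_back(walk, i);
      for (auto& t : ts) t.join();
      return 0;
    }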
@@ -204,7 +207,7 @@ ShenandoahRootScanner<ITR>::ShenandoahRootScanner(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
   _code_roots(phase),
   _vm_roots(phase),
   _dedup_roots(phase),
-  _cld_roots(phase) {
+  _cld_roots(phase, n_workers) {
 }
 
 template <typename ITR>
@@ -226,19 +229,23 @@ void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure* tc) {
   assert(!ShenandoahSafepoint::is_at_shenandoah_safepoint() ||
          !ShenandoahHeap::heap()->unload_classes(),
          "Expect class unloading when Shenandoah cycle is running");
-  ResourceMark rm;
-
-  _serial_roots.oops_do(oops, worker_id);
-  _vm_roots.oops_do(oops, worker_id);
-
   assert(clds != NULL, "Only possible with CLD closure");
-  _cld_roots.cld_do(clds, worker_id);
-
-  ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
-  _thread_roots.threads_do(&tc_cl, worker_id);
 
+  AlwaysTrueClosure always_true;
+  ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
+
+  ResourceMark rm;
+
+  // Process serial-claiming roots first
+  _serial_roots.oops_do(oops, worker_id);
+
+  // Process light-weight/limited parallel roots then
+  _vm_roots.oops_do(oops, worker_id);
+  _dedup_roots.oops_do(&always_true, oops, worker_id);
+  _cld_roots.cld_do(clds, worker_id);
+
+  // Process heavy-weight/fully parallel roots the last
+  _thread_roots.threads_do(&tc_cl, worker_id);
 }
 
 template <typename ITR>
@@ -247,9 +254,14 @@ void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oops, CLDClosure* clds, CodeBlobClosure* code, ThreadClosure* tc) {
   ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
   ResourceMark rm;
 
+  // Process serial-claiming roots first
   _serial_roots.oops_do(oops, worker_id);
+
+  // Process light-weight/limited parallel roots then
   _vm_roots.oops_do(oops, worker_id);
   _cld_roots.always_strong_cld_do(clds, worker_id);
+
+  // Process heavy-weight/fully parallel roots the last
   _thread_roots.threads_do(&tc_cl, worker_id);
 }
@@ -263,16 +275,20 @@ void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive) {
 
   CLDToOopClosure clds(keep_alive, ClassLoaderData::_claim_strong);
 
+  // Process serial-claiming roots first
   _serial_roots.oops_do(keep_alive, worker_id);
-  _vm_roots.oops_do(keep_alive, worker_id);
+  _serial_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);
 
+  // Process light-weight/limited parallel roots then
+  _vm_roots.oops_do(keep_alive, worker_id);
+  _weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);
+  _dedup_roots.oops_do(is_alive, keep_alive, worker_id);
   _cld_roots.cld_do(&clds, worker_id);
 
+  // Process heavy-weight/fully parallel roots the last
   _code_roots.code_blobs_do(codes_cl, worker_id);
   _thread_roots.oops_do(keep_alive, NULL, worker_id);
-
-  _serial_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);
-  _weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);
-  _dedup_roots.oops_do(is_alive, keep_alive, worker_id);
 }
 
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTPROCESSOR_INLINE_HPP
@@ -244,4 +244,38 @@ private:
 
 };
 
+typedef struct ShenandoahSharedSemaphore {
+  shenandoah_padding(0);
+  volatile ShenandoahSharedValue value;
+  shenandoah_padding(1);
+
+  static uint max_tokens() {
+    return sizeof(ShenandoahSharedValue) * CHAR_MAX;
+  }
+
+  ShenandoahSharedSemaphore(uint tokens) {
+    assert(tokens <= max_tokens(), "sanity");
+    Atomic::release_store_fence(&value, (ShenandoahSharedValue)tokens);
+  }
+
+  bool try_acquire() {
+    while (true) {
+      ShenandoahSharedValue ov = Atomic::load_acquire(&value);
+      if (ov == 0) {
+        return false;
+      }
+      ShenandoahSharedValue nv = ov - 1;
+      if (Atomic::cmpxchg(&value, ov, nv) == ov) {
+        // successfully set
+        return true;
+      }
+    }
+  }
+
+  void claim_all() {
+    Atomic::release_store_fence(&value, (ShenandoahSharedValue)0);
+  }
+
+} ShenandoahSharedSemaphore;
+
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHSHAREDVARIABLES_HPP
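Note the token arithmetic and the shutdown path: if ShenandoahSharedValue is a one-byte type, as the shared-flag definitions elsewhere in this header suggest, max_tokens() evaluates to 127, which is the hard ceiling worker_count() can hand to the constructor. try_acquire() is a standard decrement-if-nonzero CAS loop that reloads on contention and fails permanently once the count reaches zero, and claim_all() stores zero so every later try_acquire() fails immediately; the CLD root code relies on that to close the gate once one admitted worker has completed the walk.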
@@ -97,18 +97,6 @@ void ShenandoahStringDedup::oops_do_slow(OopClosure* cl) {
   StringDedupTable::unlink_or_oops_do(&sd_cl, 0);
 }
 
-class ShenandoahIsMarkedNextClosure : public BoolObjectClosure {
-private:
-  ShenandoahMarkingContext* const _mark_context;
-
-public:
-  ShenandoahIsMarkedNextClosure() : _mark_context(ShenandoahHeap::heap()->marking_context()) { }
-
-  bool do_object_b(oop obj) {
-    return _mark_context->is_marked(obj);
-  }
-};
-
 //
 // Task for parallel unlink_or_oops_do() operation on the deduplication queue
 // and table.