8246097: Shenandoah: limit parallelism in CLDG root handling
Reviewed-by: zgu
commit 4dce01703b
parent 778f969da7
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -1638,7 +1638,7 @@ public:
     AbstractGangTask("Shenandoah Evacuate/Update Concurrent Roots Task"),
     _vm_roots(phase),
     _weak_roots(phase),
-    _cld_roots(phase) {}
+    _cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers()) {}
 
   void work(uint worker_id) {
     ShenandoahEvacOOMScope oom;
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
@@ -161,7 +161,7 @@ ShenandoahRootEvacuator::ShenandoahRootEvacuator(uint n_workers,
   ShenandoahRootProcessor(phase),
   _serial_roots(phase),
   _vm_roots(phase),
-  _cld_roots(phase),
+  _cld_roots(phase, n_workers),
   _thread_roots(phase, n_workers > 1),
   _serial_weak_roots(phase),
   _weak_roots(phase),
@@ -189,11 +189,13 @@ void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) {
     _weak_roots.oops_do<OopClosure>(oops, worker_id);
   }
   _dedup_roots.oops_do(&always_true, oops, worker_id);
-
-  // Process heavy-weight/fully parallel roots the last
   if (_stw_class_unloading) {
     CLDToOopClosure clds(oops, ClassLoaderData::_claim_strong);
     _cld_roots.cld_do(&clds, worker_id);
+  }
+
+  // Process heavy-weight/fully parallel roots the last
+  if (_stw_class_unloading) {
     _code_roots.code_blobs_do(codes_cl, worker_id);
     _thread_roots.oops_do(oops, NULL, worker_id);
   } else {
@@ -205,7 +207,7 @@ ShenandoahRootUpdater::ShenandoahRootUpdater(uint n_workers, ShenandoahPhaseTimi
   ShenandoahRootProcessor(phase),
   _serial_roots(phase),
   _vm_roots(phase),
-  _cld_roots(phase),
+  _cld_roots(phase, n_workers),
   _thread_roots(phase, n_workers > 1),
   _serial_weak_roots(phase),
   _weak_roots(phase),
@@ -217,7 +219,7 @@ ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTi
   ShenandoahRootProcessor(phase),
   _serial_roots(phase),
   _vm_roots(phase),
-  _cld_roots(phase),
+  _cld_roots(phase, n_workers),
   _thread_roots(phase, n_workers > 1),
   _serial_weak_roots(phase),
   _weak_roots(phase),
@@ -243,9 +245,9 @@ void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
   _vm_roots.oops_do(oops, worker_id);
   _weak_roots.oops_do<OopClosure>(oops, worker_id);
   _dedup_roots.oops_do(&always_true, oops, worker_id);
+  _cld_roots.cld_do(&adjust_cld_closure, worker_id);
 
   // Process heavy-weight/fully parallel roots the last
-  _cld_roots.cld_do(&adjust_cld_closure, worker_id);
   _code_roots.code_blobs_do(adjust_code_closure, worker_id);
   _thread_roots.oops_do(oops, NULL, worker_id);
 }
@@ -255,7 +257,7 @@ ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
   _serial_roots(ShenandoahPhaseTimings::heap_iteration_roots),
   _thread_roots(ShenandoahPhaseTimings::heap_iteration_roots, false /*is par*/),
   _vm_roots(ShenandoahPhaseTimings::heap_iteration_roots),
-  _cld_roots(ShenandoahPhaseTimings::heap_iteration_roots),
+  _cld_roots(ShenandoahPhaseTimings::heap_iteration_roots, 1),
   _serial_weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
   _weak_roots(ShenandoahPhaseTimings::heap_iteration_roots),
   _dedup_roots(ShenandoahPhaseTimings::heap_iteration_roots),
@@ -280,9 +282,9 @@ ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner() :
   _vm_roots.oops_do(oops, 0);
   _weak_roots.oops_do<OopClosure>(oops, 0);
   _dedup_roots.oops_do(&always_true, oops, 0);
+  _cld_roots.cld_do(&clds, 0);
 
   // Process heavy-weight/fully parallel roots the last
-  _cld_roots.cld_do(&clds, 0);
   _code_roots.code_blobs_do(&code, 0);
   _thread_roots.threads_do(&tc_cl, 0);
 }
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
@@ -218,9 +218,18 @@ public:
 template <bool CONCURRENT, bool SINGLE_THREADED>
 class ShenandoahClassLoaderDataRoots {
 private:
+  ShenandoahSharedSemaphore _semaphore;
   ShenandoahPhaseTimings::Phase _phase;
+
+  static uint worker_count(uint n_workers) {
+    // Limit concurrency a bit, otherwise it wastes resources when workers are tripping
+    // over each other. This also leaves free workers to process other parts of the root
+    // set, while admitted workers are busy with doing the CLDG walk.
+    return MAX2(1u, MIN2(ShenandoahSharedSemaphore::max_tokens(), n_workers / 2));
+  }
+
 public:
-  ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase);
+  ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase, uint n_workers);
   ~ShenandoahClassLoaderDataRoots();
 
   void always_strong_cld_do(CLDClosure* clds, uint worker_id);
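Note on the worker_count() policy above: it admits at most half of the gang into the CLDG walk, clamped to at least one worker and to what the semaphore can count. A minimal standalone sketch of the same arithmetic, with std::min/std::max standing in for HotSpot's MIN2/MAX2 and an assumed 127-token cap (what max_tokens() yields when ShenandoahSharedValue is a single byte):

#include <algorithm>
#include <cstdio>

// Hypothetical stand-in for ShenandoahSharedSemaphore::max_tokens():
// sizeof(ShenandoahSharedValue) * CHAR_MAX == 127 for a one-byte value.
static unsigned max_tokens() { return 127; }

// Same policy as the patch: half the workers, clamped to [1, max_tokens()].
static unsigned worker_count(unsigned n_workers) {
  return std::max(1u, std::min(max_tokens(), n_workers / 2));
}

int main() {
  // 1 worker -> 1 token, 8 workers -> 4, 512 workers -> capped at 127.
  std::printf("%u %u %u\n", worker_count(1), worker_count(8), worker_count(512));
  return 0;
}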
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.inline.hpp
@@ -121,7 +121,8 @@ void ShenandoahVMRoots<CONCURRENT>::oops_do(T* cl, uint worker_id) {
 }
 
 template <bool CONCURRENT, bool SINGLE_THREADED>
-ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase) :
+ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::ShenandoahClassLoaderDataRoots(ShenandoahPhaseTimings::Phase phase, uint n_workers) :
+  _semaphore(worker_count(n_workers)),
   _phase(phase) {
   if (!SINGLE_THREADED) {
     ClassLoaderDataGraph::clear_claimed_marks();
@@ -145,9 +146,10 @@ void ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::always_strong_
     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
     assert(Thread::current()->is_VM_thread(), "Single threaded CLDG iteration can only be done by VM thread");
     ClassLoaderDataGraph::always_strong_cld_do(clds);
-  } else {
+  } else if (_semaphore.try_acquire()) {
     ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CLDGRoots, worker_id);
     ClassLoaderDataGraph::always_strong_cld_do(clds);
+    _semaphore.claim_all();
   }
 }
@@ -157,9 +159,10 @@ void ShenandoahClassLoaderDataRoots<CONCURRENT, SINGLE_THREADED>::cld_do(CLDClos
     assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
     assert(Thread::current()->is_VM_thread(), "Single threaded CLDG iteration can only be done by VM thread");
     ClassLoaderDataGraph::cld_do(clds);
-  } else {
+  } else if (_semaphore.try_acquire()) {
     ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CLDGRoots, worker_id);
     ClassLoaderDataGraph::cld_do(clds);
+    _semaphore.claim_all();
   }
 }
@@ -204,7 +207,7 @@ ShenandoahRootScanner<ITR>::ShenandoahRootScanner(uint n_workers, ShenandoahPhas
   _code_roots(phase),
   _vm_roots(phase),
   _dedup_roots(phase),
-  _cld_roots(phase) {
+  _cld_roots(phase, n_workers) {
 }
 
 template <typename ITR>
@@ -239,9 +242,9 @@ void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops, CLDC
   // Process light-weight/limited parallel roots then
   _vm_roots.oops_do(oops, worker_id);
   _dedup_roots.oops_do(&always_true, oops, worker_id);
+  _cld_roots.cld_do(clds, worker_id);
 
   // Process heavy-weight/fully parallel roots the last
-  _cld_roots.cld_do(clds, worker_id);
   _thread_roots.threads_do(&tc_cl, worker_id);
 }
@@ -256,9 +259,9 @@ void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oop
 
   // Process light-weight/limited parallel roots then
   _vm_roots.oops_do(oops, worker_id);
+  _cld_roots.always_strong_cld_do(clds, worker_id);
 
   // Process heavy-weight/fully parallel roots the last
-  _cld_roots.always_strong_cld_do(clds, worker_id);
   _thread_roots.threads_do(&tc_cl, worker_id);
 }
@@ -280,9 +283,9 @@ void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAliv
   _vm_roots.oops_do(keep_alive, worker_id);
   _weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);
   _dedup_roots.oops_do(is_alive, keep_alive, worker_id);
+  _cld_roots.cld_do(&clds, worker_id);
 
   // Process heavy-weight/fully parallel roots the last
-  _cld_roots.cld_do(&clds, worker_id);
   _code_roots.code_blobs_do(codes_cl, worker_id);
   _thread_roots.oops_do(keep_alive, NULL, worker_id);
--- a/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSharedVariables.hpp
@@ -244,4 +244,38 @@ private:
 
 };
 
+typedef struct ShenandoahSharedSemaphore {
+  shenandoah_padding(0);
+  volatile ShenandoahSharedValue value;
+  shenandoah_padding(1);
+
+  static uint max_tokens() {
+    return sizeof(ShenandoahSharedValue) * CHAR_MAX;
+  }
+
+  ShenandoahSharedSemaphore(uint tokens) {
+    assert(tokens <= max_tokens(), "sanity");
+    Atomic::release_store_fence(&value, (ShenandoahSharedValue)tokens);
+  }
+
+  bool try_acquire() {
+    while (true) {
+      ShenandoahSharedValue ov = Atomic::load_acquire(&value);
+      if (ov == 0) {
+        return false;
+      }
+      ShenandoahSharedValue nv = ov - 1;
+      if (Atomic::cmpxchg(&value, ov, nv) == ov) {
+        // successfully set
+        return true;
+      }
+    }
+  }
+
+  void claim_all() {
+    Atomic::release_store_fence(&value, (ShenandoahSharedValue)0);
+  }
+
+} ShenandoahSharedSemaphore;
+
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHSHAREDVARIABLES_HPP
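Note: a self-contained sketch of how the new semaphore throttles the CLDG walk, using std::atomic and std::thread in place of HotSpot's Atomic and the worker gang (padding omitted; SharedSemaphore and the driver below are illustrative, not part of the patch). Eight workers race for four tokens, so at most four can ever enter, and claim_all() typically shuts the gate right after the first finisher so late arrivals skip the already-walked graph:

#include <atomic>
#include <climits>
#include <cstdint>
#include <cstdio>
#include <thread>
#include <vector>

// Portable analogue of ShenandoahSharedSemaphore with a one-byte counter.
struct SharedSemaphore {
  std::atomic<uint8_t> value;

  // Mirrors sizeof(ShenandoahSharedValue) * CHAR_MAX: 127 for one byte.
  static unsigned max_tokens() { return sizeof(uint8_t) * CHAR_MAX; }

  explicit SharedSemaphore(unsigned tokens) : value(uint8_t(tokens)) {}

  bool try_acquire() {
    uint8_t ov = value.load(std::memory_order_acquire);
    while (ov != 0) {
      // On failure, compare_exchange_weak reloads ov and we re-check for zero.
      if (value.compare_exchange_weak(ov, uint8_t(ov - 1),
                                      std::memory_order_acq_rel)) {
        return true;  // token taken
      }
    }
    return false;  // gate drained
  }

  void claim_all() { value.store(0, std::memory_order_release); }
};

int main() {
  SharedSemaphore sem(4);               // worker_count(8) under the patch's policy
  std::atomic<unsigned> admitted{0};

  std::vector<std::thread> gang;
  for (int i = 0; i < 8; i++) {
    gang.emplace_back([&] {
      if (sem.try_acquire()) {
        admitted.fetch_add(1);          // this worker would walk the CLD graph
        sem.claim_all();                // drain tokens: late arrivals skip the walk
      }
      // else: worker moves on to other root groups immediately
    });
  }
  for (auto& t : gang) t.join();

  std::printf("admitted: %u of 8 workers\n", admitted.load());  // between 1 and 4
  return 0;
}

This mirrors the patched cld_do()/always_strong_cld_do() shape: try_acquire() gates admission to the CLDG walk, and workers that fail fall through to the remaining root groups without waiting.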