Rob McKenna 2020-06-04 00:23:59 +00:00
commit 05df28d775
47 changed files with 547 additions and 438 deletions

View File

@ -27,9 +27,9 @@
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"

View File

@ -27,9 +27,9 @@
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"

View File

@ -26,11 +26,11 @@
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"

View File

@ -1298,6 +1298,9 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
CallProjections projs;
call->extract_projections(&projs, false, false);
#ifdef ASSERT
VectorSet cloned(Thread::current()->resource_area());
#endif
Node* lrb_clone = lrb->clone();
phase->register_new_node(lrb_clone, projs.catchall_catchproj);
phase->set_ctrl(lrb, projs.fallthrough_catchproj);
@ -1326,6 +1329,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
stack.set_index(idx+1);
assert(!u->is_CFG(), "");
stack.push(u, 0);
assert(!cloned.test_set(u->_idx), "only one clone");
Node* u_clone = u->clone();
int nb = u_clone->replace_edge(n, n_clone);
assert(nb > 0, "should have replaced some uses");
@ -1353,9 +1357,33 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
assert(nb > 0, "should have replaced some uses");
replaced = true;
} else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
phase->igvn().rehash_node_delayed(u);
int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
assert(nb > 0, "should have replaced some uses");
if (u->is_If()) {
// Can't break If/Bool/Cmp chain
assert(n->is_Bool(), "unexpected If shape");
assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
assert(n_clone->is_Bool(), "unexpected clone");
assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
Node* bol_clone = n->clone();
Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
bol_clone->set_req(1, cmp_clone);
Node* nn = stack.node_at(stack.size()-3);
Node* nn_clone = clones.at(clones.size()-3);
assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");
int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase));
assert(nb > 0, "should have replaced some uses");
phase->register_new_node(bol_clone, u->in(0));
phase->register_new_node(cmp_clone, u->in(0));
phase->igvn().replace_input_of(u, 1, bol_clone);
} else {
phase->igvn().rehash_node_delayed(u);
int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
assert(nb > 0, "should have replaced some uses");
}
replaced = true;
}
}
@ -2274,17 +2302,7 @@ void MemoryGraphFixer::collect_memory_nodes() {
mem = call->in(TypeFunc::Memory);
} else if (in->Opcode() == Op_NeverBranch) {
Node* head = in->in(0);
assert(head->is_Region() && head->req() == 3, "unexpected infinite loop graph shape");
assert(_phase->is_dominator(head, head->in(1)) || _phase->is_dominator(head, head->in(2)), "no back branch?");
Node* tail = _phase->is_dominator(head, head->in(1)) ? head->in(1) : head->in(2);
Node* c = tail;
while (c != head) {
if (c->is_SafePoint() && !c->is_CallLeaf()) {
mem = c->in(TypeFunc::Memory);
}
c = _phase->idom(c);
}
assert(mem != NULL, "should have found safepoint");
assert(head->is_Region(), "unexpected infinite loop graph shape");
Node* phi_mem = NULL;
for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
@ -2301,7 +2319,28 @@ void MemoryGraphFixer::collect_memory_nodes() {
}
}
}
if (phi_mem != NULL) {
if (phi_mem == NULL) {
for (uint j = 1; j < head->req(); j++) {
Node* tail = head->in(j);
if (!_phase->is_dominator(head, tail)) {
continue;
}
Node* c = tail;
while (c != head) {
if (c->is_SafePoint() && !c->is_CallLeaf()) {
Node* m = c->in(TypeFunc::Memory);
if (m->is_MergeMem()) {
m = m->as_MergeMem()->memory_at(_alias);
}
assert(mem == NULL || mem == m, "several memory states");
mem = m;
}
c = _phase->idom(c);
}
assert(mem != NULL, "should have found safepoint");
}
assert(mem != NULL, "should have found safepoint");
} else {
mem = phi_mem;
}
}
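For an infinite loop, the new code above walks every back edge of the loop head up the immediate-dominator chain, takes the memory input of any non-leaf safepoint it finds (narrowed through a MergeMem to the alias being tracked), and requires all such paths to agree on a single state. A minimal, self-contained sketch of that search, using a hypothetical Node stand-in instead of C2's real Node/PhaseIdealLoop types and omitting the back-edge (dominance) check and the MergeMem unwrapping:

#include <cassert>
#include <cstddef>

struct Node {                      // hypothetical stand-in for C2's Node
  Node* idom;                      // immediate dominator
  bool  is_safepoint;              // non-leaf safepoint carrying a memory state?
  Node* memory_state;              // that memory state, already per-alias
};

// Pick the single memory state visible on the paths from the loop tails back
// to the loop head; mirrors the shape of the new collect_memory_nodes() code.
static Node* find_loop_memory(Node* head, Node* const* tails, size_t n_tails) {
  Node* mem = NULL;
  for (size_t j = 0; j < n_tails; j++) {
    for (Node* c = tails[j]; c != head; c = c->idom) {
      if (c->is_safepoint) {
        assert((mem == NULL || mem == c->memory_state) && "several memory states");
        mem = c->memory_state;
      }
    }
  }
  assert(mem != NULL && "should have found safepoint");
  return mem;
}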
@ -2410,7 +2449,7 @@ void MemoryGraphFixer::collect_memory_nodes() {
assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
if (m != NULL) {
if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), "");
assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
// continue
} else if (unique == NULL) {
unique = m;

View File

@ -160,15 +160,3 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() const {
return ShenandoahHeuristics::should_start_gc();
}
const char* ShenandoahAdaptiveHeuristics::name() {
return "adaptive";
}
bool ShenandoahAdaptiveHeuristics::is_diagnostic() {
return false;
}
bool ShenandoahAdaptiveHeuristics::is_experimental() {
return false;
}

View File

@ -24,7 +24,7 @@
#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP
#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "utilities/numberSeq.hpp"
@ -42,11 +42,9 @@ public:
virtual bool should_start_gc() const;
virtual const char* name();
virtual bool is_diagnostic();
virtual bool is_experimental();
virtual const char* name() { return "Adaptive"; }
virtual bool is_diagnostic() { return false; }
virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP

View File

@ -72,15 +72,3 @@ bool ShenandoahAggressiveHeuristics::should_unload_classes() {
// Randomly unload classes with 50% chance.
return (os::random() & 1) == 1;
}
const char* ShenandoahAggressiveHeuristics::name() {
return "aggressive";
}
bool ShenandoahAggressiveHeuristics::is_diagnostic() {
return true;
}
bool ShenandoahAggressiveHeuristics::is_experimental() {
return false;
}

View File

@ -24,7 +24,7 @@
#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP
#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
public:
@ -40,11 +40,9 @@ public:
virtual bool should_unload_classes();
virtual const char* name();
virtual bool is_diagnostic();
virtual bool is_experimental();
virtual const char* name() { return "Aggressive"; }
virtual bool is_diagnostic() { return true; }
virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP

View File

@ -92,15 +92,3 @@ void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(Shenando
}
}
}
const char* ShenandoahCompactHeuristics::name() {
return "compact";
}
bool ShenandoahCompactHeuristics::is_diagnostic() {
return false;
}
bool ShenandoahCompactHeuristics::is_experimental() {
return false;
}

View File

@ -24,7 +24,7 @@
#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP
#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
class ShenandoahCompactHeuristics : public ShenandoahHeuristics {
public:
@ -36,11 +36,9 @@ public:
RegionData* data, size_t size,
size_t actual_free);
virtual const char* name();
virtual bool is_diagnostic();
virtual bool is_experimental();
virtual const char* name() { return "Compact"; }
virtual bool is_diagnostic() { return false; }
virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP

View File

@ -28,8 +28,8 @@
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"

View File

@ -21,8 +21,8 @@
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEURISTICS_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEURISTICS_HPP
#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEURISTICS_HPP
#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEURISTICS_HPP
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
@ -134,4 +134,4 @@ public:
double time_since_last_gc() const;
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEURISTICS_HPP
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHHEURISTICS_HPP

View File

@ -77,15 +77,3 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando
}
}
}
const char* ShenandoahPassiveHeuristics::name() {
return "passive";
}
bool ShenandoahPassiveHeuristics::is_diagnostic() {
return true;
}
bool ShenandoahPassiveHeuristics::is_experimental() {
return false;
}

View File

@ -24,7 +24,7 @@
#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP
#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
public:
@ -40,11 +40,9 @@ public:
RegionData* data, size_t data_size,
size_t free);
virtual const char* name();
virtual bool is_diagnostic();
virtual bool is_experimental();
virtual const char* name() { return "Passive"; }
virtual bool is_diagnostic() { return true; }
virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP

View File

@ -66,15 +66,3 @@ void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(Shenandoa
}
}
}
const char* ShenandoahStaticHeuristics::name() {
return "static";
}
bool ShenandoahStaticHeuristics::is_diagnostic() {
return false;
}
bool ShenandoahStaticHeuristics::is_experimental() {
return false;
}

View File

@ -24,7 +24,7 @@
#ifndef SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP
#define SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
class ShenandoahStaticHeuristics : public ShenandoahHeuristics {
public:
@ -38,11 +38,9 @@ public:
RegionData* data, size_t size,
size_t free);
virtual const char* name();
virtual bool is_diagnostic();
virtual bool is_experimental();
virtual const char* name() { return "Static"; }
virtual bool is_diagnostic() { return false; }
virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP

View File

@ -24,11 +24,11 @@
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahIUMode.hpp"
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
@ -55,3 +55,21 @@ void ShenandoahIUMode::initialize_flags() const {
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}
ShenandoahHeuristics* ShenandoahIUMode::initialize_heuristics() const {
if (ShenandoahGCHeuristics != NULL) {
if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
return new ShenandoahAggressiveHeuristics();
} else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
return new ShenandoahStaticHeuristics();
} else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
return new ShenandoahAdaptiveHeuristics();
} else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
return new ShenandoahCompactHeuristics();
} else {
vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
}
}
ShouldNotReachHere();
return NULL;
}
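ShenandoahIUMode now resolves -XX:ShenandoahGCHeuristics itself, just like the SATB mode, instead of inheriting the dispatch from the former normal mode. A rough usage sketch (flag names taken from this change; -XX:+UnlockExperimentalVMOptions is assumed to be needed for the experimental flags, as in the test @run lines further down):

java -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=iu -XX:ShenandoahGCHeuristics=adaptive ...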

View File

@ -22,20 +22,21 @@
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHIUMODE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHIUMODE_HPP
#ifndef SHARE_GC_SHENANDOAH_MODE_SHENANDOAHIUMODE_HPP
#define SHARE_GC_SHENANDOAH_MODE_SHENANDOAHIUMODE_HPP
#include "gc/shenandoah/shenandoahNormalMode.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
class ShenandoahHeuristics;
class ShenandoahIUMode : public ShenandoahNormalMode {
class ShenandoahIUMode : public ShenandoahMode {
public:
virtual void initialize_flags() const;
virtual ShenandoahHeuristics* initialize_heuristics() const;
virtual const char* name() { return "Incremental-Update"; }
virtual const char* name() { return "Incremental-Update (IU)"; }
virtual bool is_diagnostic() { return false; }
virtual bool is_experimental() { return true; }
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHIUMODE_HPP
#endif // SHARE_GC_SHENANDOAH_MODE_SHENANDOAHIUMODE_HPP

View File

@ -21,8 +21,8 @@
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMODE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHMODE_HPP
#ifndef SHARE_GC_SHENANDOAH_MODE_SHENANDOAHMODE_HPP
#define SHARE_GC_SHENANDOAH_MODE_SHENANDOAHMODE_HPP
#include "memory/allocation.hpp"
@ -53,4 +53,4 @@ public:
virtual bool is_experimental() = 0;
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMODE_HPP
#endif // SHARE_GC_SHENANDOAH_MODE_SHENANDOAHMODE_HPP

View File

@ -22,8 +22,8 @@
*/
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"

View File

@ -21,12 +21,12 @@
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHPASSIVEMODE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHPASSIVEMODE_HPP
#ifndef SHARE_GC_SHENANDOAH_MODE_SHENANDOAHPASSIVEMODE_HPP
#define SHARE_GC_SHENANDOAH_MODE_SHENANDOAHPASSIVEMODE_HPP
#include "gc/shenandoah/shenandoahNormalMode.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
class ShenandoahPassiveMode : public ShenandoahNormalMode {
class ShenandoahPassiveMode : public ShenandoahMode {
public:
virtual void initialize_flags() const;
virtual ShenandoahHeuristics* initialize_heuristics() const;
@ -36,4 +36,4 @@ public:
virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHNORMALMODE_HPP
#endif // SHARE_GC_SHENANDOAH_MODE_SHENANDOAHPASSIVEMODE_HPP

View File

@ -22,15 +22,15 @@
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahNormalMode.hpp"
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
void ShenandoahNormalMode::initialize_flags() const {
void ShenandoahSATBMode::initialize_flags() const {
if (ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
FLAG_SET_DEFAULT(ShenandoahSuspendibleWorkers, true);
}
@ -46,7 +46,7 @@ void ShenandoahNormalMode::initialize_flags() const {
SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
}
ShenandoahHeuristics* ShenandoahNormalMode::initialize_heuristics() const {
ShenandoahHeuristics* ShenandoahSATBMode::initialize_heuristics() const {
if (ShenandoahGCHeuristics != NULL) {
if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
return new ShenandoahAggressiveHeuristics();

View File

@ -21,20 +21,20 @@
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHNORMALMODE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHNORMALMODE_HPP
#ifndef SHARE_GC_SHENANDOAH_MODE_SHENANDOAHSATBMODE_HPP
#define SHARE_GC_SHENANDOAH_MODE_SHENANDOAHSATBMODE_HPP
#include "gc/shenandoah/shenandoahMode.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
class ShenandoahHeuristics;
class ShenandoahNormalMode : public ShenandoahMode {
class ShenandoahSATBMode : public ShenandoahMode {
public:
virtual void initialize_flags() const;
virtual ShenandoahHeuristics* initialize_heuristics() const;
virtual const char* name() { return "Normal"; }
virtual const char* name() { return "Snapshot-At-The-Beginning (SATB)"; }
virtual bool is_diagnostic() { return false; }
virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHNORMALMODE_HPP
#endif // SHARE_GC_SHENANDOAH_MODE_SHENANDOAHSATBMODE_HPP

View File

@ -143,26 +143,11 @@ void ShenandoahArguments::initialize() {
#endif // ASSERT
#endif // COMPILER2
if (AlwaysPreTouch) {
// Shenandoah handles pre-touch on its own. It does not let the
// generic storage code do the pre-touch before Shenandoah has
// a chance to do it on its own.
FLAG_SET_DEFAULT(AlwaysPreTouch, false);
FLAG_SET_DEFAULT(ShenandoahAlwaysPreTouch, true);
}
// Record more information about previous cycles for improved debugging pleasure
if (FLAG_IS_DEFAULT(LogEventsBufferEntries)) {
FLAG_SET_DEFAULT(LogEventsBufferEntries, 250);
}
if (ShenandoahAlwaysPreTouch) {
if (!FLAG_IS_DEFAULT(ShenandoahUncommit)) {
warning("AlwaysPreTouch is enabled, disabling ShenandoahUncommit");
}
FLAG_SET_DEFAULT(ShenandoahUncommit, false);
}
if ((InitialHeapSize == MaxHeapSize) && ShenandoahUncommit) {
log_info(gc)("Min heap equals to max heap, disabling ShenandoahUncommit");
FLAG_SET_DEFAULT(ShenandoahUncommit, false);

View File

@ -30,7 +30,7 @@
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#ifdef COMPILER1

View File

@ -259,9 +259,10 @@ public:
}
}
if (heap->is_degenerated_gc_in_progress()) {
// Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
// let's check here.
if (heap->is_degenerated_gc_in_progress() || heap->is_full_gc_in_progress()) {
// Full GC does not execute concurrent cycle.
// Degenerated cycle may bypass concurrent cycle.
// So code roots might not be scanned, let's scan here.
_cm->concurrent_scan_code_roots(worker_id, rp);
}
@ -300,9 +301,7 @@ void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_pha
workers->run_task(&mark_roots);
}
if (ShenandoahConcurrentScanCodeRoots) {
clear_claim_codecache();
}
clear_claim_codecache();
}
void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
@ -381,7 +380,7 @@ void ShenandoahConcurrentMark::initialize(uint workers) {
}
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
if (claim_codecache()) {
ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
if (!_heap->unload_classes()) {
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@ -927,11 +926,9 @@ void ShenandoahConcurrentMark::mark_loop_work(T* cl, ShenandoahLiveData* live_da
}
bool ShenandoahConcurrentMark::claim_codecache() {
assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
return _claimed_codecache.try_set();
}
void ShenandoahConcurrentMark::clear_claim_codecache() {
assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
_claimed_codecache.unset();
}

View File

@ -28,12 +28,12 @@
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"

View File

@ -44,18 +44,15 @@
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahIUMode.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahNormalMode.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
@ -65,6 +62,9 @@
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif
@ -107,7 +107,9 @@ public:
virtual void work(uint worker_id) {
ShenandoahHeapRegion* r = _regions.next();
while (r != NULL) {
os::pretouch_memory(r->bottom(), r->end(), _page_size);
if (r->is_committed()) {
os::pretouch_memory(r->bottom(), r->end(), _page_size);
}
r = _regions.next();
}
}
@ -133,7 +135,9 @@ public:
size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
if (r->is_committed()) {
os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
}
r = _regions.next();
}
@ -152,11 +156,6 @@ jint ShenandoahHeap::initialize() {
size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
if (ShenandoahAlwaysPreTouch) {
// Enabled pre-touch means the entire heap is committed right away.
init_byte_size = max_byte_size;
}
Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
@ -315,38 +314,32 @@ jint ShenandoahHeap::initialize() {
_free_set->rebuild();
}
if (ShenandoahAlwaysPreTouch) {
assert(!AlwaysPreTouch, "Should have been overridden");
if (AlwaysPreTouch) {
// For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
// before initialize() below zeroes it with initializing thread. For any given region,
// we touch the region and the corresponding bitmaps from the same thread.
ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
size_t pretouch_heap_page_size = heap_page_size;
size_t pretouch_bitmap_page_size = bitmap_page_size;
_pretouch_heap_page_size = heap_page_size;
_pretouch_bitmap_page_size = bitmap_page_size;
#ifdef LINUX
// UseTransparentHugePages would madvise that backing memory can be coalesced into huge
// pages. But the kernel needs to know that every small page is used, in order to coalesce
// them into a huge one. Therefore, we need to pretouch with smaller pages.
if (UseTransparentHugePages) {
pretouch_heap_page_size = (size_t)os::vm_page_size();
pretouch_bitmap_page_size = (size_t)os::vm_page_size();
_pretouch_heap_page_size = (size_t)os::vm_page_size();
_pretouch_bitmap_page_size = (size_t)os::vm_page_size();
}
#endif
// OS memory managers may want to coalesce back-to-back pages. Make their jobs
// simpler by pre-touching continuous spaces (heap and bitmap) separately.
log_info(gc, init)("Pretouch bitmap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
_num_regions, pretouch_bitmap_page_size);
ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, pretouch_bitmap_page_size);
ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
_workers->run_task(&bcl);
log_info(gc, init)("Pretouch heap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
_num_regions, pretouch_heap_page_size);
ShenandoahPretouchHeapTask hcl(pretouch_heap_page_size);
ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
_workers->run_task(&hcl);
}
@ -397,8 +390,8 @@ jint ShenandoahHeap::initialize() {
void ShenandoahHeap::initialize_heuristics() {
if (ShenandoahGCMode != NULL) {
if (strcmp(ShenandoahGCMode, "normal") == 0) {
_gc_mode = new ShenandoahNormalMode();
if (strcmp(ShenandoahGCMode, "satb") == 0) {
_gc_mode = new ShenandoahSATBMode();
} else if (strcmp(ShenandoahGCMode, "iu") == 0) {
_gc_mode = new ShenandoahIUMode();
} else if (strcmp(ShenandoahGCMode, "passive") == 0) {
@ -2467,9 +2460,16 @@ bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
size_t slice = r->index() / _bitmap_regions_per_slice;
size_t off = _bitmap_bytes_per_slice * slice;
size_t len = _bitmap_bytes_per_slice;
if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
char* start = (char*) _bitmap_region.start() + off;
if (!os::commit_memory(start, len, false)) {
return false;
}
if (AlwaysPreTouch) {
os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
}
return true;
}
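The pretouch changes above, touching only committed regions, remembering the chosen pretouch page sizes, and pretouching a bitmap slice as soon as commit_bitmap_slice() commits it, all come down to the same primitive: write one byte per OS page so the kernel backs the range with physical memory up front. A minimal sketch of that idea, assuming a raw byte range and page size; the real code goes through os::pretouch_memory():

#include <cstddef>

// Touch one byte per page so physical memory is mapped now rather than on
// first use. The memory is freshly committed, so writing zero is harmless.
// With transparent huge pages the stride has to be the small page size, so
// the kernel sees every small page as used and can coalesce them (see the
// UseTransparentHugePages comment above).
static void pretouch_range(char* start, char* end, size_t page_size) {
  for (volatile char* p = start; p < end; p += page_size) {
    *p = 0;
  }
}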

View File

@ -621,6 +621,9 @@ private:
size_t _bitmap_regions_per_slice;
size_t _bitmap_bytes_per_slice;
size_t _pretouch_heap_page_size;
size_t _pretouch_bitmap_page_size;
bool _bitmap_region_special;
bool _aux_bitmap_region_special;
@ -657,6 +660,8 @@ public:
ShenandoahLiveData* get_liveness_cache(uint worker_id);
void flush_liveness_cache(uint worker_id);
size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }
// ---------- Evacuation support
//
private:

View File

@ -625,6 +625,9 @@ void ShenandoahHeapRegion::do_commit() {
if (!heap->commit_bitmap_slice(this)) {
report_java_out_of_memory("Unable to commit bitmaps for region");
}
if (AlwaysPreTouch) {
os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
}
heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

View File

@ -35,7 +35,6 @@
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
@ -43,6 +42,7 @@
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/metaspace.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"

View File

@ -27,15 +27,17 @@
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/ostream.hpp"
#define SHENANDOAH_PHASE_NAME_FORMAT "%-28s"
#define SHENANDOAH_PHASE_NAME_FORMAT "%-30s"
#define SHENANDOAH_S_TIME_FORMAT "%8.3lf"
#define SHENANDOAH_US_TIME_FORMAT "%8.0lf"
#define SHENANDOAH_US_WORKER_TIME_FORMAT "%3.0lf"
#define SHENANDOAH_US_WORKER_NOTIME_FORMAT "%3s"
#define SHENANDOAH_PARALLELISM_FORMAT "%4.2lf"
#define SHENANDOAH_PHASE_DECLARE_NAME(type, title) \
title,
@ -227,6 +229,14 @@ void ShenandoahPhaseTimings::print_cycle_on(outputStream* out) const {
double v = _cycle_data[i] * 1000000.0;
if (v > 0) {
out->print(SHENANDOAH_PHASE_NAME_FORMAT " " SHENANDOAH_US_TIME_FORMAT " us", _phase_names[i], v);
if (is_worker_phase(Phase(i))) {
double total = _cycle_data[i + 1] * 1000000.0;
if (total > 0) {
out->print(", parallelism: " SHENANDOAH_PARALLELISM_FORMAT "x", total / v);
}
}
if (_worker_data[i] != NULL) {
out->print(", workers (us): ");
for (uint c = 0; c < _max_workers; c++) {
@ -234,7 +244,7 @@ void ShenandoahPhaseTimings::print_cycle_on(outputStream* out) const {
if (tv != ShenandoahWorkerData::uninitialized()) {
out->print(SHENANDOAH_US_WORKER_TIME_FORMAT ", ", tv * 1000000.0);
} else {
out->print("%3s, ", "---");
out->print(SHENANDOAH_US_WORKER_NOTIME_FORMAT ", ", "---");
}
}
}
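The parallelism figure added above is just the summed worker time for the phase (the total recorded in the next data slot) divided by the phase's wall-clock time. A tiny stand-alone illustration with made-up numbers:

#include <cstdio>

int main() {
  double phase_wall_us   = 10000.0;  // phase wall-clock time, made up for illustration
  double worker_total_us = 70000.0;  // summed per-worker time, made up for illustration
  // Prints "parallelism: 7.00x": on average seven workers were busy for the
  // whole duration of the phase.
  std::printf("parallelism: %4.2lfx\n", worker_total_us / phase_wall_us);
  return 0;
}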

View File

@ -30,11 +30,11 @@
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/resourceArea.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/safepoint.hpp"
@ -234,17 +234,8 @@ void ShenandoahRootScanner<ITR>::roots_do(uint worker_id, OopClosure* oops, CLDC
assert(clds != NULL, "Only possible with CLD closure");
_cld_roots.cld_do(clds, worker_id);
// With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
// and instead do that in concurrent phase under the relevant lock. This saves init mark
// pause time.
if (code != NULL && !ShenandoahConcurrentScanCodeRoots) {
_code_roots.code_blobs_do(code, worker_id);
ShenandoahParallelOopsDoThreadClosure tc_cl(oops, NULL, tc);
_thread_roots.threads_do(&tc_cl, worker_id);
} else {
ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
_thread_roots.threads_do(&tc_cl, worker_id);
}
ShenandoahParallelOopsDoThreadClosure tc_cl(oops, code, tc);
_thread_roots.threads_do(&tc_cl, worker_id);
AlwaysTrueClosure always_true;
_dedup_roots.oops_do(&always_true, oops, worker_id);

View File

@ -30,8 +30,8 @@
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "utilities/debug.hpp"
ShenandoahPhaseTimings::Phase ShenandoahGCPhase::_current_phase = ShenandoahPhaseTimings::_invalid_phase;

View File

@ -62,10 +62,10 @@
"This also caps the maximum TLAB size.") \
range(1, 100) \
\
experimental(ccstr, ShenandoahGCMode, "normal", \
experimental(ccstr, ShenandoahGCMode, "satb", \
"GC mode to use. Among other things, this defines which " \
"barriers are in in use. Possible values are:" \
" normal - default concurrent GC (three pass mark-evac-update);" \
" satb - snapshot-at-the-beginning concurrent GC (three pass mark-evac-update);" \
" iu - incremental-update concurrent GC (three pass mark-evac-update);" \
" passive - stop the world GC only (either degenerated or full)") \
\
@ -299,9 +299,6 @@
diagnostic(bool, ShenandoahAllocFailureALot, false, \
"Testing: make lots of artificial allocation failures.") \
\
diagnostic(bool, ShenandoahAlwaysPreTouch, false, \
"Pre-touch heap memory, overrides global AlwaysPreTouch.") \
\
experimental(intx, ShenandoahMarkScanPrefetch, 32, \
"How many objects to prefetch ahead when traversing mark bitmaps."\
"Set to 0 to disable prefetching.") \
@ -347,9 +344,6 @@
diagnostic(bool, ShenandoahLoadRefBarrier, true, \
"Turn on/off load-reference barriers in Shenandoah") \
\
diagnostic(bool, ShenandoahConcurrentScanCodeRoots, true, \
"Scan code roots concurrently, instead of during a pause") \
\
diagnostic(uintx, ShenandoahCodeRootsStyle, 2, \
"Use this style to scan the code cache roots:" \
" 0 - sequential iterator;" \

View File

@ -221,11 +221,11 @@ void PhaseIdealLoop::do_unswitching(IdealLoopTree *loop, Node_List &old_new) {
// Hardwire the control paths in the loops into if(true) and if(false)
_igvn.rehash_node_delayed(unswitch_iff);
short_circuit_if(unswitch_iff, proj_true);
dominated_by(proj_true, unswitch_iff, false, false);
IfNode* unswitch_iff_clone = old_new[unswitch_iff->_idx]->as_If();
_igvn.rehash_node_delayed(unswitch_iff_clone);
short_circuit_if(unswitch_iff_clone, proj_false);
dominated_by(proj_false, unswitch_iff_clone, false, false);
// Reoptimize loops
loop->record_for_igvn();

View File

@ -22,273 +22,204 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* This module tracks classes that have been prepared, so as to
* be able to compute which have been unloaded. On VM start-up
* all prepared classes are put in a table. As class prepare
* events come in they are added to the table. After an unload
* event or series of them, the VM can be asked for the list
* of classes; this list is compared against the table kept by
* this module; any classes no longer present are known to
* have been unloaded.
*
* For efficient access, classes are kept in a hash table.
* Each slot in the hash table has a linked list of KlassNode.
*
* The current set of classes is compared with the previous
* set by transferring all classes in the current set into
* a new table; any that remain in the old table have been
* unloaded.
* be able to report which have been unloaded. On VM start-up
* and whenever new classes are loaded, all prepared classes'
* signatures are attached as a JVMTI tag to the class object.
* Class unloading is tracked by registering an
* ObjectFree callback on class objects. When this happens, we find
* the signature of the unloaded class(es) and report them back
* to the event handler to synthesize class-unload-events.
*/
#include "util.h"
#include "bag.h"
#include "classTrack.h"
/* ClassTrack hash table slot count */
#define CT_HASH_SLOT_COUNT 263 /* Prime which equals 4k+3 for some k */
typedef struct KlassNode {
jclass klass; /* weak global reference */
char *signature; /* class signature */
struct KlassNode *next; /* next node in this slot */
} KlassNode;
#define NOT_TAGGED 0
/*
* Hash table of prepared classes. Each entry is a pointer
* to a linked list of KlassNode.
* The JVMTI tracking env to keep track of klass tags for class-unloads
*/
static KlassNode **table;
static jvmtiEnv* trackingEnv;
/*
* Return slot in hash table to use for this class.
* A bag containing all the deleted classes' signatures. Must be accessed under
* classTrackLock.
*/
static jint
hashKlass(jclass klass)
struct bag* deletedSignatures;
/*
* Lock to keep integrity of deletedSignatures.
*/
static jrawMonitorID classTrackLock;
/*
* Invoke the callback when classes are freed, find and record the signature
* in deletedSignatures. Those are only used in addPreparedClass() by the
* same thread.
*/
static void JNICALL
cbTrackingObjectFree(jvmtiEnv* jvmti_env, jlong tag)
{
jint hashCode = objectHashCode(klass);
return abs(hashCode) % CT_HASH_SLOT_COUNT;
debugMonitorEnter(classTrackLock);
if (deletedSignatures == NULL) {
debugMonitorExit(classTrackLock);
return;
}
*(char**)bagAdd(deletedSignatures) = (char*)jlong_to_ptr(tag);
debugMonitorExit(classTrackLock);
}
/*
* Transfer a node (which represents klass) from the current
* table to the new table.
*/
static void
transferClass(JNIEnv *env, jclass klass, KlassNode **newTable) {
jint slot = hashKlass(klass);
KlassNode **head = &table[slot];
KlassNode **newHead = &newTable[slot];
KlassNode **nodePtr;
KlassNode *node;
/* Search the node list of the current table for klass */
for (nodePtr = head; node = *nodePtr, node != NULL; nodePtr = &(node->next)) {
if (isSameObject(env, klass, node->klass)) {
/* Match found transfer node */
/* unlink from old list */
*nodePtr = node->next;
/* insert in new list */
node->next = *newHead;
*newHead = node;
return;
}
}
/* we haven't found the class, only unloads should have happened,
* so the only reason a class should not have been found is
* that it is not prepared yet, in which case we don't want it.
* Assert that the above is true.
*/
/**** the HotSpot VM doesn't create prepare events for some internal classes ***
JDI_ASSERT_MSG((classStatus(klass) &
(JVMTI_CLASS_STATUS_PREPARED|JVMTI_CLASS_STATUS_ARRAY))==0,
classSignature(klass));
***/
}
/*
* Delete a hash table of classes.
* The signatures of classes in the table are returned.
*/
static struct bag *
deleteTable(JNIEnv *env, KlassNode *oldTable[])
{
struct bag *signatures = bagCreateBag(sizeof(char*), 10);
jint slot;
if (signatures == NULL) {
EXIT_ERROR(AGENT_ERROR_OUT_OF_MEMORY,"signatures");
}
for (slot = 0; slot < CT_HASH_SLOT_COUNT; slot++) {
KlassNode *node = oldTable[slot];
while (node != NULL) {
KlassNode *next;
char **sigSpot;
/* Add signature to the signature bag */
sigSpot = bagAdd(signatures);
if (sigSpot == NULL) {
EXIT_ERROR(AGENT_ERROR_OUT_OF_MEMORY,"signature bag");
}
*sigSpot = node->signature;
/* Free weak ref and the node itself */
JNI_FUNC_PTR(env,DeleteWeakGlobalRef)(env, node->klass);
next = node->next;
jvmtiDeallocate(node);
node = next;
}
}
jvmtiDeallocate(oldTable);
return signatures;
}
/*
* Called after class unloads have occurred. Creates a new hash table
* of currently loaded prepared classes.
* The signatures of classes which were unloaded (not present in the
* new table) are returned.
* Called after class unloads have occurred.
* The signatures of classes which were unloaded are returned.
*/
struct bag *
classTrack_processUnloads(JNIEnv *env)
{
KlassNode **newTable;
struct bag *unloadedSignatures;
unloadedSignatures = NULL;
newTable = jvmtiAllocate(CT_HASH_SLOT_COUNT * sizeof(KlassNode *));
if (newTable == NULL) {
EXIT_ERROR(AGENT_ERROR_OUT_OF_MEMORY, "classTrack table");
} else {
(void)memset(newTable, 0, CT_HASH_SLOT_COUNT * sizeof(KlassNode *));
WITH_LOCAL_REFS(env, 1) {
jint classCount;
jclass *classes;
jvmtiError error;
int i;
error = allLoadedClasses(&classes, &classCount);
if ( error != JVMTI_ERROR_NONE ) {
jvmtiDeallocate(newTable);
EXIT_ERROR(error,"loaded classes");
} else {
/* Transfer each current class into the new table */
for (i=0; i<classCount; i++) {
jclass klass = classes[i];
transferClass(env, klass, newTable);
}
jvmtiDeallocate(classes);
/* Delete old table, install new one */
unloadedSignatures = deleteTable(env, table);
table = newTable;
}
} END_WITH_LOCAL_REFS(env)
debugMonitorEnter(classTrackLock);
if (deletedSignatures == NULL) {
// Class tracking not initialized, nobody's interested.
debugMonitorExit(classTrackLock);
return NULL;
}
return unloadedSignatures;
struct bag* deleted = deletedSignatures;
deletedSignatures = bagCreateBag(sizeof(char*), 10);
debugMonitorExit(classTrackLock);
return deleted;
}
/*
* Add a class to the prepared class hash table.
* Assumes no duplicates.
* Add a class to the prepared class table.
*/
void
classTrack_addPreparedClass(JNIEnv *env, jclass klass)
classTrack_addPreparedClass(JNIEnv *env_unused, jclass klass)
{
jint slot = hashKlass(klass);
KlassNode **head = &table[slot];
KlassNode *node;
jvmtiError error;
jvmtiEnv* env = trackingEnv;
if (gdata->assertOn) {
/* Check this is not a duplicate */
for (node = *head; node != NULL; node = node->next) {
if (isSameObject(env, klass, node->klass)) {
JDI_ASSERT_FAILED("Attempting to insert duplicate class");
break;
}
if (gdata && gdata->assertOn) {
// Check this is not already tagged.
jlong tag;
error = JVMTI_FUNC_PTR(trackingEnv, GetTag)(env, klass, &tag);
if (error != JVMTI_ERROR_NONE) {
EXIT_ERROR(error, "Unable to GetTag with class trackingEnv");
}
JDI_ASSERT(tag == NOT_TAGGED);
}
node = jvmtiAllocate(sizeof(KlassNode));
if (node == NULL) {
EXIT_ERROR(AGENT_ERROR_OUT_OF_MEMORY,"KlassNode");
}
error = classSignature(klass, &(node->signature), NULL);
char* signature;
error = classSignature(klass, &signature, NULL);
if (error != JVMTI_ERROR_NONE) {
jvmtiDeallocate(node);
EXIT_ERROR(error,"signature");
}
if ((node->klass = JNI_FUNC_PTR(env,NewWeakGlobalRef)(env, klass)) == NULL) {
jvmtiDeallocate(node->signature);
jvmtiDeallocate(node);
EXIT_ERROR(AGENT_ERROR_NULL_POINTER,"NewWeakGlobalRef");
error = JVMTI_FUNC_PTR(trackingEnv, SetTag)(env, klass, ptr_to_jlong(signature));
if (error != JVMTI_ERROR_NONE) {
jvmtiDeallocate(signature);
EXIT_ERROR(error,"SetTag");
}
}
/* Insert the new node */
node->next = *head;
*head = node;
static jboolean
setupEvents()
{
jvmtiCapabilities caps;
memset(&caps, 0, sizeof(caps));
caps.can_generate_object_free_events = 1;
jvmtiError error = JVMTI_FUNC_PTR(trackingEnv, AddCapabilities)(trackingEnv, &caps);
if (error != JVMTI_ERROR_NONE) {
return JNI_FALSE;
}
jvmtiEventCallbacks cb;
memset(&cb, 0, sizeof(cb));
cb.ObjectFree = cbTrackingObjectFree;
error = JVMTI_FUNC_PTR(trackingEnv, SetEventCallbacks)(trackingEnv, &cb, sizeof(cb));
if (error != JVMTI_ERROR_NONE) {
return JNI_FALSE;
}
error = JVMTI_FUNC_PTR(trackingEnv, SetEventNotificationMode)(trackingEnv, JVMTI_ENABLE, JVMTI_EVENT_OBJECT_FREE, NULL);
if (error != JVMTI_ERROR_NONE) {
return JNI_FALSE;
}
return JNI_TRUE;
}
/*
* Called once to build the initial prepared class hash table.
* Called once to initialize class-tracking.
*/
void
classTrack_initialize(JNIEnv *env)
{
WITH_LOCAL_REFS(env, 1) {
deletedSignatures = NULL;
classTrackLock = debugMonitorCreate("Deleted class tag lock");
trackingEnv = getSpecialJvmti();
if (trackingEnv == NULL) {
EXIT_ERROR(AGENT_ERROR_INTERNAL, "Failed to allocate tag-tracking jvmtiEnv");
}
jint classCount;
jclass *classes;
jvmtiError error;
jint i;
error = allLoadedClasses(&classes, &classCount);
if ( error == JVMTI_ERROR_NONE ) {
table = jvmtiAllocate(CT_HASH_SLOT_COUNT * sizeof(KlassNode *));
if (table != NULL) {
(void)memset(table, 0, CT_HASH_SLOT_COUNT * sizeof(KlassNode *));
for (i=0; i<classCount; i++) {
jclass klass = classes[i];
jint status;
jint wanted =
(JVMTI_CLASS_STATUS_PREPARED|JVMTI_CLASS_STATUS_ARRAY);
if (!setupEvents()) {
EXIT_ERROR(AGENT_ERROR_INTERNAL, "Unable to setup ObjectFree tracking");
}
/* We only want prepared classes and arrays */
status = classStatus(klass);
if ( (status & wanted) != 0 ) {
classTrack_addPreparedClass(env, klass);
}
}
} else {
jvmtiDeallocate(classes);
EXIT_ERROR(AGENT_ERROR_OUT_OF_MEMORY,"KlassNode");
jint classCount;
jclass *classes;
jvmtiError error;
jint i;
error = allLoadedClasses(&classes, &classCount);
if ( error == JVMTI_ERROR_NONE ) {
for (i = 0; i < classCount; i++) {
jclass klass = classes[i];
jint status;
jint wanted = JVMTI_CLASS_STATUS_PREPARED | JVMTI_CLASS_STATUS_ARRAY;
status = classStatus(klass);
if ((status & wanted) != 0) {
classTrack_addPreparedClass(env, klass);
}
jvmtiDeallocate(classes);
} else {
EXIT_ERROR(error,"loaded classes array");
}
} END_WITH_LOCAL_REFS(env)
jvmtiDeallocate(classes);
} else {
EXIT_ERROR(error,"loaded classes array");
}
}
/*
* Called to activate class-tracking when a listener registers for EI_GC_FINISH.
*/
void
classTrack_activate(JNIEnv *env)
{
debugMonitorEnter(classTrackLock);
deletedSignatures = bagCreateBag(sizeof(char*), 1000);
debugMonitorExit(classTrackLock);
}
static jboolean
cleanDeleted(void *signatureVoid, void *arg)
{
char* sig = *(char**)signatureVoid;
jvmtiDeallocate(sig);
return JNI_TRUE;
}
/*
* Called when agent detaches.
*/
void
classTrack_reset(void)
{
debugMonitorEnter(classTrackLock);
if (deletedSignatures != NULL) {
bagEnumerateOver(deletedSignatures, cleanDeleted, NULL);
bagDestroyBag(deletedSignatures);
deletedSignatures = NULL;
}
debugMonitorExit(classTrackLock);
}
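The non-obvious part of the scheme above is that the JVMTI tag (a jlong) carries the address of the heap-allocated signature string itself, so cbTrackingObjectFree() can drop the signature straight into the deleted-signatures bag without any lookup. A minimal sketch of that pointer-in-tag round trip, with plain integer casts standing in for the back end's ptr_to_jlong()/jlong_to_ptr() helpers:

#include <cassert>
#include <cstdint>

int main() {
  // "SetTag": stash the signature pointer in the 64-bit tag value.
  const char* signature = "Ljava/lang/Object;";
  std::int64_t tag = (std::int64_t)(std::intptr_t)signature;

  // "ObjectFree(tag)": recover the pointer from the tag and reuse the string.
  const char* recovered = (const char*)(std::intptr_t)tag;
  assert(recovered == signature);
  return 0;
}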

View File

@ -45,6 +45,12 @@ classTrack_addPreparedClass(JNIEnv *env, jclass klass);
void
classTrack_initialize(JNIEnv *env);
/*
* Activates class tracking.
*/
void
classTrack_activate(JNIEnv *env);
/*
* Reset class tracking.
*/

View File

@ -1625,6 +1625,9 @@ installHandler(HandlerNode *node,
node->handlerID = external? ++requestIdCounter : 0;
error = eventFilterRestricted_install(node);
if (node->ei == EI_GC_FINISH) {
classTrack_activate(getEnv());
}
if (error == JVMTI_ERROR_NONE) {
insert(getHandlerChain(node->ei), node);
}

View File

@ -1742,7 +1742,7 @@ isMethodObsolete(jmethodID method)
}
/* Get the jvmti environment to be used with tags */
static jvmtiEnv *
jvmtiEnv *
getSpecialJvmti(void)
{
jvmtiEnv *jvmti;

View File

@ -414,4 +414,6 @@ void createLocalRefSpace(JNIEnv *env, jint capacity);
void saveGlobalRef(JNIEnv *env, jobject obj, jobject *pobj);
void tossGlobalRef(JNIEnv *env, jobject *pobj);
jvmtiEnv* getSpecialJvmti(void);
#endif

View File

@ -0,0 +1,84 @@
/*
* Copyright (c) 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8241900
* @summary Loop unswitching may cause dependence on null check to be lost
*
* @requires vm.compiler2.enabled
* @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:-TieredCompilation -XX:-BackgroundCompilation -XX:+StressGCM -XX:+StressLCM TestLoopUnswitchingLostCastDependency
*/
import java.util.Arrays;
public class TestLoopUnswitchingLostCastDependency {
private static Object objectField;
public static void main(String[] args) {
Object[] array = new Object[100];
Arrays.fill(array, new Object());
for (int i = 0; i < 20_000; i++) {
array[1] = null;
test(array);
array[1] = new Object();
objectField = null;
test(array);
array[1] = new Object();
objectField = new Object();
test(array);
}
}
private static void test(Object[] array) {
Object o = objectField;
Object o3 = array[1];
int j = 0;
for (int i = 1; i < 100; i *= 2) {
Object o2 = array[i];
// Both branches taken: loop is unswitched.
if (o3 != null) {
if (o2 == null) {
}
// Both branches taken: loop is unswitched next.
if (o != null) {
// CastPP here becomes control dependent on o2 ==
// null check above.
if (o.getClass() == Object.class) {
}
// This causes partial peeling. When that happens,
// the o2 == null check becomes redundant with the
// o3 != null check in the peeled iteration. The
// CastPP with o as input that was control
// dependent on the o2 == null check becomes
// control dependent on the o3 != null check,
// above the o != null check.
if (j > 7) {
}
j++;
}
}
}
}
}

View File

@ -56,7 +56,7 @@ public class TestObjItrWithHeapDump {
}
String[][][] modeHeuristics = new String[][][] {
{{"normal"}, {"adaptive", "compact", "static", "aggressive"}},
{{"satb"}, {"adaptive", "compact", "static", "aggressive"}},
{{"iu"}, {"adaptive", "aggressive"}},
{{"passive"}, {"passive"}}
};

View File

@ -23,29 +23,59 @@
/**
* @test
* @bug 8237837
* @bug 8237837 8244721
* @summary Shenandoah: assert(mem == __null) failed: only one safepoint
* @key gc
* @requires vm.flavor == "server"
* @requires vm.gc.Shenandoah & !vm.graal.enabled
*
* @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xcomp -XX:CompileOnly=BarrierInInfiniteLoop::test -XX:CompileCommand=quiet BarrierInInfiniteLoop
* @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -Xcomp -XX:CompileOnly=BarrierInInfiniteLoop::test1
* -XX:CompileOnly=BarrierInInfiniteLoop::test2 -XX:CompileOnly=BarrierInInfiniteLoop::test3 -XX:CompileCommand=quiet BarrierInInfiniteLoop
*
*/
public class BarrierInInfiniteLoop {
private static Object field1 = new Object();
private static Object field2 = new Object();
private static int field3;
public static void main(String[] args) {
test(false);
test1(false);
test2(false, false);
test3(false);
}
private static void test(boolean flag) {
private static void test1(boolean flag) {
if (flag) {
for (;;) {
field1 = field2;
}
}
}
private static void test2(boolean flag1, boolean flag2) {
if (flag1) {
for (;;) {
for (;;) {
if (flag2) {
break;
}
field1 = field2;
}
}
}
}
private static void test3(boolean flag) {
if (flag) {
for (;;) {
for (;;) {
field3 = 42;
if (field1 == field2) {
break;
}
}
}
}
}
}

View File

@ -0,0 +1,80 @@
/*
* Copyright (c) 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @bug 8244663
* @summary Shenandoah: C2 assertion fails in Matcher::collect_null_checks
* @key gc
* @requires vm.flavor == "server"
* @requires vm.gc.Shenandoah & !vm.graal.enabled
*
* @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:-TieredCompilation -XX:-BackgroundCompilation -XX:-UseOnStackReplacement
* -XX:CompileCommand=dontinline,TestShenandoahCmpPAfterCall::not_inlined TestShenandoahCmpPAfterCall
*
*/
public class TestShenandoahCmpPAfterCall {
private static Object field1 = new Object();
private static Object field2 = new Object();
private static Object o3;
private static volatile int barrier;
public static void main(String[] args) {
for (int i = 0; i < 20_000; i++) {
test();
}
}
private static void test() {
Object o1 = null;
Object o2 = field2;
try {
not_inlined();
o1 = field1;
if (o1 == o2) {
}
} catch (Exception1 ex1) {
o1 = field1;
if (o1 == o2) {
}
}
barrier = 42;
if (o1 == o2) {
}
}
static int count = 0;
private static void not_inlined() throws Exception1 {
count++;
if ((count % 100) == 0) {
throw new Exception1();
}
}
private static class Exception1 extends Exception {
}
}

View File

@ -125,7 +125,7 @@ public class TestClassLoaderLeak {
}
String[][][] modeHeuristics = new String[][][] {
{{"normal"}, {"adaptive", "compact", "static", "aggressive"}},
{{"satb"}, {"adaptive", "compact", "static", "aggressive"}},
{{"iu"}, {"adaptive", "aggressive"}},
{{"passive"}, {"passive"}}
};

View File

@ -45,7 +45,7 @@ public class TestModeUnlock {
}
public static void main(String[] args) throws Exception {
testWith("-XX:ShenandoahGCMode=normal", Mode.PRODUCT);
testWith("-XX:ShenandoahGCMode=satb", Mode.PRODUCT);
testWith("-XX:ShenandoahGCMode=iu", Mode.EXPERIMENTAL);
testWith("-XX:ShenandoahGCMode=passive", Mode.DIAGNOSTIC);
}