8137099: G1 needs to "upgrade" GC within the safepoint if it can't allocate during that safepoint to avoid OoME
During a minor GC, if memory allocation fails, start a full GC within the same VM operation in the same safepoint. This avoids a race in which the GC locker could prevent the full GC from occurring, resulting in a premature OutOfMemoryError.

Co-authored-by: Axel Siebenborn <axel.siebenborn@sap.com>
Reviewed-by: ehelin, sjohanss, phh
commit 6e9f44c74c (parent 906712e7c8)
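The heart of the fix is in VM_G1CollectForAllocation::doit() (vm_operations_g1.cpp, below): the escalation to a stronger collection now happens while the VM thread is still inside the safepoint, so no JNI critical section can re-activate the GC locker in between and block it. A condensed sketch of the new flow, paraphrased from the hunks below (not verbatim):

    // After the evacuation pause, still at the safepoint:
    _pause_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
    if (_pause_succeeded) {
      if (_word_size > 0) {
        // Retry the allocation, escalating up to a full GC if necessary.
        _result = g1h->satisfy_failed_allocation(_word_size, _allocation_context, &_pause_succeeded);
      } else if (!g1h->has_regions_left_for_allocation()) {
        // No specific request, but no regions left either: maximally compacting full GC.
        _pause_succeeded = g1h->do_full_collection(false /* explicit gc */,
                                                   true  /* clear_all_soft_refs */);
      }
      guarantee(_pause_succeeded, "Elevated collections during the safepoint must always succeed.");
    }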
--- a/make/test/JtregNativeHotspot.gmk
+++ b/make/test/JtregNativeHotspot.gmk
@@ -44,6 +44,7 @@ $(eval $(call IncludeCustomExtension, test/JtregNativeHotspot.gmk))
 # Add more directories here when needed.
 BUILD_HOTSPOT_JTREG_NATIVE_SRC += \
     $(TOPDIR)/test/hotspot/jtreg/gc/g1/TestJNIWeakG1 \
+    $(TOPDIR)/test/hotspot/jtreg/gc/stress/TestJNIBlockFullGC \
     $(TOPDIR)/test/hotspot/jtreg/gc/stress/gclocker \
     $(TOPDIR)/test/hotspot/jtreg/gc/cslocker \
     $(TOPDIR)/test/hotspot/jtreg/native_sanity \
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -445,9 +445,7 @@ HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!is_humongous(word_size), "we do not allow humongous TLABs");
 
-  uint dummy_gc_count_before;
-  uint dummy_gclocker_retry_count = 0;
-  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
+  return attempt_allocation(word_size);
 }
 
 HeapWord*
@@ -455,62 +453,16 @@ G1CollectedHeap::mem_allocate(size_t word_size,
                               bool* gc_overhead_limit_was_exceeded) {
   assert_heap_not_locked_and_not_at_safepoint();
 
-  // Loop until the allocation is satisfied, or unsatisfied after GC.
-  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
-    uint gc_count_before;
-
-    HeapWord* result = NULL;
-    if (!is_humongous(word_size)) {
-      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
-    } else {
-      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
-    }
-    if (result != NULL) {
-      return result;
-    }
-
-    // Create the garbage collection operation...
-    VM_G1CollectForAllocation op(gc_count_before, word_size);
-    op.set_allocation_context(AllocationContext::current());
-
-    // ...and get the VM thread to execute it.
-    VMThread::execute(&op);
-
-    if (op.prologue_succeeded() && op.pause_succeeded()) {
-      // If the operation was successful we'll return the result even
-      // if it is NULL. If the allocation attempt failed immediately
-      // after a Full GC, it's unlikely we'll be able to allocate now.
-      HeapWord* result = op.result();
-      if (result != NULL && !is_humongous(word_size)) {
-        // Allocations that take place on VM operations do not do any
-        // card dirtying and we have to do it here. We only have to do
-        // this for non-humongous allocations, though.
-        dirty_young_block(result, word_size);
-      }
-      return result;
-    } else {
-      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
-        return NULL;
-      }
-      assert(op.result() == NULL,
-             "the result should be NULL if the VM op did not succeed");
-    }
-
-    // Give a warning if we seem to be looping forever.
-    if ((QueuedAllocationWarningCount > 0) &&
-        (try_count % QueuedAllocationWarningCount == 0)) {
-      log_warning(gc)("G1CollectedHeap::mem_allocate retries %d times", try_count);
-    }
-  }
-
-  ShouldNotReachHere();
-  return NULL;
+  if (is_humongous(word_size)) {
+    return attempt_allocation_humongous(word_size);
+  }
+  return attempt_allocation(word_size);
 }
 
 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
-                                                   AllocationContext_t context,
-                                                   uint* gc_count_before_ret,
-                                                   uint* gclocker_retry_count_ret) {
+                                                   AllocationContext_t context) {
+  ResourceMark rm; // For retrieving the thread names in log messages.
+
   // Make sure you read the note in attempt_allocation_humongous().
 
   assert_heap_not_locked_and_not_at_safepoint();
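Extracted from the hunk above for readability, the resulting entry point: mem_allocate() now only dispatches on the request size, and the GC-scheduling retry loops live entirely inside the attempt_allocation_* helpers:

    HeapWord*
    G1CollectedHeap::mem_allocate(size_t word_size,
                                  bool* gc_overhead_limit_was_exceeded) {
      assert_heap_not_locked_and_not_at_safepoint();

      if (is_humongous(word_size)) {
        return attempt_allocation_humongous(word_size); // takes the Heap_lock, may schedule a pause
      }
      return attempt_allocation(word_size);             // lock-free first attempt, then the slow path
    }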
@@ -525,7 +477,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
   // fails to perform the allocation. b) is the only case when we'll
   // return NULL.
   HeapWord* result = NULL;
-  for (int try_count = 1; /* we'll return */; try_count += 1) {
+  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
     bool should_try_gc;
     uint gc_count_before;
 
@@ -536,30 +488,23 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
        return result;
      }
 
-    if (GCLocker::is_active_and_needs_gc()) {
-      if (g1_policy()->can_expand_young_list()) {
-        // No need for an ergo verbose message here,
-        // can_expand_young_list() does this when it returns true.
-        result = _allocator->attempt_allocation_force(word_size, context);
-        if (result != NULL) {
-          return result;
-        }
-      }
-      should_try_gc = false;
-    } else {
-      // The GCLocker may not be active but the GCLocker initiated
-      // GC may not yet have been performed (GCLocker::needs_gc()
-      // returns true). In this case we do not try this GC and
-      // wait until the GCLocker initiated GC is performed, and
-      // then retry the allocation.
-      if (GCLocker::needs_gc()) {
-        should_try_gc = false;
-      } else {
-        // Read the GC count while still holding the Heap_lock.
-        gc_count_before = total_collections();
-        should_try_gc = true;
-      }
-    }
+    // If the GCLocker is active and we are bound for a GC, try expanding young gen.
+    // This is different to when only GCLocker::needs_gc() is set: try to avoid
+    // waiting because the GCLocker is active to not wait too long.
+    if (GCLocker::is_active_and_needs_gc() && g1_policy()->can_expand_young_list()) {
+      // No need for an ergo message here, can_expand_young_list() does this when
+      // it returns true.
+      result = _allocator->attempt_allocation_force(word_size, context);
+      if (result != NULL) {
+        return result;
+      }
+    }
+    // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
+    // the GCLocker initiated GC has been performed and then retry. This includes
+    // the case when the GC Locker is not active but has not been performed.
+    should_try_gc = !GCLocker::needs_gc();
+    // Read the GC count while still holding the Heap_lock.
+    gc_count_before = total_collections();
   }
 
   if (should_try_gc) {
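The GC locker handling in the slow path thereby collapses into two straight-line steps; condensed from the new code in the hunk above (comments abbreviated):

    // With the Heap_lock held: if a GCLocker-initiated GC is pending, optionally
    // force-expand the young gen instead of waiting for that GC...
    if (GCLocker::is_active_and_needs_gc() && g1_policy()->can_expand_young_list()) {
      result = _allocator->attempt_allocation_force(word_size, context);
      if (result != NULL) {
        return result;
      }
    }
    // ...and only schedule a pause ourselves when no GCLocker-initiated GC is pending.
    should_try_gc = !GCLocker::needs_gc();
    gc_count_before = total_collections(); // read while still holding the Heap_lock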
@@ -568,28 +513,33 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                    GCCause::_g1_inc_collection_pause);
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
+        log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
+                             Thread::current()->name(), p2i(result));
         return result;
       }
 
       if (succeeded) {
-        // If we get here we successfully scheduled a collection which
-        // failed to allocate. No point in trying to allocate
-        // further. We'll just return NULL.
-        MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = total_collections();
+        // We successfully scheduled a collection which failed to allocate. No
+        // point in trying to allocate further. We'll just return NULL.
+        log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
+                             SIZE_FORMAT " words", Thread::current()->name(), word_size);
         return NULL;
       }
+      log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
+                           Thread::current()->name(), word_size);
     } else {
-      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
-        MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = total_collections();
+      // Failed to schedule a collection.
+      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
+        log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
+                               SIZE_FORMAT " words", Thread::current()->name(), word_size);
         return NULL;
       }
+      log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
       // The GCLocker is either active or the GCLocker initiated
       // GC has not yet been performed. Stall until it is and
       // then retry the allocation.
       GCLocker::stall_until_clear();
-      (*gclocker_retry_count_ret) += 1;
+      gclocker_retry_count += 1;
     }
 
     // We can reach here if we were unsuccessful in scheduling a
@@ -600,6 +550,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
     // first attempt (without holding the Heap_lock) here and the
     // follow-on attempt will be at the start of the next loop
     // iteration (after taking the Heap_lock).
+
     result = _allocator->attempt_allocation(word_size, context);
     if (result != NULL) {
       return result;
@@ -608,8 +559,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
     // Give a warning if we seem to be looping forever.
     if ((QueuedAllocationWarningCount > 0) &&
         (try_count % QueuedAllocationWarningCount == 0)) {
-      log_warning(gc)("G1CollectedHeap::attempt_allocation_slow() "
-                      "retries %d times", try_count);
+      log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
+                             Thread::current()->name(), try_count, word_size);
     }
   }
 
@@ -830,9 +781,7 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
   }
 }
 
-inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
-                                                     uint* gc_count_before_ret,
-                                                     uint* gclocker_retry_count_ret) {
+inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!is_humongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
@@ -841,10 +790,7 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
   HeapWord* result = _allocator->attempt_allocation(word_size, context);
 
   if (result == NULL) {
-    result = attempt_allocation_slow(word_size,
-                                     context,
-                                     gc_count_before_ret,
-                                     gclocker_retry_count_ret);
+    result = attempt_allocation_slow(word_size, context);
   }
   assert_heap_not_locked();
   if (result != NULL) {
@@ -925,9 +871,9 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
   decrease_used(size_used);
 }
 
-HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-                                                        uint* gc_count_before_ret,
-                                                        uint* gclocker_retry_count_ret) {
+HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
+  ResourceMark rm; // For retrieving the thread names in log messages.
+
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
@@ -958,10 +904,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
   // fails to perform the allocation. b) is the only case when we'll
   // return NULL.
   HeapWord* result = NULL;
-  for (int try_count = 1; /* we'll return */; try_count += 1) {
+  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
     bool should_try_gc;
     uint gc_count_before;
 
+
     {
       MutexLockerEx x(Heap_lock);
 
@@ -975,69 +922,63 @@
       return result;
     }
 
-    if (GCLocker::is_active_and_needs_gc()) {
-      should_try_gc = false;
-    } else {
-      // The GCLocker may not be active but the GCLocker initiated
-      // GC may not yet have been performed (GCLocker::needs_gc()
-      // returns true). In this case we do not try this GC and
-      // wait until the GCLocker initiated GC is performed, and
-      // then retry the allocation.
-      if (GCLocker::needs_gc()) {
-        should_try_gc = false;
-      } else {
-        // Read the GC count while still holding the Heap_lock.
-        gc_count_before = total_collections();
-        should_try_gc = true;
-      }
-    }
+    // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
+    // the GCLocker initiated GC has been performed and then retry. This includes
+    // the case when the GC Locker is not active but has not been performed.
+    should_try_gc = !GCLocker::needs_gc();
+    // Read the GC count while still holding the Heap_lock.
+    gc_count_before = total_collections();
   }
 
   if (should_try_gc) {
     // If we failed to allocate the humongous object, we should try to
     // do a collection pause (if we're allowed) in case it reclaims
     // enough space for the allocation to succeed after the pause.
 
     bool succeeded;
     result = do_collection_pause(word_size, gc_count_before, &succeeded,
                                  GCCause::_g1_humongous_allocation);
     if (result != NULL) {
       assert(succeeded, "only way to get back a non-NULL result");
+      log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
+                           Thread::current()->name(), p2i(result));
       return result;
     }
 
     if (succeeded) {
-      // If we get here we successfully scheduled a collection which
-      // failed to allocate. No point in trying to allocate
-      // further. We'll just return NULL.
-      MutexLockerEx x(Heap_lock);
-      *gc_count_before_ret = total_collections();
+      // We successfully scheduled a collection which failed to allocate. No
+      // point in trying to allocate further. We'll just return NULL.
+      log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
+                           SIZE_FORMAT " words", Thread::current()->name(), word_size);
       return NULL;
     }
+    log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT "",
+                         Thread::current()->name(), word_size);
   } else {
-    if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
-      MutexLockerEx x(Heap_lock);
-      *gc_count_before_ret = total_collections();
+    // Failed to schedule a collection.
+    if (gclocker_retry_count > GCLockerRetryAllocationCount) {
+      log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
+                             SIZE_FORMAT " words", Thread::current()->name(), word_size);
       return NULL;
     }
+    log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
     // The GCLocker is either active or the GCLocker initiated
    // GC has not yet been performed. Stall until it is and
     // then retry the allocation.
     GCLocker::stall_until_clear();
-    (*gclocker_retry_count_ret) += 1;
+    gclocker_retry_count += 1;
   }
 
   // We can reach here if we were unsuccessful in scheduling a
   // collection (because another thread beat us to it) or if we were
   // stalled due to the GC locker. In either can we should retry the
   // allocation attempt in case another thread successfully
-  // performed a collection and reclaimed enough space. Give a
-  // warning if we seem to be looping forever.
+  // performed a collection and reclaimed enough space.
+  // Humongous object allocation always needs a lock, so we wait for the retry
+  // in the next iteration of the loop, unlike for the regular iteration case.
+  // Give a warning if we seem to be looping forever.
 
   if ((QueuedAllocationWarningCount > 0) &&
       (try_count % QueuedAllocationWarningCount == 0)) {
-    log_warning(gc)("G1CollectedHeap::attempt_allocation_humongous() "
-                    "retries %d times", try_count);
+    log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
+                           Thread::current()->name(), try_count, word_size);
   }
 }
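Both slow paths now share the same bounded stall-and-retry shape when a pause cannot be scheduled; paraphrased from the else branches in the hunks above (the patch writes this as the else branch of if (should_try_gc)):

    if (!should_try_gc) {
      // A GCLocker-initiated GC is pending; give up after too many stalls.
      if (gclocker_retry_count > GCLockerRetryAllocationCount) {
        return NULL;
      }
      GCLocker::stall_until_clear(); // wait for the JNI critical sections to drain
      gclocker_retry_count += 1;     // local counter now, instead of an out-parameter
    }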
@@ -1339,7 +1280,6 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
                                                             context,
                                                             expect_null_mutator_alloc_region);
   if (result != NULL) {
-    assert(*gc_succeeded, "sanity");
     return result;
   }
 
@@ -1349,7 +1289,6 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
   // do something smarter than full collection to satisfy a failed alloc.)
   result = expand_and_allocate(word_size, context);
   if (result != NULL) {
-    assert(*gc_succeeded, "sanity");
     return result;
   }
 
@@ -1401,7 +1340,6 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
                                                      succeeded);
 
   if (result != NULL) {
-    assert(*succeeded, "sanity");
     return result;
   }
 
@@ -1412,7 +1350,6 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
   // space available is large enough for the allocation, then a more
   // complete compaction phase than we've tried so far might be
   // appropriate.
-  assert(*succeeded, "sanity");
   return NULL;
 }
 
@@ -2147,7 +2084,7 @@ void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
   // This notify_all() will ensure that a thread that called
   // System.gc() with (with ExplicitGCInvokesConcurrent set or not)
   // and it's waiting for a full GC to finish will be woken up. It is
-  // waiting in VM_G1IncCollectionPause::doit_epilogue().
+  // waiting in VM_G1CollectForAllocation::doit_epilogue().
   FullGCCount_lock->notify_all();
 }
 
@@ -2175,13 +2112,12 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
       // Schedule an initial-mark evacuation pause that will start a
       // concurrent cycle. We're setting word_size to 0 which means that
       // we are not requesting a post-GC allocation.
-      VM_G1IncCollectionPause op(gc_count_before,
-                                 0,     /* word_size */
-                                 true,  /* should_initiate_conc_mark */
-                                 g1_policy()->max_pause_time_ms(),
-                                 cause);
-      op.set_allocation_context(AllocationContext::current());
-
+      VM_G1CollectForAllocation op(0,     /* word_size */
+                                   gc_count_before,
+                                   cause,
+                                   true,  /* should_initiate_conc_mark */
+                                   g1_policy()->max_pause_time_ms(),
+                                   AllocationContext::current());
       VMThread::execute(&op);
       if (!op.pause_succeeded()) {
         if (old_marking_count_before == _old_marking_cycles_started) {
@@ -2204,11 +2140,12 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
 
       // Schedule a standard evacuation pause. We're setting word_size
       // to 0 which means that we are not requesting a post-GC allocation.
-      VM_G1IncCollectionPause op(gc_count_before,
-                                 0,     /* word_size */
-                                 false, /* should_initiate_conc_mark */
-                                 g1_policy()->max_pause_time_ms(),
-                                 cause);
+      VM_G1CollectForAllocation op(0,     /* word_size */
+                                   gc_count_before,
+                                   cause,
+                                   false, /* should_initiate_conc_mark */
+                                   g1_policy()->max_pause_time_ms(),
+                                   AllocationContext::current());
       VMThread::execute(&op);
     } else {
       // Schedule a Full GC.
@@ -2619,13 +2556,12 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                                bool* succeeded,
                                                GCCause::Cause gc_cause) {
   assert_heap_not_locked_and_not_at_safepoint();
-  VM_G1IncCollectionPause op(gc_count_before,
-                             word_size,
-                             false, /* should_initiate_conc_mark */
-                             g1_policy()->max_pause_time_ms(),
-                             gc_cause);
-
-  op.set_allocation_context(AllocationContext::current());
+  VM_G1CollectForAllocation op(word_size,
+                               gc_count_before,
+                               gc_cause,
+                               false, /* should_initiate_conc_mark */
+                               g1_policy()->max_pause_time_ms(),
+                               AllocationContext::current());
   VMThread::execute(&op);
 
   HeapWord* result = op.result();
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,6 @@ class G1CollectedHeap : public CollectedHeap {
   friend class VM_CollectForMetadataAllocation;
   friend class VM_G1CollectForAllocation;
   friend class VM_G1CollectFull;
-  friend class VM_G1IncCollectionPause;
   friend class VMStructs;
   friend class MutatorAllocRegion;
   friend class G1FullCollector;
@@ -454,35 +453,20 @@ protected:
   virtual HeapWord* mem_allocate(size_t word_size,
                                  bool* gc_overhead_limit_was_exceeded);
 
-  // The following three methods take a gc_count_before_ret
-  // parameter which is used to return the GC count if the method
-  // returns NULL. Given that we are required to read the GC count
-  // while holding the Heap_lock, and these paths will take the
-  // Heap_lock at some point, it's easier to get them to read the GC
-  // count while holding the Heap_lock before they return NULL instead
-  // of the caller (namely: mem_allocate()) having to also take the
-  // Heap_lock just to read the GC count.
-
   // First-level mutator allocation attempt: try to allocate out of
   // the mutator alloc region without taking the Heap_lock. This
   // should only be used for non-humongous allocations.
-  inline HeapWord* attempt_allocation(size_t word_size,
-                                      uint* gc_count_before_ret,
-                                      uint* gclocker_retry_count_ret);
+  inline HeapWord* attempt_allocation(size_t word_size);
 
   // Second-level mutator allocation attempt: take the Heap_lock and
   // retry the allocation attempt, potentially scheduling a GC
   // pause. This should only be used for non-humongous allocations.
   HeapWord* attempt_allocation_slow(size_t word_size,
-                                    AllocationContext_t context,
-                                    uint* gc_count_before_ret,
-                                    uint* gclocker_retry_count_ret);
+                                    AllocationContext_t context);
 
   // Takes the Heap_lock and attempts a humongous allocation. It can
   // potentially schedule a GC pause.
-  HeapWord* attempt_allocation_humongous(size_t word_size,
-                                         uint* gc_count_before_ret,
-                                         uint* gclocker_retry_count_ret);
+  HeapWord* attempt_allocation_humongous(size_t word_size);
 
   // Allocation attempt that should be called during safepoints (e.g.,
   // at the end of a successful GC). expect_null_mutator_alloc_region
@@ -1078,6 +1062,11 @@ public:
     return _hrm.available() == 0;
   }
 
+  // Returns whether there are any regions left in the heap for allocation.
+  bool has_regions_left_for_allocation() const {
+    return !is_maximal_no_gc() || num_free_regions() != 0;
+  }
+
   // The current number of regions in the heap.
   uint num_regions() const { return _hrm.length(); }
 
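The new predicate added above is what the VM operation consults before falling back to a maximally compacting collection (see vm_operations_g1.cpp below); repeated here for reference:

    bool has_regions_left_for_allocation() const {
      return !is_maximal_no_gc() || num_free_regions() != 0;
    }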
--- a/src/hotspot/share/gc/g1/vm_operations_g1.cpp
+++ b/src/hotspot/share/gc/g1/vm_operations_g1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,34 +33,21 @@
 #include "gc/shared/isGCActiveMark.hpp"
 #include "runtime/interfaceSupport.hpp"
 
-VM_G1CollectForAllocation::VM_G1CollectForAllocation(uint gc_count_before,
-                                                     size_t word_size)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
-                                   GCCause::_allocation_failure) {
-  guarantee(word_size != 0, "An allocation should always be requested with this operation.");
-}
-
-void VM_G1CollectForAllocation::doit() {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCCauseSetter x(g1h, _gc_cause);
-
-  _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
-  assert(_result == NULL || _pause_succeeded,
-         "if we get back a result, the pause should have succeeded");
-}
-
 void VM_G1CollectFull::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
   g1h->do_full_collection(false /* clear_all_soft_refs */);
 }
 
-VM_G1IncCollectionPause::VM_G1IncCollectionPause(uint gc_count_before,
-                                                 size_t word_size,
-                                                 bool should_initiate_conc_mark,
-                                                 double target_pause_time_ms,
-                                                 GCCause::Cause gc_cause)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
+VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t word_size,
+                                                     uint gc_count_before,
+                                                     GCCause::Cause gc_cause,
+                                                     bool should_initiate_conc_mark,
+                                                     double target_pause_time_ms,
+                                                     AllocationContext_t allocation_context)
+  : VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
+    _pause_succeeded(false),
+    _allocation_context(allocation_context),
     _should_initiate_conc_mark(should_initiate_conc_mark),
     _target_pause_time_ms(target_pause_time_ms),
     _should_retry_gc(false),
@@ -71,8 +58,8 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(uint gc_count_before,
   _gc_cause = gc_cause;
 }
 
-bool VM_G1IncCollectionPause::doit_prologue() {
-  bool res = VM_G1OperationWithAllocRequest::doit_prologue();
+bool VM_G1CollectForAllocation::doit_prologue() {
+  bool res = VM_CollectForAllocation::doit_prologue();
   if (!res) {
     if (_should_initiate_conc_mark) {
       // The prologue can fail for a couple of reasons. The first is that another GC
@@ -87,7 +74,7 @@ bool VM_G1CollectForAllocation::doit_prologue() {
   return res;
 }
 
-void VM_G1IncCollectionPause::doit() {
+void VM_G1CollectForAllocation::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
          "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
@@ -95,7 +82,7 @@ void VM_G1CollectForAllocation::doit() {
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
     _result = g1h->attempt_allocation_at_safepoint(_word_size,
-                                                   allocation_context(),
+                                                   _allocation_context,
                                                    false /* expect_null_cur_alloc_region */);
     if (_result != NULL) {
       // If we can successfully allocate before we actually do the
@@ -144,27 +131,34 @@ void VM_G1IncCollectionPause::doit() {
     }
   }
 
-  _pause_succeeded =
-    g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
-  if (_pause_succeeded && _word_size > 0) {
-    // An allocation had been requested.
-    _result = g1h->attempt_allocation_at_safepoint(_word_size,
-                                                   allocation_context(),
-                                                   true /* expect_null_cur_alloc_region */);
+  // Try a partial collection of some kind.
+  _pause_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+
+  if (_pause_succeeded) {
+    if (_word_size > 0) {
+      // An allocation had been requested. Do it, eventually trying a stronger
+      // kind of GC.
+      _result = g1h->satisfy_failed_allocation(_word_size, _allocation_context, &_pause_succeeded);
+    } else if (!g1h->has_regions_left_for_allocation()) {
+      // There has been a request to perform a GC to free some space. We have no
+      // information on how much memory has been asked for. In case there are
+      // absolutely no regions left to allocate into, do a maximally compacting full GC.
+      log_info(gc, ergo)("Attempting maximally compacting collection");
+      _pause_succeeded = g1h->do_full_collection(false, /* explicit gc */
+                                                 true   /* clear_all_soft_refs */);
+    }
+    guarantee(_pause_succeeded, "Elevated collections during the safepoint must always succeed.");
   } else {
     assert(_result == NULL, "invariant");
-    if (!_pause_succeeded) {
-      // Another possible reason reason for the pause to not be successful
-      // is that, again, the GC locker is active (and has become active
-      // since the prologue was executed). In this case we should retry
-      // the pause after waiting for the GC locker to become inactive.
-      _should_retry_gc = true;
-    }
+    // The only reason for the pause to not be successful is that, the GC locker is
+    // active (or has become active since the prologue was executed). In this case
+    // we should retry the pause after waiting for the GC locker to become inactive.
+    _should_retry_gc = true;
   }
 }
 
-void VM_G1IncCollectionPause::doit_epilogue() {
-  VM_G1OperationWithAllocRequest::doit_epilogue();
+void VM_G1CollectForAllocation::doit_epilogue() {
+  VM_CollectForAllocation::doit_epilogue();
 
   // If the pause was initiated by a System.gc() and
   // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
--- a/src/hotspot/share/gc/g1/vm_operations_g1.hpp
+++ b/src/hotspot/share/gc/g1/vm_operations_g1.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,26 +32,8 @@
 // VM_operations for the G1 collector.
 // VM_GC_Operation:
 //   - VM_CGC_Operation
+//   - VM_G1CollectForAllocation
 //   - VM_G1CollectFull
-//   - VM_G1OperationWithAllocRequest
-//     - VM_G1CollectForAllocation
-//     - VM_G1IncCollectionPause
-
-class VM_G1OperationWithAllocRequest : public VM_CollectForAllocation {
-protected:
-  bool      _pause_succeeded;
-  AllocationContext_t _allocation_context;
-
-public:
-  VM_G1OperationWithAllocRequest(uint gc_count_before,
-                                 size_t word_size,
-                                 GCCause::Cause gc_cause)
-    : VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
-      _pause_succeeded(false) {}
-  bool pause_succeeded() { return _pause_succeeded; }
-  void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
-  AllocationContext_t allocation_context() { return _allocation_context; }
-};
 
 class VM_G1CollectFull: public VM_GC_Operation {
 public:
@@ -62,41 +44,35 @@ public:
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
   virtual void doit();
   virtual const char* name() const {
-    return "full garbage-first collection";
+    return "G1 Full collection";
   }
 };
 
-class VM_G1CollectForAllocation: public VM_G1OperationWithAllocRequest {
-public:
-  VM_G1CollectForAllocation(uint gc_count_before,
-                            size_t word_size);
-  virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
-  virtual void doit();
-  virtual const char* name() const {
-    return "garbage-first collection to satisfy allocation";
-  }
-};
-
-class VM_G1IncCollectionPause: public VM_G1OperationWithAllocRequest {
+class VM_G1CollectForAllocation: public VM_CollectForAllocation {
 private:
+  bool      _pause_succeeded;
+  AllocationContext_t _allocation_context;
+
   bool         _should_initiate_conc_mark;
   bool         _should_retry_gc;
   double       _target_pause_time_ms;
   uint         _old_marking_cycles_completed_before;
 public:
-  VM_G1IncCollectionPause(uint gc_count_before,
-                          size_t word_size,
-                          bool should_initiate_conc_mark,
-                          double target_pause_time_ms,
-                          GCCause::Cause gc_cause);
-  virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
+  VM_G1CollectForAllocation(size_t word_size,
+                            uint gc_count_before,
+                            GCCause::Cause gc_cause,
+                            bool should_initiate_conc_mark,
+                            double target_pause_time_ms,
+                            AllocationContext_t allocation_context);
+  virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
   virtual bool doit_prologue();
   virtual void doit();
   virtual void doit_epilogue();
   virtual const char* name() const {
-    return "garbage-first incremental collection pause";
+    return "G1 collect for allocation";
   }
   bool should_retry_gc() const { return _should_retry_gc; }
+  bool pause_succeeded() { return _pause_succeeded; }
 };
 
 // Concurrent GC stop-the-world operations such as remark and cleanup;
--- a/src/hotspot/share/runtime/vm_operations.hpp
+++ b/src/hotspot/share/runtime/vm_operations.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,9 +67,8 @@
   template(CGC_Operation) \
   template(CMS_Initial_Mark) \
   template(CMS_Final_Remark) \
-  template(G1CollectFull) \
   template(G1CollectForAllocation) \
-  template(G1IncCollectionPause) \
+  template(G1CollectFull) \
   template(HandshakeOneThread) \
   template(HandshakeAllThreads) \
   template(HandshakeFallback) \
--- a/test/hotspot/jtreg/ProblemList.txt
+++ b/test/hotspot/jtreg/ProblemList.txt
@@ -71,6 +71,7 @@ gc/g1/TestVerifyGCType.java 8193067 generic-all
 gc/stress/gclocker/TestGCLockerWithParallel.java 8180622 generic-all
 gc/stress/gclocker/TestGCLockerWithG1.java 8179226 generic-all
 gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java 8177765 generic-all
+gc/stress/TestJNIBlockFullGC/TestJNIBlockFullGC.java 8192647 generic-all
 
 #############################################################################
 
--- /dev/null
+++ b/test/hotspot/jtreg/gc/stress/TestJNIBlockFullGC/TestJNIBlockFullGC.java
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, SAP SE and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestJNIBlockFullGC
+ * @summary Check that in G1 a Full GC to reclaim space can not be blocked out by the GC locker.
+ * @key gc
+ * @requires vm.gc.G1
+ * @run main/othervm/native -Xmx64m -XX:+UseG1GC -Xlog:gc=info,gc+alloc=trace -XX:MaxGCPauseMillis=10 TestJNIBlockFullGC 10 10000 10000 10000 30000 10000 0.7
+ */
+
+import java.lang.ref.SoftReference;
+
+public class TestJNIBlockFullGC {
+
+    static {
+        System.loadLibrary("TestJNIBlockFullGC");
+    }
+
+    public static volatile Object tmp;
+
+    public static volatile boolean hadError = false;
+
+    private static native int TestCriticalArray0(int[] x);
+
+    public static class Node {
+        public SoftReference<Node> next;
+        long payload1;
+        long payload2;
+        long payload3;
+        long payload4;
+
+        public Node(int load) {
+            payload1 = payload2 = payload3 = payload4 = load;
+        }
+    }
+
+    public static void warmUp(long warmupEndTime, int size) {
+        // First let the GC assume most of our objects will die.
+        Node[] roots = new Node[size];
+
+        while (System.currentTimeMillis() < warmupEndTime) {
+            int index = (int) (Math.random() * roots.length);
+            roots[index] = new Node(1);
+        }
+
+        // Make sure the young generation is empty.
+        for (int i = 0; i < roots.length; ++i) {
+            roots[i] = null;
+        }
+    }
+
+    public static void runTest(long endTime, int size, double alive) {
+        final int length = 10000;
+        int[] array1 = new int[length];
+        for (int x = 1; x < length; x++) {
+            array1[x] = x;
+        }
+
+        Node[] roots = new Node[size];
+        try {
+            int index = 0;
+            roots[0] = new Node(0);
+
+            while (!hadError && (System.currentTimeMillis() < endTime)) {
+                int test_val1 = TestCriticalArray0(array1);
+
+                if (Math.random() > alive) {
+                    tmp = new Node(test_val1);
+                } else {
+                    index = (int) (Math.random() * roots.length);
+
+                    if (roots[index] != null) {
+                        Node node = new Node(test_val1);
+                        node.next = new SoftReference<Node>(roots[index]);
+                        roots[index] = node;
+                    } else {
+                        roots[index] = new Node(test_val1);
+                    }
+                }
+            }
+        } catch (OutOfMemoryError e) {
+            hadError = true;
+            e.printStackTrace();
+        }
+    }
+
+    private static void joinThreads(Thread[] threads) throws Exception {
+        for (int i = 0; i < threads.length; i++) {
+            try {
+                if (threads[i] != null) {
+                    threads[i].join();
+                }
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+                throw e;
+            }
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length < 7){
+            System.out.println("Usage: java TestJNIBlockFullGC <warmupThreads> <warmup-time-in-millis> <warmup iterations> <threads> <time-in-millis> <iterations> <aliveFrac>");
+            System.exit(0);
+        }
+
+        int warmupThreads = Integer.parseInt(args[0]);
+        System.out.println("# Warmup Threads = " + warmupThreads);
+
+        int warmupDuration = Integer.parseInt(args[1]);
+        System.out.println("WarmUp Duration = " + warmupDuration);
+        int warmupIterations = Integer.parseInt(args[2]);
+        System.out.println("# Warmup Iterations = "+ warmupIterations);
+
+        int mainThreads = Integer.parseInt(args[3]);
+        System.out.println("# Main Threads = " + mainThreads);
+        int mainDuration = Integer.parseInt(args[4]);
+        System.out.println("Main Duration = " + mainDuration);
+        int mainIterations = Integer.parseInt(args[5]);
+        System.out.println("# Main Iterations = " + mainIterations);
+
+        double liveFrac = Double.parseDouble(args[6]);
+        System.out.println("Live Fraction = " + liveFrac);
+
+        Thread threads[] = new Thread[Math.max(warmupThreads, mainThreads)];
+
+        System.out.println("Start warm-up threads!");
+        long warmupStartTime = System.currentTimeMillis();
+        for (int i = 0; i < warmupThreads; i++) {
+            threads[i] = new Thread() {
+                public void run() {
+                    warmUp(warmupStartTime + warmupDuration, warmupIterations);
+                };
+            };
+            threads[i].start();
+        }
+
+        joinThreads(threads);
+
+        System.gc();
+        System.out.println("Keep alive a lot");
+
+        long startTime = System.currentTimeMillis();
+        for (int i = 0; i < mainThreads; i++) {
+            threads[i] = new Thread() {
+                public void run() {
+                    runTest(startTime + mainDuration, mainIterations, liveFrac);
+                };
+            };
+            threads[i].start();
+        }
+        System.out.println("All threads started");
+
+        joinThreads(threads);
+
+        if (hadError) {
+            throw new RuntimeException("Experienced an OoME during execution.");
+        }
+    }
+}
--- /dev/null
+++ b/test/hotspot/jtreg/gc/stress/TestJNIBlockFullGC/libTestJNIBlockFullGC.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, SAP SE and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "jni.h"
+
+JNIEXPORT jint JNICALL
+Java_TestJNIBlockFullGC_TestCriticalArray0(JNIEnv *env, jclass jCls, jintArray jIn) {
+  jint *bufIn = NULL;
+  jint jInLen = (*env)->GetArrayLength(env, jIn);
+  jint result = 0;
+  jint i;
+
+  if (jInLen != 0) {
+    bufIn = (jint*)(*env)->GetPrimitiveArrayCritical(env, jIn, 0);
+  }
+
+  for (i = 0; i < jInLen; ++i) {
+    result += bufIn[i]; // result = sum of all array elements
+  }
+
+  if (bufIn != NULL) {
+    (*env)->ReleasePrimitiveArrayCritical(env, jIn, bufIn, 0);
+  }
+
+  return result;
+}
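How the regression test fits together: the Java driver spawns threads that allocate under heavy memory pressure while repeatedly calling TestCriticalArray0(); each call enters a JNI critical region, which engages the GC locker. Before the fix, enough overlapping critical regions could keep deferring the escalation to a full GC until a spurious OutOfMemoryError surfaced; with the fix, the full GC happens inside the safepoint and the test passes. A minimal sketch of the critical-region pattern (rendered here in C++ JNI style; the test's stub above is plain C, and sum_critical is a hypothetical name):

    // Between Get/ReleasePrimitiveArrayCritical the VM may not move objects,
    // so garbage collections are deferred via the GC locker.
    jint sum_critical(JNIEnv* env, jintArray arr) {
      jint len = env->GetArrayLength(arr);
      jint sum = 0;
      jint* buf = static_cast<jint*>(env->GetPrimitiveArrayCritical(arr, 0));
      if (buf != NULL) {
        for (jint i = 0; i < len; ++i) {
          sum += buf[i];
        }
        env->ReleasePrimitiveArrayCritical(arr, buf, 0);
      }
      return sum;
    }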