8213092: Add more runtime locks for concurrent class unloading

Add locks for calling CLDG::purge concurrently, as well as for calling SystemDictionary::do_unloading concurrently.

Reviewed-by: eosterlund, hseigel
Coleen Phillimore 2018-11-16 10:54:04 -05:00
parent dce8ff4dba
commit 673c68d993
6 changed files with 36 additions and 43 deletions
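
For context on the pattern this patch applies: a caller that runs concurrently (not at a safepoint) takes the relevant runtime lock explicitly, e.g. MutexLockerEx ml(is_concurrent ? SystemDictionary_lock : NULL), while the callee's old assert(SafepointSynchronize::is_at_safepoint(), ...) checks are relaxed to assert_locked_or_safepoint(...). Below is a minimal self-contained sketch of that shape; ToyLock, ConditionalLocker, g_at_safepoint and the *_sketch functions are stand-ins invented for illustration, not HotSpot's Mutex, MutexLockerEx or SafepointSynchronize types.

#include <cassert>

// Illustrative stand-ins only; the real HotSpot types are more involved.
static bool g_at_safepoint = false;          // role of SafepointSynchronize::is_at_safepoint()

struct ToyLock {                             // role of a runtime lock such as ClassLoaderDataGraph_lock
  bool held = false;
  void lock()   { held = true; }
  void unlock() { held = false; }
};

static ToyLock g_cldg_lock;                  // stand-in for ClassLoaderDataGraph_lock

// RAII locker that is a no-op when handed a null lock, mirroring the
// "is_concurrent ? Lock : NULL" idiom used throughout the patch.
class ConditionalLocker {
  ToyLock* _lock;
public:
  explicit ConditionalLocker(ToyLock* lock) : _lock(lock) { if (_lock != nullptr) _lock->lock(); }
  ~ConditionalLocker() { if (_lock != nullptr) _lock->unlock(); }
};

// Relaxed check: the operation is safe either because all Java threads are
// stopped (safepoint) or because the caller holds the lock.
static void assert_locked_or_safepoint_sketch(const ToyLock& lock) {
  assert((g_at_safepoint || lock.held) && "must hold lock or be at a safepoint");
}

// Callee that may now run both at a safepoint (STW collectors) and concurrently.
static void purge_sketch() {
  assert_locked_or_safepoint_sketch(g_cldg_lock);
  // ... walk and free the unloading ClassLoaderData list here ...
}

int main() {
  // Concurrent caller: not at a safepoint, so take the lock for the call.
  bool is_concurrent = !g_at_safepoint;
  ConditionalLocker ml(is_concurrent ? &g_cldg_lock : nullptr);
  purge_sketch();
  return 0;
}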

View File

@@ -484,7 +484,7 @@ void ClassLoaderData::initialize_holder(Handle loader_or_mirror) {
// Remove a klass from the _klasses list for scratch_class during redefinition
// or parsed class in the case of an error.
void ClassLoaderData::remove_class(Klass* scratch_class) {
assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
// Adjust global class iterator.
ClassLoaderDataGraph::adjust_saved_class(scratch_class);
@@ -804,7 +804,8 @@ void ClassLoaderData::add_to_deallocate_list(Metadata* m) {
// Deallocate free metadata on the free list. How useful the PermGen was!
void ClassLoaderData::free_deallocate_list() {
// Don't need lock, at safepoint
// This must be called at a safepoint because it depends on metadata walking at
// safepoint cleanup time.
assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
if (_deallocate_list == NULL) {
@@ -844,8 +845,7 @@ void ClassLoaderData::free_deallocate_list() {
// classes. The metadata is removed with the unloading metaspace.
// There isn't C heap memory allocated for methods, so nothing is done for them.
void ClassLoaderData::free_deallocate_list_C_heap_structures() {
// Don't need lock, at safepoint
assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
assert(is_unloading(), "only called for ClassLoaderData that are unloading");
if (_deallocate_list == NULL) {
return;

View File

@@ -582,7 +582,7 @@ void ClassLoaderDataGraph::clean_module_and_package_info() {
}
void ClassLoaderDataGraph::purge() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
ClassLoaderData* list = _unloading;
_unloading = NULL;
ClassLoaderData* next = list;

View File

@@ -204,7 +204,7 @@ bool ModuleEntry::has_reads_list() const {
// Purge dead module entries out of reads list.
void ModuleEntry::purge_reads() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
assert_locked_or_safepoint(Module_lock);
if (_must_walk_reads && has_reads_list()) {
// This module's _must_walk_reads flag will be reset based
@@ -245,7 +245,6 @@ void ModuleEntry::module_reads_do(ModuleClosure* f) {
}
void ModuleEntry::delete_reads() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
delete _reads;
_reads = NULL;
}
@@ -319,8 +318,6 @@ ModuleEntryTable::ModuleEntryTable(int table_size)
}
ModuleEntryTable::~ModuleEntryTable() {
assert_locked_or_safepoint(Module_lock);
// Walk through all buckets and all entries in each bucket,
// freeing each entry.
for (int i = 0; i < table_size(); ++i) {

View File

@@ -125,7 +125,7 @@ void PackageEntry::set_is_exported_allUnnamed() {
// get deleted. This prevents the package from illegally transitioning from
// exported to non-exported.
void PackageEntry::purge_qualified_exports() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
assert_locked_or_safepoint(Module_lock);
if (_must_walk_exports &&
_qualified_exports != NULL &&
!_qualified_exports->is_empty()) {
@@ -160,7 +160,6 @@ void PackageEntry::purge_qualified_exports() {
}
void PackageEntry::delete_qualified_exports() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
if (_qualified_exports != NULL) {
delete _qualified_exports;
}
@@ -228,29 +227,20 @@ PackageEntry* PackageEntryTable::locked_create_entry_or_null(Symbol* name, Modul
}
PackageEntry* PackageEntryTable::lookup(Symbol* name, ModuleEntry* module) {
MutexLocker ml(Module_lock);
PackageEntry* p = lookup_only(name);
if (p != NULL) {
return p;
} else {
// If not found, add to table. Grab the PackageEntryTable lock first.
MutexLocker ml(Module_lock);
// Since look-up was done lock-free, we need to check if another thread beat
// us in the race to insert the package.
PackageEntry* test = lookup_only(name);
if (test != NULL) {
// A race occurred and another thread introduced the package.
return test;
} else {
assert(module != NULL, "module should never be null");
PackageEntry* entry = new_entry(compute_hash(name), name, module);
add_entry(index_for(name), entry);
return entry;
}
assert(module != NULL, "module should never be null");
PackageEntry* entry = new_entry(compute_hash(name), name, module);
add_entry(index_for(name), entry);
return entry;
}
}
PackageEntry* PackageEntryTable::lookup_only(Symbol* name) {
MutexLockerEx ml(Module_lock->owned_by_self() ? NULL : Module_lock);
int index = index_for(name);
for (PackageEntry* p = bucket(index); p != NULL; p = p->next()) {
if (p->name()->fast_compare(name) == 0) {
@@ -296,7 +286,7 @@ void PackageEntry::package_exports_do(ModuleClosure* f) {
}
bool PackageEntry::exported_pending_delete() const {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
assert_locked_or_safepoint(Module_lock);
return (is_unqual_exported() && _qualified_exports != NULL);
}
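
The PackageEntryTable::lookup change above drops the lock-free fast path and its re-check for a racing inserter: lookup_only now takes Module_lock itself (unless the caller already owns it), so lookup can do a single locked lookup-or-insert. A rough stand-in sketch of that simpler shape, using hypothetical PackageTable/Entry types rather than the real HotSpot classes:

#include <mutex>
#include <string>
#include <unordered_map>

// Hypothetical stand-ins; the real table is a hashtable keyed by Symbol*,
// guarded by Module_lock.
struct Entry {
  std::string name;
};

class PackageTable {
  std::mutex _lock;                                  // plays the role of Module_lock
  std::unordered_map<std::string, Entry*> _map;
public:
  // One critical section covers both the lookup and the insert, so there is
  // no window for another thread to insert in between and no second
  // "did someone beat us" lookup is needed.
  Entry* lookup(const std::string& name) {
    std::lock_guard<std::mutex> ml(_lock);
    auto it = _map.find(name);
    if (it != _map.end()) {
      return it->second;
    }
    Entry* e = new Entry{name};
    _map.emplace(name, e);
    return e;
  }
};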

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -253,7 +253,7 @@ public:
// lookup Package with loader's package entry table, if not found add
PackageEntry* lookup(Symbol* name, ModuleEntry* module);
// Only lookup Package within loader's package entry table. The table read is lock-free.
// Only lookup Package within loader's package entry table.
PackageEntry* lookup_only(Symbol* Package);
void verify_javabase_packages(GrowableArray<Symbol*> *pkg_list);

View File

@@ -1807,18 +1807,26 @@ void SystemDictionary::add_to_hierarchy(InstanceKlass* k, TRAPS) {
bool SystemDictionary::do_unloading(GCTimer* gc_timer) {
bool unloading_occurred;
bool is_concurrent = !SafepointSynchronize::is_at_safepoint();
{
GCTraceTime(Debug, gc, phases) t("ClassLoaderData", gc_timer);
assert_locked_or_safepoint(ClassLoaderDataGraph_lock); // caller locks.
// First, mark for unload all ClassLoaderData referencing a dead class loader.
unloading_occurred = ClassLoaderDataGraph::do_unloading();
if (unloading_occurred) {
MutexLockerEx ml2(is_concurrent ? Module_lock : NULL);
JFR_ONLY(Jfr::on_unloading_classes();)
MutexLockerEx ml1(is_concurrent ? SystemDictionary_lock : NULL);
ClassLoaderDataGraph::clean_module_and_package_info();
}
}
// TODO: just return if !unloading_occurred.
// Cleanup ResolvedMethodTable even if no unloading occurred.
{
GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
ResolvedMethodTable::trigger_cleanup();
}
if (unloading_occurred) {
{
GCTraceTime(Debug, gc, phases) t("SymbolTable", gc_timer);
@@ -1827,23 +1835,21 @@ bool SystemDictionary::do_unloading(GCTimer* gc_timer) {
}
{
MutexLockerEx ml(is_concurrent ? SystemDictionary_lock : NULL);
GCTraceTime(Debug, gc, phases) t("Dictionary", gc_timer);
constraints()->purge_loader_constraints();
resolution_errors()->purge_resolution_errors();
}
}
{
GCTraceTime(Debug, gc, phases) t("ProtectionDomainCacheTable", gc_timer);
// Oops referenced by the protection domain cache table may get unreachable independently
// of the class loader (eg. cached protection domain oops). So we need to
// explicitly unlink them here.
_pd_cache_table->trigger_cleanup();
}
{
GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
ResolvedMethodTable::trigger_cleanup();
{
GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
// Oops referenced by the protection domain cache table may get unreachable independently
// of the class loader (eg. cached protection domain oops). So we need to
// explicitly unlink them here.
// All protection domain oops are linked to the caller class, so if nothing
// unloads, this is not needed.
_pd_cache_table->trigger_cleanup();
}
}
return unloading_occurred;