mirror of https://github.com/darlinghq/darling-openjdk.git (synced 2024-11-27 06:10:37 +00:00)

Merge

This commit is contained in: commit dafb3af62d
@@ -493,4 +493,5 @@ e1b3def126240d5433902f3cb0e91a4c27f6db50 jdk-11+18
36ca515343e00b021dcfc902e986d26ec994a2e5 jdk-11+19
95aad0c785e497f1bade3955c4e4a677b629fa9d jdk-12+0
9816d7cc655e53ba081f938b656e31971b8f097a jdk-11+20
14708e1acdc3974f4539027cbbcfa6d69f83cf51 jdk-11+21
00b16d0457e43d23f6ca5ade6b243edce62750a0 jdk-12+1
@@ -274,6 +274,8 @@ define SetupApiDocsGenerationBody
$1_INDIRECT_EXPORTS := $$(call FindTransitiveIndirectDepsForModules, $$($1_MODULES))
$1_ALL_MODULES := $$(sort $$($1_MODULES) $$($1_INDIRECT_EXPORTS))

$1_JAVA_ARGS := -Dextlink.spec.version=$$(VERSION_SPECIFICATION)

ifeq ($$(ENABLE_FULL_DOCS), true)
# Tell the ModuleGraph taglet to generate html links to soon-to-be-created
# png files with module graphs.
@@ -327,9 +329,10 @@ define SetupApiDocsGenerationBody
)

ifeq ($$($1_JAVADOC_CMD), )
$1_JAVADOC_CMD := $$(JAVA) -Djava.awt.headless=true \
-Dextlink.spec.version=$$(VERSION_SPECIFICATION) $$($1_JAVA_ARGS) \
$1_JAVADOC_CMD := $$(JAVA) -Djava.awt.headless=true $$($1_JAVA_ARGS) \
$$(NEW_JAVADOC)
else
$1_OPTIONS += $$(addprefix -J, $$($1_JAVA_ARGS))
endif

$1_VARDEPS := $$($1_JAVA_ARGS) $$($1_OPTIONS) $$(MODULES_SOURCE_PATH) \
@@ -463,7 +466,9 @@ $(eval $(call SetupApiDocsGeneration, JAVASE_API, \
# Setup generation of the reference Java SE API documentation (javadoc + modulegraph)

# The reference javadoc is just the same as javase, but using the BootJDK javadoc
# and a stable set of javadoc options.
# and a stable set of javadoc options. Typically it is used for generating
# diffs between the reference javadoc and a javadoc bundle of a specific build
# generated in the same way.

$(eval $(call SetupApiDocsGeneration, REFERENCE_API, \
MODULES := $(JAVASE_MODULES), \
@@ -497,10 +502,9 @@ $(eval $(call SetupCopyFiles, COPY_GLOBAL_RESOURCES, \
JDK_INDEX_TARGETS += $(COPY_GLOBAL_RESOURCES)

# Copy the legal notices distributed with the docs bundle
DOCS_LEGAL_NOTICES := jquery.md jszip.md pako.md
$(eval $(call SetupCopyFiles, COPY_DOCS_LEGAL_NOTICES, \
SRC := $(TOPDIR)/src/jdk.javadoc/share/legal, \
FILES := $(DOCS_LEGAL_NOTICES), \
FILES := $(wildcard $(TOPDIR)/src/jdk.javadoc/share/legal/*), \
DEST := $(DOCS_OUTPUTDIR)/legal, \
))
JDK_INDEX_TARGETS += $(COPY_DOCS_LEGAL_NOTICES)
@@ -201,8 +201,6 @@ AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_AOT],
ENABLE_AOT="true"
elif test "x$enable_aot" = "xno"; then
ENABLE_AOT="false"
AC_MSG_CHECKING([if aot should be enabled])
AC_MSG_RESULT([no, forced])
else
AC_MSG_ERROR([Invalid value for --enable-aot: $enable_aot])
fi
@@ -228,7 +226,7 @@ AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_AOT],
else
ENABLE_AOT="false"
if test "x$enable_aot" = "xyes"; then
AC_MSG_ERROR([AOT is currently only supported on x86_64. Remove --enable-aot.])
AC_MSG_ERROR([AOT is currently only supported on x86_64 and aarch64. Remove --enable-aot.])
fi
fi
fi
@@ -374,57 +372,106 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_FEATURES],
fi
fi

# Only enable jvmci on x86_64, sparcv9 and aarch64.
if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
test "x$OPENJDK_TARGET_CPU" = "xsparcv9" || \
test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
JVM_FEATURES_jvmci="jvmci"
else
AC_MSG_CHECKING([if jvmci module jdk.internal.vm.ci should be built])
# Check if jvmci is diabled
DISABLE_JVMCI=`$ECHO $DISABLED_JVM_FEATURES | $GREP jvmci`
if test "x$DISABLE_JVMCI" = "xjvmci"; then
AC_MSG_RESULT([no, forced])
JVM_FEATURES_jvmci=""
INCLUDE_JVMCI="false"
else
# Only enable jvmci on x86_64, sparcv9 and aarch64
if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
test "x$OPENJDK_TARGET_CPU" = "xsparcv9" || \
test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then
AC_MSG_RESULT([yes])
JVM_FEATURES_jvmci="jvmci"
INCLUDE_JVMCI="true"
else
AC_MSG_RESULT([no])
JVM_FEATURES_jvmci=""
INCLUDE_JVMCI="false"
if HOTSPOT_CHECK_JVM_FEATURE(jvmci); then
AC_MSG_ERROR([JVMCI is currently not supported on this platform.])
fi
fi
fi

AC_MSG_CHECKING([if jdk.internal.vm.compiler should be built])
if HOTSPOT_CHECK_JVM_FEATURE(graal); then
AC_MSG_RESULT([yes, forced])
if test "x$JVM_FEATURES_jvmci" != "xjvmci" ; then
AC_MSG_ERROR([Specified JVM feature 'graal' requires feature 'jvmci'])
fi
INCLUDE_GRAAL="true"
AC_SUBST(INCLUDE_JVMCI)

AC_MSG_CHECKING([if graal module jdk.internal.vm.compiler should be built])
# Check if graal is diabled
DISABLE_GRAAL=`$ECHO $DISABLED_JVM_FEATURES | $GREP graal`
if test "x$DISABLE_GRAAL" = "xgraal"; then
AC_MSG_RESULT([no, forced])
JVM_FEATURES_graal=""
INCLUDE_GRAAL="false"
else
# By default enable graal build on x64 or where AOT is available.
# graal build requires jvmci.
if test "x$JVM_FEATURES_jvmci" = "xjvmci" && \
(test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
test "x$ENABLE_AOT" = "xtrue") ; then
AC_MSG_RESULT([yes])
if HOTSPOT_CHECK_JVM_FEATURE(graal); then
AC_MSG_RESULT([yes, forced])
if test "x$JVM_FEATURES_jvmci" != "xjvmci" ; then
AC_MSG_ERROR([Specified JVM feature 'graal' requires feature 'jvmci'])
fi
JVM_FEATURES_graal="graal"
INCLUDE_GRAAL="true"
else
AC_MSG_RESULT([no])
JVM_FEATURES_graal=""
INCLUDE_GRAAL="false"
# By default enable graal build on x64 or where AOT is available.
# graal build requires jvmci.
if test "x$JVM_FEATURES_jvmci" = "xjvmci" && \
(test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
test "x$ENABLE_AOT" = "xtrue") ; then
AC_MSG_RESULT([yes])
JVM_FEATURES_graal="graal"
INCLUDE_GRAAL="true"
else
AC_MSG_RESULT([no])
JVM_FEATURES_graal=""
INCLUDE_GRAAL="false"
fi
fi
fi

AC_SUBST(INCLUDE_GRAAL)

# Disable aot with '--with-jvm-features=-aot'
DISABLE_AOT=`$ECHO $DISABLED_JVM_FEATURES | $GREP aot`
if test "x$DISABLE_AOT" = "xaot"; then
ENABLE_AOT="false"
fi

AC_MSG_CHECKING([if aot should be enabled])
if test "x$ENABLE_AOT" = "xtrue"; then
if test "x$enable_aot" = "xyes"; then
AC_MSG_RESULT([yes, forced])
if test "x$JVM_FEATURES_graal" != "xgraal"; then
if test "x$enable_aot" = "xyes" || HOTSPOT_CHECK_JVM_FEATURE(aot); then
AC_MSG_RESULT([yes, forced])
AC_MSG_ERROR([Specified JVM feature 'aot' requires feature 'graal'])
else
AC_MSG_RESULT([no])
fi
JVM_FEATURES_aot=""
ENABLE_AOT="false"
else
AC_MSG_RESULT([yes])
if test "x$enable_aot" = "xyes" || HOTSPOT_CHECK_JVM_FEATURE(aot); then
AC_MSG_RESULT([yes, forced])
else
AC_MSG_RESULT([yes])
fi
JVM_FEATURES_aot="aot"
fi
JVM_FEATURES_aot="aot"
else
if test "x$enable_aot" = "xno"; then
if test "x$enable_aot" = "xno" || "x$DISABLE_AOT" = "xaot"; then
AC_MSG_RESULT([no, forced])
else
AC_MSG_RESULT([no])
fi
JVM_FEATURES_aot=""
if HOTSPOT_CHECK_JVM_FEATURE(aot); then
AC_MSG_ERROR([To enable aot, you must use --enable-aot])
fi
fi

AC_SUBST(ENABLE_AOT)

if test "x$OPENJDK_TARGET_CPU" = xarm ; then
# Default to use link time optimizations on minimal on arm
JVM_FEATURES_link_time_opt="link-time-opt"
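The reworked configure block above chains three optional features: jvmci is only offered on x86_64, sparcv9 and aarch64; graal defaults on for x86_64 or when AOT is in play and requires jvmci; aot in turn requires graal. A standalone C++ sketch of that resolution order, with hypothetical names and simplified defaults (illustrative only, not part of the commit):

#include <iostream>
#include <string>

// Hypothetical model of the configure-time resolution shown above: jvmci is
// gated on the target CPU, graal requires jvmci (default on x86_64 or when
// AOT is requested), and aot requires graal.
struct JvmFeatures {
  bool jvmci = false;
  bool graal = false;
  bool aot = false;
};

JvmFeatures resolve_features(const std::string& cpu, bool enable_aot_flag) {
  JvmFeatures f;
  f.jvmci = (cpu == "x86_64" || cpu == "sparcv9" || cpu == "aarch64");
  f.graal = f.jvmci && (cpu == "x86_64" || enable_aot_flag);
  f.aot   = f.graal && enable_aot_flag;  // --enable-aot without graal is an error in the real script
  return f;
}

int main() {
  JvmFeatures f = resolve_features("aarch64", /*enable_aot_flag=*/true);
  std::cout << "jvmci=" << f.jvmci << " graal=" << f.graal << " aot=" << f.aot << '\n';
  return 0;
}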
@@ -814,6 +814,7 @@ PNG_CFLAGS:=@PNG_CFLAGS@

INCLUDE_SA=@INCLUDE_SA@
INCLUDE_GRAAL=@INCLUDE_GRAAL@
INCLUDE_JVMCI=@INCLUDE_JVMCI@

OS_VERSION_MAJOR:=@OS_VERSION_MAJOR@
OS_VERSION_MINOR:=@OS_VERSION_MINOR@
@@ -205,7 +205,14 @@ ifeq ($(INCLUDE_SA), false)
endif

################################################################################
# Filter out Graal specific modules if Graal build is disabled
# Filter out jvmci specific modules if jvmci is disabled

ifeq ($(INCLUDE_JVMCI), false)
MODULES_FILTER += jdk.internal.vm.ci
endif

################################################################################
# Filter out Graal specific modules if Graal is disabled

ifeq ($(INCLUDE_GRAAL), false)
MODULES_FILTER += jdk.internal.vm.compiler
@@ -239,7 +239,7 @@ var getJibProfilesCommon = function (input, data) {

// These are the base setttings for all the main build profiles.
common.main_profile_base = {
dependencies: ["boot_jdk", "gnumake", "jtreg", "jib"],
dependencies: ["boot_jdk", "gnumake", "jtreg", "jib", "autoconf"],
default_make_targets: ["product-bundles", "test-bundles"],
configure_args: concat(["--enable-jtreg-failure-handler"],
"--with-exclude-translations=de,es,fr,it,ko,pt_BR,sv,ca,tr,cs,sk,ja_JP_A,ja_JP_HA,ja_JP_HI,ja_JP_I",
@@ -378,7 +378,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"linux-x64": {
target_os: "linux",
target_cpu: "x64",
dependencies: ["devkit", "autoconf", "graphviz", "pandoc", "graalunit_lib"],
dependencies: ["devkit", "graphviz", "pandoc", "graalunit_lib"],
configure_args: concat(common.configure_args_64bit,
"--enable-full-docs", "--with-zlib=system"),
default_make_targets: ["docs-bundles"],
@@ -388,7 +388,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "linux",
target_cpu: "x86",
build_cpu: "x64",
dependencies: ["devkit", "autoconf"],
dependencies: ["devkit"],
configure_args: concat(common.configure_args_32bit,
"--with-jvm-variants=minimal,server", "--with-zlib=system"),
},
@@ -396,7 +396,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"macosx-x64": {
target_os: "macosx",
target_cpu: "x64",
dependencies: ["devkit", "autoconf", "graalunit_lib"],
dependencies: ["devkit", "graalunit_lib"],
configure_args: concat(common.configure_args_64bit, "--with-zlib=system",
"--with-macosx-version-max=10.9.0"),
},
@@ -404,7 +404,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"solaris-x64": {
target_os: "solaris",
target_cpu: "x64",
dependencies: ["devkit", "autoconf", "cups"],
dependencies: ["devkit", "cups"],
configure_args: concat(common.configure_args_64bit,
"--with-zlib=system", "--enable-dtrace"),
},
@@ -412,7 +412,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"solaris-sparcv9": {
target_os: "solaris",
target_cpu: "sparcv9",
dependencies: ["devkit", "autoconf", "cups"],
dependencies: ["devkit", "cups"],
configure_args: concat(common.configure_args_64bit,
"--with-zlib=system", "--enable-dtrace"),
},
@@ -420,7 +420,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"windows-x64": {
target_os: "windows",
target_cpu: "x64",
dependencies: ["devkit", "autoconf", "graalunit_lib"],
dependencies: ["devkit", "graalunit_lib"],
configure_args: concat(common.configure_args_64bit),
},

@@ -428,7 +428,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "windows",
target_cpu: "x86",
build_cpu: "x64",
dependencies: ["devkit", "autoconf"],
dependencies: ["devkit"],
configure_args: concat(common.configure_args_32bit),
},

@@ -436,7 +436,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "linux",
target_cpu: "aarch64",
build_cpu: "x64",
dependencies: ["devkit", "autoconf", "build_devkit", "cups"],
dependencies: ["devkit", "build_devkit", "cups"],
configure_args: [
"--openjdk-target=aarch64-linux-gnu", "--with-freetype=bundled",
"--disable-warnings-as-errors", "--with-cpu-port=aarch64",
@@ -447,7 +447,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "linux",
target_cpu: "aarch64",
build_cpu: "x64",
dependencies: ["devkit", "autoconf", "build_devkit", "cups", "headless_stubs"],
dependencies: ["devkit", "build_devkit", "cups", "headless_stubs"],
configure_args: [
"--with-cpu-port=arm64",
"--with-jvm-variants=server",
@@ -460,7 +460,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "linux",
target_cpu: "arm",
build_cpu: "x64",
dependencies: ["devkit", "autoconf", "build_devkit", "cups"],
dependencies: ["devkit", "build_devkit", "cups"],
configure_args: [
"--openjdk-target=arm-linux-gnueabihf", "--with-freetype=bundled",
"--with-abi-profile=arm-vfp-hflt", "--disable-warnings-as-errors"
@@ -471,7 +471,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "linux",
target_cpu: "arm",
build_cpu: "x64",
dependencies: ["devkit", "autoconf", "build_devkit", "cups"],
dependencies: ["devkit", "build_devkit", "cups"],
configure_args: [
"--with-jvm-variants=minimal1,client",
"--with-x=" + input.get("devkit", "install_path") + "/arm-linux-gnueabihf/libc/usr/X11R6-PI",
@@ -1471,7 +1471,7 @@ source %{
// Ctl+Mem to a StoreB node (which does the actual card mark).
//
// n.b. a StoreCM node will only appear in this configuration when
// using CMS. StoreCM differs from a normal card mark write (StoreB)
// using CMS or G1. StoreCM differs from a normal card mark write (StoreB)
// because it implies a requirement to order visibility of the card
// mark (StoreCM) relative to the object put (StoreP/N) using a
// StoreStore memory barrier (arguably this ought to be represented
@@ -1481,16 +1481,12 @@ source %{
// the sequence
//
// dmb ishst
// stlrb
// strb
//
// However, in the case of a volatile put if we can recognise this
// configuration and plant an stlr for the object write then we can
// omit the dmb and just plant an strb since visibility of the stlr
// is ordered before visibility of subsequent stores. StoreCM nodes
// also arise when using G1 or using CMS with conditional card
// marking. In these cases (as we shall see) we don't need to insert
// the dmb when translating StoreCM because there is already an
// intervening StoreLoad barrier between it and the StoreP/N.
// However, when using G1 or CMS with conditional card marking (as
// we shall see) we don't need to insert the dmb when translating
// StoreCM because there is already an intervening StoreLoad barrier
// between it and the StoreP/N.
//
// It is also possible to perform the card mark conditionally on it
// currently being unmarked in which case the volatile put graph
@@ -2868,50 +2864,17 @@ bool unnecessary_storestore(const Node *storecm)
{
assert(storecm->Opcode() == Op_StoreCM, "expecting a StoreCM");

// we only ever need to generate a dmb ishst between an object put
// and the associated card mark when we are using CMS without
// conditional card marking
// we need to generate a dmb ishst between an object put and the
// associated card mark when we are using CMS without conditional
// card marking

if (!UseConcMarkSweepGC || UseCondCardMark) {
return true;
}

// if we are implementing volatile puts using barriers then the
// object put is an str so we must insert the dmb ishst

if (UseBarriersForVolatile) {
if (UseConcMarkSweepGC && !UseCondCardMark) {
return false;
}

// we can omit the dmb ishst if this StoreCM is part of a volatile
// put because in thta case the put will be implemented by stlr
//
// we need to check for a normal subgraph feeding this StoreCM.
// that means the StoreCM must be fed Memory from a leading membar,
// either a MemBarRelease or its dependent MemBarCPUOrder, and the
// leading membar must be part of a normal subgraph
// a storestore is unnecesary in all other cases

Node *x = storecm->in(StoreNode::Memory);

if (!x->is_Proj()) {
return false;
}

x = x->in(0);

if (!x->is_MemBar()) {
return false;
}

MemBarNode *leading = x->as_MemBar();

// reject invalid candidates
if (!leading_membar(leading)) {
return false;
}

// we can omit the StoreStore if it is the head of a normal subgraph
return (leading_to_normal(leading) != NULL);
return true;
}

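With the CMS/G1 barrier reasoning above, the simplified unnecessary_storestore() reduces to a single configuration test: the dmb ishst is only required for CMS without conditional card marking. An illustrative restatement in plain C++ (hypothetical names, not the .ad source):

#include <cstdio>

// Illustrative restatement of the simplified predicate above: the StoreStore
// barrier ahead of a StoreCM is only needed for CMS without conditional card
// marking; in every other GC configuration it is unnecessary.
static bool storecm_needs_storestore(bool use_cms, bool use_cond_card_mark) {
  return use_cms && !use_cond_card_mark;
}

int main() {
  std::printf("CMS, no cond card mark -> %d\n", storecm_needs_storestore(true, false));   // 1
  std::printf("G1                     -> %d\n", storecm_needs_storestore(false, false));  // 0
}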
@@ -25,19 +25,30 @@
#include "jvm.h"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/vm_version.hpp"
#include "vm_version_ext_ppc.hpp"

// VM_Version_Ext statics
int VM_Version_Ext::_no_of_threads = 0;
int VM_Version_Ext::_no_of_cores = 0;
int VM_Version_Ext::_no_of_sockets = 0;
bool VM_Version_Ext::_initialized = false;
char VM_Version_Ext::_cpu_name[CPU_TYPE_DESC_BUF_SIZE] = {0};
char VM_Version_Ext::_cpu_desc[CPU_DETAILED_DESC_BUF_SIZE] = {0};

// get cpu information.
bool VM_Version_Ext::initialize_cpu_information(void) {
// Not yet implemented.
return false;
void VM_Version_Ext::initialize_cpu_information(void) {
// do nothing if cpu info has been initialized
if (_initialized) {
return;
}

_no_of_cores = os::processor_count();
_no_of_threads = _no_of_cores;
_no_of_sockets = _no_of_cores;
snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE, "PowerPC POWER%lu", PowerArchitecturePPC64);
snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "PPC %s", features_string());
_initialized = true;
}

int VM_Version_Ext::number_of_threads(void) {
@@ -56,9 +67,7 @@ int VM_Version_Ext::number_of_sockets(void) {
}

const char* VM_Version_Ext::cpu_name(void) {
if (!initialize_cpu_information()) {
return NULL;
}
initialize_cpu_information();
char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing);
if (NULL == tmp) {
return NULL;
@@ -68,9 +77,7 @@ const char* VM_Version_Ext::cpu_name(void) {
}

const char* VM_Version_Ext::cpu_description(void) {
if (!initialize_cpu_information()) {
return NULL;
}
initialize_cpu_information();
char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing);
if (NULL == tmp) {
return NULL;
@@ -43,10 +43,11 @@ class VM_Version_Ext : public VM_Version {
static int _no_of_threads;
static int _no_of_cores;
static int _no_of_sockets;
static bool _initialized;
static char _cpu_name[CPU_TYPE_DESC_BUF_SIZE];
static char _cpu_desc[CPU_DETAILED_DESC_BUF_SIZE];

static bool initialize_cpu_information(void);
static void initialize_cpu_information(void);

public:
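Both the ppc and s390 ports switch initialize_cpu_information() from a bool "not yet implemented" stub to a void, idempotent lazy initializer guarded by _initialized, and callers now invoke it unconditionally. A minimal sketch of that pattern, with hypothetical names (not HotSpot code):

#include <cstdio>

// Sketch of the lazy-initialization pattern adopted here: the initializer
// populates the static fields once and becomes a no-op afterwards.
class CpuInfo {
  static bool _initialized;
  static int  _cores;
public:
  static void initialize() {
    if (_initialized) {
      return;            // already populated, nothing to do
    }
    _cores = 8;          // stand-in for os::processor_count()
    _initialized = true;
  }
  static int cores() { initialize(); return _cores; }
};

bool CpuInfo::_initialized = false;
int  CpuInfo::_cores = 0;

int main() {
  std::printf("cores=%d\n", CpuInfo::cores());
}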
@@ -9839,7 +9839,7 @@ instruct partialSubtypeCheck(rarg1RegP index, rarg2RegP sub, rarg3RegP super, fl
match(Set index (PartialSubtypeCheck sub super));
effect(KILL pcc, KILL scratch1, KILL scratch2);
ins_cost(10 * DEFAULT_COST);
size(12);
// TODO: s390 port size(FIXED_SIZE);
format %{ " CALL PartialSubtypeCheck\n" %}
ins_encode %{
AddressLiteral stub_address(StubRoutines::zarch::partial_subtype_check());
@@ -3636,7 +3636,7 @@ void TemplateTable::invokeinterface(int byte_no) {

NearLabel subtype, no_such_interface;

__ check_klass_subtype(klass, interface, Z_tmp_2, Z_tmp_3, subtype);
__ check_klass_subtype(klass, interface, Z_tmp_2, flags/*scratch*/, subtype);
// If we get here the typecheck failed
__ z_bru(no_such_interface);
__ bind(subtype);
@@ -3649,7 +3649,6 @@ void TemplateTable::invokeinterface(int byte_no) {
__ bind(notVFinal);

// Get receiver klass into klass - also a null check.
__ restore_locals();
__ load_klass(klass, receiver);

__ lookup_interface_method(klass, interface, noreg, noreg, /*temp*/Z_ARG1,
@@ -3680,7 +3679,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// interpreter entry point and a conditional jump to it in case of a null
// method.
__ compareU64_and_branch(method2, (intptr_t) 0,
Assembler::bcondZero, no_such_method);
Assembler::bcondZero, no_such_method);

__ profile_arguments_type(Z_tmp_1, method2, Z_tmp_2, true);

@@ -3695,8 +3694,6 @@ void TemplateTable::invokeinterface(int byte_no) {
__ bind(no_such_method);

// Throw exception.
__ restore_bcp(); // Bcp must be correct for exception handler (was destroyed).
__ restore_locals(); // Make sure locals pointer is correct as well (was destroyed).
// Pass arguments for generating a verbose error message.
__ z_lgr(Z_tmp_1, method); // Prevent register clash.
__ call_VM(noreg,
@@ -3709,8 +3706,6 @@ void TemplateTable::invokeinterface(int byte_no) {
__ bind(no_such_interface);

// Throw exception.
__ restore_bcp(); // Bcp must be correct for exception handler (was destroyed).
__ restore_locals(); // Make sure locals pointer is correct as well (was destroyed).
// Pass arguments for generating a verbose error message.
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
@@ -31,13 +31,23 @@
int VM_Version_Ext::_no_of_threads = 0;
int VM_Version_Ext::_no_of_cores = 0;
int VM_Version_Ext::_no_of_sockets = 0;
bool VM_Version_Ext::_initialized = false;
char VM_Version_Ext::_cpu_name[CPU_TYPE_DESC_BUF_SIZE] = {0};
char VM_Version_Ext::_cpu_desc[CPU_DETAILED_DESC_BUF_SIZE] = {0};

// get cpu information.
bool VM_Version_Ext::initialize_cpu_information(void) {
// Not yet implemented.
return false;
void VM_Version_Ext::initialize_cpu_information(void) {
// do nothing if cpu info has been initialized
if (_initialized) {
return;
}

_no_of_cores = os::processor_count();
_no_of_threads = _no_of_cores;
_no_of_sockets = _no_of_cores;
snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE, "s390 %s", VM_Version::get_model_string());
snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "zArch %s", features_string());
_initialized = true;
}

int VM_Version_Ext::number_of_threads(void) {
@@ -56,9 +66,7 @@ int VM_Version_Ext::number_of_sockets(void) {
}

const char* VM_Version_Ext::cpu_name(void) {
if (!initialize_cpu_information()) {
return NULL;
}
initialize_cpu_information();
char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing);
if (NULL == tmp) {
return NULL;
@@ -68,9 +76,7 @@ const char* VM_Version_Ext::cpu_name(void) {
}

const char* VM_Version_Ext::cpu_description(void) {
if (!initialize_cpu_information()) {
return NULL;
}
initialize_cpu_information();
char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing);
if (NULL == tmp) {
return NULL;
@@ -43,10 +43,11 @@ class VM_Version_Ext : public VM_Version {
static int _no_of_threads;
static int _no_of_cores;
static int _no_of_sockets;
static bool _initialized;
static char _cpu_name[CPU_TYPE_DESC_BUF_SIZE];
static char _cpu_desc[CPU_DETAILED_DESC_BUF_SIZE];

static bool initialize_cpu_information(void);
static void initialize_cpu_information(void);

public:
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
# include <sys/sysinfo.h>

bool VM_Version::_is_determine_features_test_running = false;
const char* VM_Version::_model_string;

unsigned long VM_Version::_features[_features_buffer_len] = {0, 0, 0, 0};
unsigned long VM_Version::_cipher_features[_features_buffer_len] = {0, 0, 0, 0};
@@ -210,6 +211,10 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}

if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA, false);
}

if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
}
@@ -244,32 +249,40 @@ void VM_Version::initialize() {
void VM_Version::set_features_string() {

unsigned int ambiguity = 0;
_model_string = z_name[0];
if (is_z13()) {
_features_string = "System z G7-z13 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB, Cond Load/Store, Interlocked Update, TxM, VectorInstr)";
_model_string = z_name[7];
ambiguity++;
}
if (is_ec12()) {
_features_string = "System z G6-EC12 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB, Cond Load/Store, Interlocked Update, TxM)";
_model_string = z_name[6];
ambiguity++;
}
if (is_z196()) {
_features_string = "System z G5-z196 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB, Cond Load/Store, Interlocked Update)";
_model_string = z_name[5];
ambiguity++;
}
if (is_z10()) {
_features_string = "System z G4-z10 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB)";
_model_string = z_name[4];
ambiguity++;
}
if (is_z9()) {
_features_string = "System z G3-z9 (LDISP_fast, ExtImm), out-of-support as of 2016-04-01";
_model_string = z_name[3];
ambiguity++;
}
if (is_z990()) {
_features_string = "System z G2-z990 (LDISP_fast), out-of-support as of 2014-07-01";
_model_string = z_name[2];
ambiguity++;
}
if (is_z900()) {
_features_string = "System z G1-z900 (LDISP), out-of-support as of 2014-07-01";
_model_string = z_name[1];
ambiguity++;
}
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -131,6 +131,7 @@ class VM_Version: public Abstract_VM_Version {
static unsigned int _Dcache_lineSize;
static unsigned int _Icache_lineSize;
static bool _is_determine_features_test_running;
static const char* _model_string;

static bool test_feature_bit(unsigned long* featureBuffer, int featureNum, unsigned int bufLen);
static void set_features_string();
@@ -346,6 +347,7 @@ class VM_Version: public Abstract_VM_Version {
static bool is_determine_features_test_running() { return _is_determine_features_test_running; }

// CPU feature query functions
static const char* get_model_string() { return _model_string; }
static bool has_StoreFacilityListExtended() { return (_features[0] & StoreFacilityListExtendedMask) == StoreFacilityListExtendedMask; }
static bool has_Crypto() { return (_features[0] & CryptoFacilityMask) == CryptoFacilityMask; }
static bool has_ETF2() { return (_features[0] & ETF2Mask) == ETF2Mask; }
@@ -28,7 +28,6 @@
#include "gc/z/zErrno.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "logging/log.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
@@ -47,10 +46,6 @@
// Sysfs file for transparent huge page on tmpfs
#define ZFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled"

// Default mount points
#define ZMOUNTPOINT_TMPFS "/dev/shm"
#define ZMOUNTPOINT_HUGETLBFS "/hugepages"

// Java heap filename
#define ZFILENAME_HEAP "java_heap"

@@ -79,13 +74,30 @@
#define HUGETLBFS_MAGIC 0x958458f6
#endif

// Preferred tmpfs mount points, ordered by priority
static const char* z_preferred_tmpfs_mountpoints[] = {
"/dev/shm",
"/run/shm",
NULL
};

// Preferred hugetlbfs mount points, ordered by priority
static const char* z_preferred_hugetlbfs_mountpoints[] = {
"/dev/hugepages",
"/hugepages",
NULL
};

static int z_memfd_create(const char *name, unsigned int flags) {
return syscall(__NR_memfd_create, name, flags);
}

bool ZBackingFile::_hugetlbfs_mmap_retry = true;

ZBackingFile::ZBackingFile() :
_fd(-1),
_filesystem(0),
_available(0),
_initialized(false) {

// Create backing file
@@ -94,39 +106,47 @@ ZBackingFile::ZBackingFile() :
return;
}

// Get filesystem type
// Get filesystem statistics
struct statfs statfs_buf;
if (fstatfs(_fd, &statfs_buf) == -1) {
ZErrno err;
log_error(gc, init)("Failed to determine filesystem type for backing file (%s)", err.to_string());
log_error(gc, init)("Failed to determine filesystem type for backing file (%s)",
err.to_string());
return;
}

_filesystem = statfs_buf.f_type;
_available = statfs_buf.f_bavail * statfs_buf.f_bsize;

// Make sure we're on a supported filesystem
if (!is_tmpfs() && !is_hugetlbfs()) {
log_error(gc, init)("Backing file must be located on a %s or a %s filesystem", ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
log_error(gc, init)("Backing file must be located on a %s or a %s filesystem",
ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
return;
}

// Make sure the filesystem type matches requested large page type
if (ZLargePages::is_transparent() && !is_tmpfs()) {
log_error(gc, init)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem", ZFILESYSTEM_TMPFS);
log_error(gc, init)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem",
ZFILESYSTEM_TMPFS);
return;
}

if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", ZFILESYSTEM_TMPFS);
log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
ZFILESYSTEM_TMPFS);
return;
}

if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem",
ZFILESYSTEM_HUGETLBFS);
return;
}

if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem",
ZFILESYSTEM_HUGETLBFS);
return;
}

@@ -149,17 +169,21 @@ int ZBackingFile::create_mem_fd(const char* name) const {
return -1;
}

log_debug(gc, init)("Heap backed by file /memfd:%s", filename);
log_info(gc, init)("Heap backed by file: /memfd:%s", filename);

return fd;
}

int ZBackingFile::create_file_fd(const char* name) const {
const char* const filesystem = ZLargePages::is_explicit() ? ZFILESYSTEM_HUGETLBFS : ZFILESYSTEM_TMPFS;
const char* const mountpoint = ZLargePages::is_explicit() ? ZMOUNTPOINT_HUGETLBFS : ZMOUNTPOINT_TMPFS;
const char* const filesystem = ZLargePages::is_explicit()
? ZFILESYSTEM_HUGETLBFS
: ZFILESYSTEM_TMPFS;
const char** const preferred_mountpoints = ZLargePages::is_explicit()
? z_preferred_hugetlbfs_mountpoints
: z_preferred_tmpfs_mountpoints;

// Find mountpoint
ZBackingPath path(filesystem, mountpoint);
ZBackingPath path(filesystem, preferred_mountpoints);
if (path.get() == NULL) {
log_error(gc, init)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
return -1;
@@ -181,7 +205,7 @@ int ZBackingFile::create_file_fd(const char* name) const {
return -1;
}

log_debug(gc, init)("Heap backed by file %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);
log_info(gc, init)("Heap backed by file: %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);

return fd_anon;
}
@@ -207,7 +231,7 @@ int ZBackingFile::create_file_fd(const char* name) const {
return -1;
}

log_debug(gc, init)("Heap backed by file %s", filename);
log_info(gc, init)("Heap backed by file: %s", filename);

return fd;
}
@@ -238,6 +262,10 @@ int ZBackingFile::fd() const {
return _fd;
}

size_t ZBackingFile::available() const {
return _available;
}

bool ZBackingFile::is_tmpfs() const {
return _filesystem == TMPFS_MAGIC;
}
@@ -292,12 +320,12 @@ bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length, size_t alignme
return true;
}

bool ZBackingFile::expand_tmpfs(size_t offset, size_t length) const {
bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length) const {
assert(is_tmpfs(), "Wrong filesystem");
return try_expand_tmpfs(offset, length, os::vm_page_size());
}

bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
bool ZBackingFile::try_expand_hugetlbfs(size_t offset, size_t length) const {
assert(is_hugetlbfs(), "Wrong filesystem");

// Prior to kernel 4.3, hugetlbfs did not support posix_fallocate().
@@ -320,11 +348,11 @@ bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
// process being returned to the huge page pool and made available for new
// allocations.
void* addr = MAP_FAILED;
const int max_attempts = 3;
const int max_attempts = 5;
for (int attempt = 1; attempt <= max_attempts; attempt++) {
addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
if (addr != MAP_FAILED || is_init_completed()) {
// Mapping was successful or initialization phase has completed
if (addr != MAP_FAILED || !_hugetlbfs_mmap_retry) {
// Mapping was successful or mmap retry is disabled
break;
}

@@ -337,6 +365,11 @@ bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
sleep(1);
}

// Disable mmap retry from now on
if (_hugetlbfs_mmap_retry) {
_hugetlbfs_mmap_retry = false;
}

if (addr == MAP_FAILED) {
// Not enough huge pages left
ZErrno err;
@@ -355,6 +388,39 @@ bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const {
return true;
}

bool ZBackingFile::expand(size_t offset, size_t length) const {
return is_hugetlbfs() ? expand_hugetlbfs(offset, length) : expand_tmpfs(offset, length);
bool ZBackingFile::try_expand_tmpfs_or_hugetlbfs(size_t offset, size_t length, size_t alignment) const {
assert(is_aligned(offset, alignment), "Invalid offset");
assert(is_aligned(length, alignment), "Invalid length");

log_debug(gc)("Expanding heap from " SIZE_FORMAT "M to " SIZE_FORMAT "M", offset / M, (offset + length) / M);

return is_hugetlbfs() ? try_expand_hugetlbfs(offset, length) : try_expand_tmpfs(offset, length);
}

size_t ZBackingFile::try_expand(size_t offset, size_t length, size_t alignment) const {
size_t start = offset;
size_t end = offset + length;

// Try to expand
if (try_expand_tmpfs_or_hugetlbfs(start, length, alignment)) {
// Success
return end;
}

// Failed, try to expand as much as possible
for (;;) {
length = align_down((end - start) / 2, alignment);
if (length < alignment) {
// Done, don't expand more
return start;
}

if (try_expand_tmpfs_or_hugetlbfs(start, length, alignment)) {
// Success, try expand more
start += length;
} else {
// Failed, try expand less
end -= length;
}
}
}

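The new try_expand() above falls back to expanding as much as possible: it halves the attempted length (aligned down) on each failure and advances the start on each success, returning the offset that ended up actually backed. A self-contained sketch of the same loop, with a toy success predicate standing in for try_expand_tmpfs_or_hugetlbfs() (names outside the loop are illustrative):

#include <cstddef>
#include <cstdio>

static size_t align_down(size_t value, size_t alignment) {
  return value - (value % alignment);
}

// try_chunk(offset, length) stands in for try_expand_tmpfs_or_hugetlbfs().
// Returns the end offset that was successfully backed.
template <typename TryChunk>
size_t try_expand(size_t offset, size_t length, size_t alignment, TryChunk try_chunk) {
  size_t start = offset;
  size_t end = offset + length;
  if (try_chunk(start, length)) {
    return end;                          // everything fit in one go
  }
  for (;;) {
    length = align_down((end - start) / 2, alignment);
    if (length < alignment) {
      return start;                      // cannot expand any further
    }
    if (try_chunk(start, length)) {
      start += length;                   // success, try to expand more
    } else {
      end -= length;                     // failure, try a smaller step
    }
  }
}

int main() {
  size_t budget = 12;                    // pretend only 12 units can be backed
  auto try_chunk = [&](size_t, size_t len) {
    if (len > budget) return false;
    budget -= len;
    return true;
  };
  std::printf("expanded to %zu\n", try_expand(0, 32, 2, try_chunk));  // prints 12
}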
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,8 +28,11 @@

class ZBackingFile {
private:
static bool _hugetlbfs_mmap_retry;

int _fd;
uint64_t _filesystem;
size_t _available;
bool _initialized;

int create_mem_fd(const char* name) const;
@@ -42,9 +45,9 @@ private:

bool try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
bool try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const;
bool expand_tmpfs(size_t offset, size_t length) const;

bool expand_hugetlbfs(size_t offset, size_t length) const;
bool try_expand_tmpfs(size_t offset, size_t length) const;
bool try_expand_hugetlbfs(size_t offset, size_t length) const;
bool try_expand_tmpfs_or_hugetlbfs(size_t offset, size_t length, size_t alignment) const;

public:
ZBackingFile();
@@ -52,7 +55,9 @@ public:
bool is_initialized() const;

int fd() const;
bool expand(size_t offset, size_t length) const;
size_t available() const;

size_t try_expand(size_t offset, size_t length, size_t alignment) const;
};

#endif // OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
@@ -33,13 +33,13 @@
// Mount information, see proc(5) for more details.
#define PROC_SELF_MOUNTINFO "/proc/self/mountinfo"

ZBackingPath::ZBackingPath(const char* filesystem, const char* preferred_path) {
ZBackingPath::ZBackingPath(const char* filesystem, const char** preferred_mountpoints) {
if (ZPath != NULL) {
// Use specified path
_path = strdup(ZPath);
} else {
// Find suitable path
_path = find_mountpoint(filesystem, preferred_path);
_path = find_mountpoint(filesystem, preferred_mountpoints);
}
}

@@ -52,8 +52,8 @@ char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) con
char* line_mountpoint = NULL;
char* line_filesystem = NULL;

// Parse line and return a newly allocated string containing the mountpoint if
// the line contains a matching filesystem and the mountpoint is accessible by
// Parse line and return a newly allocated string containing the mount point if
// the line contains a matching filesystem and the mount point is accessible by
// the current user.
if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
strcmp(line_filesystem, filesystem) != 0 ||
@@ -68,7 +68,7 @@ char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) con
return line_mountpoint;
}

void ZBackingPath::get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const {
void ZBackingPath::get_mountpoints(const char* filesystem, ZArray<char*>* mountpoints) const {
FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
if (fd == NULL) {
ZErrno err;
@@ -98,37 +98,45 @@ void ZBackingPath::free_mountpoints(ZArray<char*>* mountpoints) const {
mountpoints->clear();
}

char* ZBackingPath::find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const {
char* ZBackingPath::find_preferred_mountpoint(const char* filesystem,
ZArray<char*>* mountpoints,
const char** preferred_mountpoints) const {
// Find preferred mount point
ZArrayIterator<char*> iter1(mountpoints);
for (char* mountpoint; iter1.next(&mountpoint);) {
for (const char** preferred = preferred_mountpoints; *preferred != NULL; preferred++) {
if (!strcmp(mountpoint, *preferred)) {
// Preferred mount point found
return strdup(mountpoint);
}
}
}

// Preferred mount point not found
log_error(gc, init)("More than one %s filesystem found:", filesystem);
ZArrayIterator<char*> iter2(mountpoints);
for (char* mountpoint; iter2.next(&mountpoint);) {
log_error(gc, init)("  %s", mountpoint);
}

return NULL;
}

char* ZBackingPath::find_mountpoint(const char* filesystem, const char** preferred_mountpoints) const {
char* path = NULL;
ZArray<char*> mountpoints;

get_mountpoints(&mountpoints, filesystem);
get_mountpoints(filesystem, &mountpoints);

if (mountpoints.size() == 0) {
// No filesystem found
// No mount point found
log_error(gc, init)("Failed to find an accessible %s filesystem", filesystem);
} else if (mountpoints.size() == 1) {
// One filesystem found
// One mount point found
path = strdup(mountpoints.at(0));
} else if (mountpoints.size() > 1) {
// More than one filesystem found
ZArrayIterator<char*> iter(&mountpoints);
for (char* mountpoint; iter.next(&mountpoint);) {
if (!strcmp(mountpoint, preferred_mountpoint)) {
// Preferred mount point found
path = strdup(mountpoint);
break;
}
}

if (path == NULL) {
// Preferred mount point not found
log_error(gc, init)("More than one %s filesystem found:", filesystem);
ZArrayIterator<char*> iter2(&mountpoints);
for (char* mountpoint; iter2.next(&mountpoint);) {
log_error(gc, init)("  %s", mountpoint);
}
}
} else {
// More than one mount point found
path = find_preferred_mountpoint(filesystem, &mountpoints, preferred_mountpoints);
}

free_mountpoints(&mountpoints);
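The new find_preferred_mountpoint() above returns the first discovered mount point that also appears in the NULL-terminated preferred list, and logs every candidate when none matches. A compact sketch of that selection, with illustrative paths (not ZGC code):

#include <cstring>
#include <cstdio>
#include <vector>

// Return the first discovered mount point that appears in the NULL-terminated
// preferred list, or NULL if none does (the caller then reports the conflict).
static const char* find_preferred(const std::vector<const char*>& discovered,
                                  const char* const* preferred) {
  for (const char* mountpoint : discovered) {
    for (const char* const* p = preferred; *p != NULL; p++) {
      if (std::strcmp(mountpoint, *p) == 0) {
        return mountpoint;
      }
    }
  }
  return NULL;
}

int main() {
  std::vector<const char*> discovered = { "/mnt/tmpfs", "/run/shm" };
  const char* preferred[] = { "/dev/shm", "/run/shm", NULL };
  const char* hit = find_preferred(discovered, preferred);
  std::printf("%s\n", hit ? hit : "(none)");   // prints /run/shm
}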
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,13 +31,19 @@ class ZBackingPath : public StackObj {
private:
char* _path;

char* get_mountpoint(const char* line, const char* filesystem) const;
void get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const;
char* get_mountpoint(const char* line,
const char* filesystem) const;
void get_mountpoints(const char* filesystem,
ZArray<char*>* mountpoints) const;
void free_mountpoints(ZArray<char*>* mountpoints) const;
char* find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const;
char* find_preferred_mountpoint(const char* filesystem,
ZArray<char*>* mountpoints,
const char** preferred_mountpoints) const;
char* find_mountpoint(const char* filesystem,
const char** preferred_mountpoints) const;

public:
ZBackingPath(const char* filesystem, const char* preferred_path);
ZBackingPath(const char* filesystem, const char** preferred_mountpoints);
~ZBackingPath();

const char* get() const;
@@ -52,8 +52,15 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity, size_t granu
_file(),
_granule_size(granule_size) {

// Check and warn if max map count seems too low
if (!_file.is_initialized()) {
return;
}

// Check and warn if max map count is too low
check_max_map_count(max_capacity, granule_size);

// Check and warn if available space on filesystem is too low
check_available_space_on_filesystem(max_capacity);
}

void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t granule_size) const {
@@ -61,7 +68,7 @@ void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t gra
FILE* const file = fopen(filename, "r");
if (file == NULL) {
// Failed to open file, skip check
log_debug(gc)("Failed to open %s", filename);
log_debug(gc, init)("Failed to open %s", filename);
return;
}

@@ -70,7 +77,7 @@ void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t gra
fclose(file);
if (result != 1) {
// Failed to read file, skip check
log_debug(gc)("Failed to read %s", filename);
log_debug(gc, init)("Failed to read %s", filename);
return;
}

@@ -81,15 +88,43 @@ void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t gra
// We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
const size_t required_max_map_count = (max_capacity / granule_size) * 3 * 1.2;
if (actual_max_map_count < required_max_map_count) {
log_warning(gc)("The system limit on number of memory mappings "
"per process might be too low for the given");
log_warning(gc)("Java heap size (" SIZE_FORMAT "M). Please "
"adjust %s to allow for at least", max_capacity / M, filename);
log_warning(gc)(SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). "
"Continuing execution with the current limit could",
required_max_map_count, actual_max_map_count);
log_warning(gc)("lead to a fatal error down the line, due to failed "
"attempts to map memory.");
log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
log_warning(gc, init)("The system limit on number of memory mappings per process might be too low "
"for the given");
log_warning(gc, init)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
max_capacity / M, filename);
log_warning(gc, init)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing "
"execution with the current", required_max_map_count, actual_max_map_count);
log_warning(gc, init)("limit could lead to a fatal error, due to failure to map memory.");
}
}

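For scale, the required_max_map_count formula above works out as follows for a hypothetical 16G max heap with 2M granules (the numbers are an example, not taken from the commit):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;
  const size_t G = 1024 * M;
  const size_t max_capacity = 16 * G;     // example -Xmx
  const size_t granule_size = 2 * M;      // example ZGC granule size
  // (16G / 2M) * 3 * 1.2 = 8192 * 3 * 1.2 = 29491 mappings required,
  // which is under the common vm.max_map_count default of 65530.
  const size_t required = (max_capacity / granule_size) * 3 * 1.2;
  std::printf("required max_map_count = %zu\n", required);   // 29491
}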
void ZPhysicalMemoryBacking::check_available_space_on_filesystem(size_t max_capacity) const {
// Note that the available space on a tmpfs or a hugetlbfs filesystem
// will be zero if no size limit was specified when it was mounted.
const size_t available = _file.available();
if (available == 0) {
// No size limit set, skip check
log_info(gc, init)("Available space on backing filesystem: N/A");
return;
}

log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M",
available / M);

// Warn if the filesystem doesn't currently have enough space available to hold
// the max heap size. The max heap size will be capped if we later hit this limit
// when trying to expand the heap.
if (available < max_capacity) {
log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
log_warning(gc, init)("Not enough space available on the backing filesystem to hold the current "
"max Java heap");
log_warning(gc, init)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem "
"accordingly (available", max_capacity / M);
log_warning(gc, init)("space is currently " SIZE_FORMAT "M). Continuing execution with the current "
"filesystem size could", available / M);
log_warning(gc, init)("lead to a premature OutOfMemoryError being thrown, due to failure to map "
"memory.");
}
}

@@ -97,18 +132,16 @@ bool ZPhysicalMemoryBacking::is_initialized() const {
return _file.is_initialized();
}

bool ZPhysicalMemoryBacking::expand(size_t from, size_t to) {
const size_t size = to - from;
size_t ZPhysicalMemoryBacking::try_expand(size_t old_capacity, size_t new_capacity) {
assert(old_capacity < new_capacity, "Invalid old/new capacity");

// Expand
if (!_file.expand(from, size)) {
return false;
const size_t capacity = _file.try_expand(old_capacity, new_capacity - old_capacity, _granule_size);
if (capacity > old_capacity) {
// Add expanded capacity to free list
_manager.free(old_capacity, capacity - old_capacity);
}

// Add expanded space to free list
_manager.free(from, size);

return true;
return capacity;
}

ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@ private:
const size_t _granule_size;

void check_max_map_count(size_t max_capacity, size_t granule_size) const;
void check_available_space_on_filesystem(size_t max_capacity) const;
void map_failed(ZErrno err) const;

void advise_view(uintptr_t addr, size_t size) const;
@@ -49,7 +50,8 @@ public:

bool is_initialized() const;

bool expand(size_t from, size_t to);
size_t try_expand(size_t old_capacity, size_t new_capacity);

ZPhysicalMemory alloc(size_t size);
void free(ZPhysicalMemory pmem);
@@ -272,6 +272,7 @@ void AOTCompiledMethod::metadata_do(void f(Metadata*)) {
if (md != _method) f(md);
}
} else if (iter.type() == relocInfo::virtual_call_type) {
ResourceMark rm;
// Check compiledIC holders associated with this nmethod
CompiledIC *ic = CompiledIC_at(&iter);
if (ic->is_icholder_call()) {
@@ -444,6 +445,7 @@ void AOTCompiledMethod::clear_inline_caches() {
return;
}

ResourceMark rm;
RelocIterator iter(this);
while (iter.next()) {
iter.reloc()->clear_inline_cache();
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -255,8 +255,7 @@ ciConstant ciBytecodeStream::get_constant() {
// constant.
constantTag ciBytecodeStream::get_constant_pool_tag(int index) const {
VM_ENTRY_MARK;
BasicType bt = _method->get_Method()->constants()->basic_type_for_constant_at(index);
return constantTag::ofBasicType(bt);
return _method->get_Method()->constants()->constant_tag_at(index);
}

// ------------------------------------------------------------------
@@ -325,6 +325,7 @@ void CompiledMethod::clear_inline_caches() {
// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
assert_locked_or_safepoint(CompiledIC_lock);
ResourceMark rm;
RelocIterator iter(this);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
@@ -547,6 +548,7 @@ bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurre
bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
assert_locked_or_safepoint(CompiledIC_lock);
bool postponed = false;
ResourceMark rm;

// Find all calls in an nmethod and clear the ones that point to non-entrant,
// zombie and unloaded nmethods.
@@ -530,7 +530,6 @@ CompileQueue* CompileBroker::compile_queue(int comp_level) {

void CompileBroker::print_compile_queues(outputStream* st) {
st->print_cr("Current compiles: ");
MutexLocker locker(MethodCompileQueue_lock);

char buf[2000];
int buflen = sizeof(buf);
@@ -546,7 +545,7 @@ void CompileBroker::print_compile_queues(outputStream* st) {
}

void CompileQueue::print(outputStream* st) {
assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
assert_locked_or_safepoint(MethodCompileQueue_lock);
st->print_cr("%s:", name());
CompileTask* task = _first;
if (task == NULL) {
@@ -253,17 +253,18 @@ void set_jvmci_specific_flags() {
if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
FLAG_SET_DEFAULT(OnStackReplacePercentage, 933);
}
// JVMCI needs values not less than defaults
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
FLAG_SET_DEFAULT(ReservedCodeCacheSize, 64*M);
FLAG_SET_DEFAULT(ReservedCodeCacheSize, MAX2(64*M, ReservedCodeCacheSize));
}
if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
FLAG_SET_DEFAULT(InitialCodeCacheSize, 16*M);
FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize));
}
if (FLAG_IS_DEFAULT(MetaspaceSize)) {
FLAG_SET_DEFAULT(MetaspaceSize, 12*M);
FLAG_SET_DEFAULT(MetaspaceSize, MAX2(12*M, MetaspaceSize));
}
if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
FLAG_SET_DEFAULT(NewSizeThreadIncrease, 4*K);
FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease));
}
if (TieredStopAtLevel != CompLevel_full_optimization) {
// Currently JVMCI compiler can only work at the full optimization level
@ -1024,11 +1024,17 @@ class G1UpdateRemSetTrackingBeforeRebuildTask : public AbstractGangTask {

uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild.

void update_remset_before_rebuild(HeapRegion * hr) {
void update_remset_before_rebuild(HeapRegion* hr) {
G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker();

size_t const live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize;
bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
bool selected_for_rebuild;
if (hr->is_humongous()) {
bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0;
selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live);
} else {
size_t const live_bytes = _cm->liveness(hr->hrm_index());
selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes);
}
if (selected_for_rebuild) {
_num_regions_selected_for_rebuild++;
}

@ -29,10 +29,6 @@
#include "gc/g1/heapRegionRemSet.hpp"
#include "runtime/safepoint.hpp"

bool G1RemSetTrackingPolicy::is_interesting_humongous_region(HeapRegion* r) const {
return r->is_humongous() && oop(r->humongous_start_region()->bottom())->is_typeArray();
}

bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(HeapRegion* r) const {
// All non-free, non-young, non-closed archive regions need to be scanned for references;
// At every gc we gather references to other regions in young, and closed archive
@ -64,51 +60,81 @@ void G1RemSetTrackingPolicy::update_at_free(HeapRegion* r) {
/* nothing to do */
}

bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes) {
static void print_before_rebuild(HeapRegion* r, bool selected_for_rebuild, size_t total_live_bytes, size_t live_bytes) {
log_trace(gc, remset, tracking)("Before rebuild region %u "
"(ntams: " PTR_FORMAT ") "
"total_live_bytes " SIZE_FORMAT " "
"selected %s "
"(live_bytes " SIZE_FORMAT " "
"next_marked " SIZE_FORMAT " "
"marked " SIZE_FORMAT " "
"type %s)",
r->hrm_index(),
p2i(r->next_top_at_mark_start()),
total_live_bytes,
BOOL_TO_STR(selected_for_rebuild),
live_bytes,
r->next_marked_bytes(),
r->marked_bytes(),
r->get_type_str());
}

bool G1RemSetTrackingPolicy::update_humongous_before_rebuild(HeapRegion* r, bool is_live) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(r->is_humongous(), "Region %u should be humongous", r->hrm_index());

if (r->is_archive()) {
return false;
}

assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());

bool selected_for_rebuild = false;
// For humongous regions, to be of interest for rebuilding the remembered set the following must apply:
// - We always try to update the remembered sets of humongous regions containing
// type arrays as they might have been reset after full gc.
if (is_live && oop(r->humongous_start_region()->bottom())->is_typeArray() && !r->rem_set()->is_tracked()) {
r->rem_set()->set_state_updating();
selected_for_rebuild = true;
}

size_t const live_bytes = is_live ? HeapRegion::GrainBytes : 0;
print_before_rebuild(r, selected_for_rebuild, live_bytes, live_bytes);

return selected_for_rebuild;
}

bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(!r->is_humongous(), "Region %u is humongous", r->hrm_index());

// Only consider updating the remembered set for old gen regions - excluding archive regions
// which never move (but are "Old" regions).
if (r->is_old_or_humongous() && !r->is_archive()) {
size_t between_ntams_and_top = (r->top() - r->next_top_at_mark_start()) * HeapWordSize;
size_t total_live_bytes = live_bytes + between_ntams_and_top;
// Completely free regions after rebuild are of no interest wrt rebuilding the
// remembered set.
assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());
// To be of interest for rebuilding the remembered set the following must apply:
// - They must contain some live data in them.
// - We always try to update the remembered sets of humongous regions containing
// type arrays if they are empty as they might have been reset after full gc.
// - Only need to rebuild non-complete remembered sets.
// - Otherwise only add those old gen regions which occupancy is low enough that there
// is a chance that we will ever evacuate them in the mixed gcs.
if ((total_live_bytes > 0) &&
(is_interesting_humongous_region(r) || CollectionSetChooser::region_occupancy_low_enough_for_evac(total_live_bytes)) &&
!r->rem_set()->is_tracked()) {

r->rem_set()->set_state_updating();
selected_for_rebuild = true;
}
log_trace(gc, remset, tracking)("Before rebuild region %u "
"(ntams: " PTR_FORMAT ") "
"total_live_bytes " SIZE_FORMAT " "
"selected %s "
"(live_bytes " SIZE_FORMAT " "
"next_marked " SIZE_FORMAT " "
"marked " SIZE_FORMAT " "
"type %s)",
r->hrm_index(),
p2i(r->next_top_at_mark_start()),
total_live_bytes,
BOOL_TO_STR(selected_for_rebuild),
live_bytes,
r->next_marked_bytes(),
r->marked_bytes(),
r->get_type_str());
if (!r->is_old() || r->is_archive()) {
return false;
}

assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());

size_t between_ntams_and_top = (r->top() - r->next_top_at_mark_start()) * HeapWordSize;
size_t total_live_bytes = live_bytes + between_ntams_and_top;

bool selected_for_rebuild = false;
// For old regions, to be of interest for rebuilding the remembered set the following must apply:
// - They must contain some live data in them.
// - Only need to rebuild non-complete remembered sets.
// - Otherwise only add those old gen regions which occupancy is low enough that there
// is a chance that we will ever evacuate them in the mixed gcs.
if ((total_live_bytes > 0) &&
CollectionSetChooser::region_occupancy_low_enough_for_evac(total_live_bytes) &&
!r->rem_set()->is_tracked()) {

r->rem_set()->set_state_updating();
selected_for_rebuild = true;
}

print_before_rebuild(r, selected_for_rebuild, total_live_bytes, live_bytes);

return selected_for_rebuild;
}

@ -149,4 +175,3 @@ void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
r->rem_set()->mem_size());
}
}

@ -33,10 +33,6 @@
// the remembered set, ie. when it should be tracked, and if/when the remembered
// set is complete.
class G1RemSetTrackingPolicy : public CHeapObj<mtGC> {
private:
// Is the given region an interesting humongous region to start remembered set tracking
// for?
bool is_interesting_humongous_region(HeapRegion* r) const;
public:
// Do we need to scan the given region to get all outgoing references for remembered
// set rebuild?
@ -45,6 +41,9 @@ public:
// called at any time. The caller makes sure that the changes to the remembered
// set state are visible to other threads.
void update_at_allocate(HeapRegion* r);
// Update remembered set tracking state for humongous regions before we are going to
// rebuild remembered sets. Called at safepoint in the remark pause.
bool update_humongous_before_rebuild(HeapRegion* r, bool is_live);
// Update remembered set tracking state before we are going to rebuild remembered
// sets. Called at safepoint in the remark pause.
bool update_before_rebuild(HeapRegion* r, size_t live_bytes);

@ -80,8 +80,7 @@ static const char* ReferenceTypeNames[REF_PHANTOM + 1] = {
STATIC_ASSERT((REF_PHANTOM + 1) == ARRAY_SIZE(ReferenceTypeNames));

static const char* phase_enum_2_phase_string(ReferenceProcessor::RefProcPhases phase) {
assert(phase >= ReferenceProcessor::RefPhase1 && phase <= ReferenceProcessor::RefPhaseMax,
"Invalid reference processing phase (%d)", phase);
ASSERT_PHASE(phase);
return PhaseNames[phase];
}

@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP

#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.hpp"

@ -81,7 +81,7 @@ bool ZDirector::rule_warmup() const {
// Perform GC if heap usage passes 10/20/30% and no other GC has been
// performed yet. This allows us to get some early samples of the GC
// duration, which is needed by the other rules.
const size_t max_capacity = ZHeap::heap()->max_capacity();
const size_t max_capacity = ZHeap::heap()->current_max_capacity();
const size_t used = ZHeap::heap()->used();
const double used_threshold_percent = (ZStatCycle::ncycles() + 1) * 0.1;
const size_t used_threshold = max_capacity * used_threshold_percent;
@ -107,7 +107,7 @@ bool ZDirector::rule_allocation_rate() const {
// Calculate amount of free memory available to Java threads. Note that
// the heap reserve is not available to Java threads and is therefore not
// considered part of the free memory.
const size_t max_capacity = ZHeap::heap()->max_capacity();
const size_t max_capacity = ZHeap::heap()->current_max_capacity();
const size_t max_reserve = ZHeap::heap()->max_reserve();
const size_t used = ZHeap::heap()->used();
const size_t free_with_reserve = max_capacity - used;
@ -155,7 +155,7 @@ bool ZDirector::rule_proactive() const {
// passed since the previous GC. This helps avoid superfluous GCs when running
// applications with very low allocation rate.
const size_t used_after_last_gc = ZStatHeap::used_at_relocate_end();
const size_t used_increase_threshold = ZHeap::heap()->max_capacity() * 0.10; // 10%
const size_t used_increase_threshold = ZHeap::heap()->current_max_capacity() * 0.10; // 10%
const size_t used_threshold = used_after_last_gc + used_increase_threshold;
const size_t used = ZHeap::heap()->used();
const double time_since_last_gc = ZStatCycle::time_since_last();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -21,6 +21,38 @@
* questions.
*/

/*
* This file is available under and governed by the GNU General Public
* License version 2 only, as published by the Free Software Foundation.
* However, the following notice accompanied the original version of this
* file:
*
* (C) 2009 by Remo Dentato (rdentato@gmail.com)
*
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* http://opensource.org/licenses/bsd-license.php
*/

#ifndef SHARE_GC_Z_ZHASH_INLINE_HPP
#define SHARE_GC_Z_ZHASH_INLINE_HPP

@ -107,6 +107,10 @@ size_t ZHeap::max_capacity() const {
return _page_allocator.max_capacity();
}

size_t ZHeap::current_max_capacity() const {
return _page_allocator.current_max_capacity();
}

size_t ZHeap::capacity() const {
return _page_allocator.capacity();
}

@ -79,6 +79,7 @@ public:
// Heap metrics
size_t min_capacity() const;
size_t max_capacity() const;
size_t current_max_capacity() const;
size_t capacity() const;
size_t max_reserve() const;
size_t used_high() const;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,11 +84,12 @@ public:
ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;

ZPageAllocator::ZPageAllocator(size_t min_capacity, size_t max_capacity, size_t max_reserve) :
_lock(),
_virtual(),
_physical(max_capacity, ZPageSizeMin),
_cache(),
_pre_mapped(_virtual, _physical, min_capacity),
_max_reserve(max_reserve),
_pre_mapped(_virtual, _physical, try_ensure_unused_for_pre_mapped(min_capacity)),
_used_high(0),
_used_low(0),
_used(0),
@ -107,6 +108,10 @@ size_t ZPageAllocator::max_capacity() const {
return _physical.max_capacity();
}

size_t ZPageAllocator::current_max_capacity() const {
return _physical.current_max_capacity();
}

size_t ZPageAllocator::capacity() const {
return _physical.capacity();
}
@ -169,18 +174,43 @@ void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
}
}

size_t ZPageAllocator::available(ZAllocationFlags flags) const {
size_t available = max_capacity() - used();
assert(_physical.available() + _pre_mapped.available() + _cache.available() == available, "Should be equal");
size_t ZPageAllocator::max_available(bool no_reserve) const {
size_t available = current_max_capacity() - used();

if (flags.no_reserve()) {
// The memory reserve should not be considered free
if (no_reserve) {
// The reserve should not be considered available
available -= MIN2(available, max_reserve());
}

return available;
}

size_t ZPageAllocator::try_ensure_unused(size_t size, bool no_reserve) {
// Ensure that we always have space available for the reserve. This
// is needed to avoid losing the reserve because of failure to map
// more memory before reaching max capacity.
_physical.try_ensure_unused_capacity(size + max_reserve());

size_t unused = _physical.unused_capacity();

if (no_reserve) {
// The reserve should not be considered unused
unused -= MIN2(unused, max_reserve());
}

return MIN2(size, unused);
}

size_t ZPageAllocator::try_ensure_unused_for_pre_mapped(size_t size) {
// This function is called during construction, where the
// physical memory manager might have failed to initialied.
if (!_physical.is_initialized()) {
return 0;
}

return try_ensure_unused(size, true /* no_reserve */);
}

ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
// Allocate physical memory
const ZPhysicalMemory pmem = _physical.alloc(size);
@ -259,8 +289,8 @@ void ZPageAllocator::check_out_of_memory_during_initialization() {
}

ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZAllocationFlags flags) {
const size_t available_total = available(flags);
if (available_total < size) {
const size_t max = max_available(flags.no_reserve());
if (max < size) {
// Not enough free memory
return NULL;
}
@ -281,11 +311,11 @@ ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZAlloc
// subsequent allocations can use the physical memory.
flush_pre_mapped();

// Check if physical memory is available
const size_t available_physical = _physical.available();
if (available_physical < size) {
// Try ensure that physical memory is available
const size_t unused = try_ensure_unused(size, flags.no_reserve());
if (unused < size) {
// Flush cache to free up more physical memory
flush_cache(size - available_physical);
flush_cache(size - unused);
}

// Create new page and allocate physical memory
@ -303,7 +333,7 @@ ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationF
increase_used(size, flags.relocation());

// Send trace event
ZTracer::tracer()->report_page_alloc(size, used(), available(flags), _cache.available(), flags);
ZTracer::tracer()->report_page_alloc(size, used(), max_available(flags.no_reserve()), _cache.available(), flags);

return page;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -43,8 +43,8 @@ private:
ZVirtualMemoryManager _virtual;
ZPhysicalMemoryManager _physical;
ZPageCache _cache;
ZPreMappedMemory _pre_mapped;
const size_t _max_reserve;
ZPreMappedMemory _pre_mapped;
size_t _used_high;
size_t _used_low;
size_t _used;
@ -58,7 +58,9 @@ private:
void increase_used(size_t size, bool relocation);
void decrease_used(size_t size, bool reclaimed);

size_t available(ZAllocationFlags flags) const;
size_t max_available(bool no_reserve) const;
size_t try_ensure_unused(size_t size, bool no_reserve);
size_t try_ensure_unused_for_pre_mapped(size_t size);

ZPage* create_page(uint8_t type, size_t size);
void map_page(ZPage* page);
@ -83,6 +85,7 @@ public:
bool is_initialized() const;

size_t max_capacity() const;
size_t current_max_capacity() const;
size_t capacity() const;
size_t max_reserve() const;
size_t used_high() const;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "memory/allocation.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

ZPhysicalMemory::ZPhysicalMemory() :
_nsegments(0),
@ -93,6 +94,7 @@ void ZPhysicalMemory::clear() {
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity, size_t granule_size) :
_backing(max_capacity, granule_size),
_max_capacity(max_capacity),
_current_max_capacity(max_capacity),
_capacity(0),
_used(0) {}

@ -100,31 +102,34 @@ bool ZPhysicalMemoryManager::is_initialized() const {
return _backing.is_initialized();
}

bool ZPhysicalMemoryManager::ensure_available(size_t size) {
const size_t unused_capacity = _capacity - _used;
if (unused_capacity >= size) {
// Enough unused capacity available
return true;
void ZPhysicalMemoryManager::try_ensure_unused_capacity(size_t size) {
const size_t unused = unused_capacity();
if (unused >= size) {
// Don't try to expand, enough unused capacity available
return;
}

const size_t expand_with = size - unused_capacity;
const size_t new_capacity = _capacity + expand_with;
if (new_capacity > _max_capacity) {
// Can not expand beyond max capacity
return false;
const size_t current_max = current_max_capacity();
if (_capacity == current_max) {
// Don't try to expand, current max capacity reached
return;
}

// Expand
if (!_backing.expand(_capacity, new_capacity)) {
log_error(gc)("Failed to expand Java heap with " SIZE_FORMAT "%s",
byte_size_in_proper_unit(expand_with),
proper_unit_for_byte_size(expand_with));
return false;
// Try to expand
const size_t old_capacity = capacity();
const size_t new_capacity = MIN2(old_capacity + size - unused, current_max);
_capacity = _backing.try_expand(old_capacity, new_capacity);

if (_capacity != new_capacity) {
// Failed, or partly failed, to expand
log_error(gc, init)("Not enough space available on the backing filesystem to hold the current max");
log_error(gc, init)("Java heap size (" SIZE_FORMAT "M). Forcefully lowering max Java heap size to "
SIZE_FORMAT "M (%.0lf%%).", current_max / M, _capacity / M,
percent_of(_capacity, current_max));

// Adjust current max capacity to avoid further expand attempts
_current_max_capacity = _capacity;
}

_capacity = new_capacity;

return true;
}

void ZPhysicalMemoryManager::nmt_commit(ZPhysicalMemory pmem, uintptr_t offset) {
@ -144,7 +149,7 @@ void ZPhysicalMemoryManager::nmt_uncommit(ZPhysicalMemory pmem, uintptr_t offset
}

ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) {
if (!ensure_available(size)) {
if (unused_capacity() < size) {
// Not enough memory available
return ZPhysicalMemory();
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -70,11 +70,10 @@ class ZPhysicalMemoryManager {
private:
ZPhysicalMemoryBacking _backing;
const size_t _max_capacity;
size_t _current_max_capacity;
size_t _capacity;
size_t _used;

bool ensure_available(size_t size);

void nmt_commit(ZPhysicalMemory pmem, uintptr_t offset);
void nmt_uncommit(ZPhysicalMemory pmem, uintptr_t offset);

@ -84,9 +83,11 @@ public:
bool is_initialized() const;

size_t max_capacity() const;
size_t current_max_capacity() const;
size_t capacity() const;
size_t used() const;
size_t available() const;
size_t unused_capacity() const;

void try_ensure_unused_capacity(size_t size);

ZPhysicalMemory alloc(size_t size);
void free(ZPhysicalMemory pmem);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -71,16 +71,16 @@ inline size_t ZPhysicalMemoryManager::max_capacity() const {
return _max_capacity;
}

inline size_t ZPhysicalMemoryManager::current_max_capacity() const {
return _current_max_capacity;
}

inline size_t ZPhysicalMemoryManager::capacity() const {
return _capacity;
}

inline size_t ZPhysicalMemoryManager::used() const {
return _used;
}

inline size_t ZPhysicalMemoryManager::available() const {
return _max_capacity - _used;
inline size_t ZPhysicalMemoryManager::unused_capacity() const {
return _capacity - _used;
}

#endif // SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,21 +42,25 @@ ZPreMappedMemory::ZPreMappedMemory(ZVirtualMemoryManager &vmm, ZPhysicalMemoryMa
log_info(gc, init)("Pre-touching: %s", AlwaysPreTouch ? "Enabled" : "Disabled");
log_info(gc, init)("Pre-mapping: " SIZE_FORMAT "M", size / M);

_pmem = pmm.alloc(size);
if (_pmem.is_null()) {
// Out of memory
return;
}
if (size > 0) {
_pmem = pmm.alloc(size);
if (_pmem.is_null()) {
// Out of memory
log_error(gc, init)("Failed to pre-map Java heap (Cannot allocate physical memory)");
return;
}

_vmem = vmm.alloc(size, true /* alloc_from_front */);
if (_vmem.is_null()) {
// Out of address space
pmm.free(_pmem);
return;
}
_vmem = vmm.alloc(size, true /* alloc_from_front */);
if (_vmem.is_null()) {
// Out of address space
log_error(gc, init)("Failed to pre-map Java heap (Cannot allocate virtual memory)");
pmm.free(_pmem);
return;
}

// Map physical memory
pmm.map(_pmem, _vmem.start());
// Map physical memory
pmm.map(_pmem, _vmem.start());
}

_initialized = true;
}

@ -26,10 +26,10 @@
#include "jfr/jfr.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/repository/jfrEmergencyDump.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "runtime/java.hpp"

@ -64,9 +64,7 @@ void Jfr::on_unloading_classes() {
}

void Jfr::on_thread_exit(JavaThread* thread) {
if (JfrRecorder::is_recording()) {
JfrThreadLocal::on_exit(thread);
}
JfrThreadLocal::on_exit(thread);
}

void Jfr::on_thread_destruct(Thread* thread) {

@ -74,7 +74,6 @@ CLDClaimContext::CLDClaimContext(ClassLoaderData* cld) : _cld(cld) {

CLDClaimContext::~CLDClaimContext() {
if (_cld != NULL) {
assert(!_cld->claimed(), "invariant");
_cld->claim();
assert(_cld->claimed(), "invariant");
}

@ -60,22 +60,32 @@
#include "gc/g1/g1YCTypes.hpp"
#endif

class JfrCheckpointThreadCountClosure : public ThreadClosure {
private:
u4 _total_threads;
public:
JfrCheckpointThreadCountClosure() : _total_threads(0) {}
u4 total_threads() { return _total_threads; }
void do_thread(Thread *t) { _total_threads++; }
};

// Requires a ResourceMark for get_thread_name/as_utf8
class JfrCheckpointThreadClosure : public ThreadClosure {
private:
JfrCheckpointWriter& _writer;
Thread* _curthread;
JfrCheckpointContext _ctx;
const intptr_t _count_position;
Thread* const _curthread;
u4 _count;

public:
JfrCheckpointThreadClosure(JfrCheckpointWriter& writer) : _writer(writer), _curthread(Thread::current()) {}
JfrCheckpointThreadClosure(JfrCheckpointWriter& writer) : _writer(writer),
_ctx(writer.context()),
_count_position(writer.reserve(sizeof(u4))),
_curthread(Thread::current()),
_count(0) {
}

~JfrCheckpointThreadClosure() {
if (_count == 0) {
// restore
_writer.set_context(_ctx);
return;
}
_writer.write_count(_count, _count_position);
}

void do_thread(Thread* t);
};

@ -83,10 +93,16 @@ class JfrCheckpointThreadClosure : public ThreadClosure {
void JfrCheckpointThreadClosure::do_thread(Thread* t) {
assert(t != NULL, "invariant");
assert_locked_or_safepoint(Threads_lock);
_writer.write_key(t->jfr_thread_local()->thread_id());
const JfrThreadLocal* const tl = t->jfr_thread_local();
assert(tl != NULL, "invariant");
if (tl->is_dead()) {
return;
}
++_count;
_writer.write_key(tl->thread_id());
_writer.write(t->name());
const OSThread* const os_thread = t->osthread();
_writer.write<traceid>(os_thread != NULL ? os_thread->thread_id() : (u8)0);
_writer.write<traceid>(os_thread != NULL ? os_thread->thread_id() : 0);
if (t->is_Java_thread()) {
JavaThread* const jt = (JavaThread*)t;
_writer.write(jt->name());
@ -97,17 +113,12 @@ void JfrCheckpointThreadClosure::do_thread(Thread* t) {
return;
}
_writer.write((const char*)NULL); // java name
_writer.write<traceid>((traceid)0); // java thread id
_writer.write<traceid>((traceid)0); // java thread group
_writer.write((traceid)0); // java thread id
_writer.write((traceid)0); // java thread group
}

void JfrThreadConstantSet::serialize(JfrCheckpointWriter& writer) {
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
JfrCheckpointThreadCountClosure tcc;
Threads::threads_do(&tcc);
const u4 total_threads = tcc.total_threads();
// THREADS
writer.write_count(total_threads);
JfrCheckpointThreadClosure tc(writer);
Threads::threads_do(&tc);
}
@ -334,7 +345,7 @@ void JfrThreadConstant::serialize(JfrCheckpointWriter& writer) {
writer.write_count(1);
writer.write_key(_thread->jfr_thread_local()->thread_id());
writer.write(thread_name);
writer.write((u8)_thread->osthread()->thread_id());
writer.write((traceid)_thread->osthread()->thread_id());
writer.write(thread_name);
writer.write(java_lang_thread_id);
writer.write(thread_group_id);

@ -148,9 +148,8 @@ void JfrTypeManager::write_safepoint_types(JfrCheckpointWriter& writer) {
}

void JfrTypeManager::write_type_set() {
assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
// can safepoint here because of Module_lock
MutexLockerEx lock(Module_lock);
MutexLockerEx lock(SafepointSynchronize::is_at_safepoint() ? NULL : Module_lock);
JfrCheckpointWriter writer(true, true, Thread::current());
TypeSet set;
set.serialize(writer);

@ -23,8 +23,9 @@
*/

#include "precompiled.hpp"
#include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/periodic/jfrThreadCPULoadEvent.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
@ -51,7 +52,8 @@ JfrThreadLocal::JfrThreadLocal() :
_wallclock_time(os::javaTimeNanos()),
_stack_trace_hash(0),
_stackdepth(0),
_entering_suspend_flag(0) {}
_entering_suspend_flag(0),
_dead(false) {}

u8 JfrThreadLocal::add_data_lost(u8 value) {
_data_lost += value;
@ -71,9 +73,17 @@ const JfrCheckpointBlobHandle& JfrThreadLocal::thread_checkpoint() const {
return _thread_cp;
}

void JfrThreadLocal::set_dead() {
assert(!is_dead(), "invariant");
_dead = true;
}

void JfrThreadLocal::on_exit(JavaThread* thread) {
JfrCheckpointManager::write_thread_checkpoint(thread);
JfrThreadCPULoadEvent::send_event_for_thread(thread);
if (JfrRecorder::is_recording()) {
JfrCheckpointManager::write_thread_checkpoint(thread);
JfrThreadCPULoadEvent::send_event_for_thread(thread);
}
thread->jfr_thread_local()->set_dead();
}

void JfrThreadLocal::on_destruct(Thread* thread) {

@ -50,11 +50,14 @@ class JfrThreadLocal {
unsigned int _stack_trace_hash;
mutable u4 _stackdepth;
volatile jint _entering_suspend_flag;
bool _dead;

JfrBuffer* install_native_buffer() const;
JfrBuffer* install_java_buffer() const;
JfrStackFrame* install_stackframes() const;

void set_dead();

public:
JfrThreadLocal();

@ -202,6 +205,10 @@ class JfrThreadLocal {
_trace_id = id;
}

bool is_dead() const {
return _dead;
}

bool has_thread_checkpoint() const;
void set_thread_checkpoint(const JfrCheckpointBlobHandle& handle);
const JfrCheckpointBlobHandle& thread_checkpoint() const;

@ -807,6 +807,17 @@ void ConstantPool::save_and_throw_exception(const constantPoolHandle& this_cp, i
}
}

constantTag ConstantPool::constant_tag_at(int which) {
constantTag tag = tag_at(which);
if (tag.is_dynamic_constant() ||
tag.is_dynamic_constant_in_error()) {
// have to look at the signature for this one
Symbol* constant_type = uncached_signature_ref_at(which);
return constantTag::ofBasicType(FieldType::basic_type(constant_type));
}
return tag;
}

BasicType ConstantPool::basic_type_for_constant_at(int which) {
constantTag tag = tag_at(which);
if (tag.is_dynamic_constant() ||

@ -719,6 +719,9 @@ class ConstantPool : public Metadata {
enum { _no_index_sentinel = -1, _possible_index_sentinel = -2 };
public:

// Get the tag for a constant, which may involve a constant dynamic
constantTag constant_tag_at(int which);
// Get the basic type for a constant, which may involve a constant dynamic
BasicType basic_type_for_constant_at(int which);

// Resolve late bound constants.

@ -861,7 +861,9 @@ bool IdealLoopTree::policy_unroll(PhaseIdealLoop *phase) {

// Check for being too big
if (body_size > (uint)_local_loop_unroll_limit) {
if ((UseSubwordForMaxVector || xors_in_loop >= 4) && body_size < (uint)LoopUnrollLimit * 4) return true;
if ((cl->is_subword_loop() || xors_in_loop >= 4) && body_size < (uint)LoopUnrollLimit * 4) {
return true;
}
// Normal case: loop too big
return false;
}

@ -616,6 +616,11 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop) {
}

IfNode* check_iff = limit_check_proj->in(0)->as_If();

if (!is_dominator(get_ctrl(limit), check_iff->in(0))) {
return false;
}

Node* cmp_limit;
Node* bol;

@ -75,7 +75,8 @@ protected:
HasRangeChecks=8192,
IsMultiversioned=16384,
StripMined=32768,
ProfileTripFailed=65536};
SubwordLoop=65536,
ProfileTripFailed=131072};
char _unswitch_count;
enum { _unswitch_max=3 };
char _postloop_flags;
@ -99,6 +100,7 @@ public:
bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; }
bool is_strip_mined() const { return _loop_flags & StripMined; }
bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; }
bool is_subword_loop() const { return _loop_flags & SubwordLoop; }

void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; }
void mark_has_reductions() { _loop_flags |= HasReductions; }
@ -112,6 +114,7 @@ public:
void mark_strip_mined() { _loop_flags |= StripMined; }
void clear_strip_mined() { _loop_flags &= ~StripMined; }
void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; }
void mark_subword_loop() { _loop_flags |= SubwordLoop; }

int unswitch_max() { return _unswitch_max; }
int unswitch_count() { return _unswitch_count; }

@ -376,6 +376,7 @@ void SuperWord::unrolling_analysis(int &local_loop_unroll_factor) {
if (same_type) {
max_vector = cur_max_vector;
flag_small_bt = true;
cl->mark_subword_loop();
}
}
}

@ -963,7 +963,7 @@ bool Thread::owns_locks_but_compiled_lock() const {

// The flag: potential_vm_operation notifies if this particular safepoint state could potentially
// invoke the vm-thread (e.g., an oop allocation). In that case, we also have to make sure that
// no threads which allow_vm_block's are held
// no locks which allow_vm_block's are held
void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) {
// Check if current thread is allowed to block at a safepoint
if (!(_allow_safepoint_count == 0)) {

@ -57,12 +57,18 @@ bool MallocSiteTable::initialize() {
// Create pseudo call stack for hashtable entry allocation
address pc[3];
if (NMT_TrackingStackDepth >= 3) {
pc[2] = (address)MallocSiteTable::allocation_at;
uintx *fp = (uintx*)MallocSiteTable::allocation_at;
// On ppc64, 'fp' is a pointer to a function descriptor which is a struct of
// three native pointers where the first pointer is the real function address.
// See: http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.html#FUNC-DES
pc[2] = (address)(fp PPC64_ONLY(BIG_ENDIAN_ONLY([0])));
}
if (NMT_TrackingStackDepth >= 2) {
pc[1] = (address)MallocSiteTable::lookup_or_add;
uintx *fp = (uintx*)MallocSiteTable::lookup_or_add;
pc[1] = (address)(fp PPC64_ONLY(BIG_ENDIAN_ONLY([0])));
}
pc[0] = (address)MallocSiteTable::new_entry;
uintx *fp = (uintx*)MallocSiteTable::new_entry;
pc[0] = (address)(fp PPC64_ONLY(BIG_ENDIAN_ONLY([0])));

static const NativeCallStack stack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth)));
static const MallocSiteHashtableEntry entry(stack, mtNMT);

@ -569,6 +569,14 @@
#define NOT_AARCH64(code) code
#endif

#ifdef VM_LITTLE_ENDIAN
#define LITTLE_ENDIAN_ONLY(code) code
#define BIG_ENDIAN_ONLY(code)
#else
#define LITTLE_ENDIAN_ONLY(code)
#define BIG_ENDIAN_ONLY(code) code
#endif

#define define_pd_global(type, name, value) const type pd_##name = value;

// Helper macros for constructing file names for includes.

@ -27,6 +27,7 @@ package jdk.internal.platform.cgroupv1;

import java.io.BufferedReader;
import java.io.IOException;
import java.math.BigInteger;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
@ -100,11 +101,20 @@ public class SubSystem {

public static long getLongValue(SubSystem subsystem, String parm) {
String strval = getStringValue(subsystem, parm);
long retval = 0;

if (strval == null) return 0L;

long retval = Long.parseLong(strval);

try {
retval = Long.parseLong(strval);
} catch (NumberFormatException e) {
// For some properties (e.g. memory.limit_in_bytes) we may overflow the range of signed long.
// In this case, return Long.max
BigInteger b = new BigInteger(strval);
if (b.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
return Long.MAX_VALUE;
}
}
return retval;
}

@ -658,7 +658,7 @@ final class SSLSessionImpl extends ExtendedSSLSession {
*/
@Override
public Principal getLocalPrincipal() {
return ((localCerts == null && localCerts.length != 0) ? null :
return ((localCerts == null || localCerts.length == 0) ? null :
localCerts[0].getSubjectX500Principal());
}

@ -0,0 +1,68 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

package sun.util.cldr;

import static sun.util.locale.provider.LocaleProviderAdapter.Type;

import java.util.Locale;
import java.util.Set;
import sun.util.locale.provider.AvailableLanguageTags;
import sun.util.locale.provider.CalendarNameProviderImpl;
import sun.util.locale.provider.LocaleProviderAdapter;


public class CLDRCalendarNameProviderImpl extends CalendarNameProviderImpl implements AvailableLanguageTags{

public CLDRCalendarNameProviderImpl(Type type, Set<String> langtags) {
super(type, langtags);
}

@Override
public boolean isSupportedLocale(Locale locale) {
if (Locale.ROOT.equals(locale)) {
return true;
}
String calendarType = null;
if (locale.hasExtensions()) {
calendarType = locale.getUnicodeLocaleType("ca");
locale = locale.stripExtensions();
}
if (calendarType != null) {
switch (calendarType) {
case "buddhist":
case "japanese":
case "gregory":
case "islamic":
case "roc":
break;
default:
// Unknown calendar type
return false;
}
}
return LocaleProviderAdapter.forType(Type.CLDR).isSupportedProviderLocale(locale, langtags);
}
}

@ -45,6 +45,7 @@ import java.util.Set;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
import java.util.spi.CalendarDataProvider;
import java.util.spi.CalendarNameProvider;
import java.util.spi.TimeZoneNameProvider;
import sun.util.locale.provider.JRELocaleProviderAdapter;
import sun.util.locale.provider.LocaleDataMetaInfo;
@ -132,6 +133,24 @@ public class CLDRLocaleProviderAdapter extends JRELocaleProviderAdapter {
return calendarDataProvider;
}

@Override
public CalendarNameProvider getCalendarNameProvider() {
if (calendarNameProvider == null) {
CalendarNameProvider provider = AccessController.doPrivileged(
(PrivilegedAction<CalendarNameProvider>) ()
-> new CLDRCalendarNameProviderImpl(
getAdapterType(),
getLanguageTagSet("FormatData")));

synchronized (this) {
if (calendarNameProvider == null) {
calendarNameProvider = provider;
}
}
}
return calendarNameProvider;
}

@Override
public CollatorProvider getCollatorProvider() {
return null;
@ -166,7 +185,7 @@ public class CLDRLocaleProviderAdapter extends JRELocaleProviderAdapter {
return locs;
}

private Locale applyAliases(Locale loc) {
private static Locale applyAliases(Locale loc) {
if (langAliasesMap.isEmpty()) {
langAliasesMap = baseMetaInfo.getLanguageAliasMap();
}
@ -264,19 +283,18 @@ public class CLDRLocaleProviderAdapter extends JRELocaleProviderAdapter {
}

/**
* This method returns equivalent CLDR supported locale for zh-HK,
* no, no-NO locales so that COMPAT locales do not precede
* those locales during ResourceBundle search path.
* This method returns equivalent CLDR supported locale
* for no, no-NO locales so that COMPAT locales do not precede
* those locales during ResourceBundle search path, also if an alias exists for a locale,
* it returns equivalent locale, e.g for zh_HK it returns zh_Hant-HK.
*/
private static Locale getEquivalentLoc(Locale locale) {
switch (locale.toString()) {
case "zh_HK":
return Locale.forLanguageTag("zh-Hant-HK");
case "no":
case "no_NO":
return Locale.forLanguageTag("nb");
}
return locale;
return applyAliases(locale);
}

@Override

@ -42,8 +42,8 @@ import sun.util.calendar.Era;
* @author Naoto Sato
*/
public class CalendarNameProviderImpl extends CalendarNameProvider implements AvailableLanguageTags {
private final LocaleProviderAdapter.Type type;
private final Set<String> langtags;
protected final LocaleProviderAdapter.Type type;
protected final Set<String> langtags;

public CalendarNameProviderImpl(LocaleProviderAdapter.Type type, Set<String> langtags) {
this.type = type;
@ -248,11 +248,8 @@ public class CalendarNameProviderImpl extends CalendarNameProvider implements Av
if (langtags.contains(locale.toLanguageTag())) {
return true;
}
if (type == LocaleProviderAdapter.Type.JRE) {
String oldname = locale.toString().replace('_', '-');
return langtags.contains(oldname);
}
return false;
String oldname = locale.toString().replace('_', '-');
return langtags.contains(oldname);
}

@Override

@ -131,7 +131,7 @@ public class JRELocaleProviderAdapter extends LocaleProviderAdapter implements R
private volatile LocaleNameProvider localeNameProvider;
protected volatile TimeZoneNameProvider timeZoneNameProvider;
protected volatile CalendarDataProvider calendarDataProvider;
private volatile CalendarNameProvider calendarNameProvider;
protected volatile CalendarNameProvider calendarNameProvider;

private volatile CalendarProvider calendarProvider;
private volatile JavaTimeDateTimePatternProvider javaTimeDateTimePatternProvider;

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -297,7 +297,23 @@ static int ParseLocale(JNIEnv* env, int cat, char ** std_language, char ** std_s
if (strcmp(p, "EUC-JP") == 0) {
*std_encoding = "EUC-JP-LINUX";
}
#else
#endif

#ifdef _AIX
if (strcmp(p, "big5") == 0) {
/* On AIX Traditional Chinese Big5 codeset is mapped to IBM-950 */
*std_encoding = "IBM-950";
} else if (strcmp(p, "IBM-943") == 0) {
/*
* On AIX, IBM-943 is mapped to IBM-943C in which symbol 'yen' and
* 'overline' are replaced with 'backslash' and 'tilde' from ASCII
* making first 96 code points same as ASCII.
*/
*std_encoding = "IBM-943C";
}
#endif

#ifdef __solaris__
if (strcmp(p,"eucJP") == 0) {
/* For Solaris use customized vendor defined character
* customized EUC-JP converter

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,7 +45,10 @@ import sun.awt.AWTAccessor;
* for limited use outside of the core platform. This API may change
* drastically between update release, and it may even be
* removed or be moved to some other packages or classes.
*
* @deprecated This class is deprecated, no replacement.
*/
@Deprecated(since = "11", forRemoval = true)
public final class SecurityWarning {

/**

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -308,7 +308,12 @@ final class EventDispatcher implements Runnable {
* called from auto-closing clips when their closed() method is called.
*/
void autoClosingClipClosed(AutoClosingClip clip) {
// nothing to do -- is removed from arraylist above
synchronized(autoClosingClips) {
int index = getAutoClosingClipIndex(clip);
if (index != -1) {
autoClosingClips.remove(index);
}
}
}

@ -173,29 +173,31 @@ public final class JavaSoundAudioClip implements AudioClip, MetaEventListener, L
if (DEBUG || Printer.debug) Printer.debug("JavaSoundAudioClip.startImpl(loop="+loop+")");
try {
if (clip != null) {
if (!clip.isOpen()) {
if (DEBUG || Printer.trace)Printer.trace("JavaSoundAudioClip: clip.open()");
clip.open(loadedAudioFormat, loadedAudio, 0, loadedAudioByteLength);
} else {
if (DEBUG || Printer.trace)Printer.trace("JavaSoundAudioClip: clip.flush()");
clip.flush();
if (loop != clipLooping) {
// need to stop in case the looped status changed
if (DEBUG || Printer.trace)Printer.trace("JavaSoundAudioClip: clip.stop()");
clip.stop();
// We need to disable autoclosing mechanism otherwise the clip
// can be closed after "!clip.isOpen()" check, because of
// previous inactivity.
clip.setAutoClosing(false);
try {
if (!clip.isOpen()) {
clip.open(loadedAudioFormat, loadedAudio, 0,
loadedAudioByteLength);
} else {
clip.flush();
if (loop != clipLooping) {
// need to stop in case the looped status changed
clip.stop();
}
}
clip.setFramePosition(0);
if (loop) {
clip.loop(Clip.LOOP_CONTINUOUSLY);
} else {
clip.start();
}
clipLooping = loop;
} finally {
clip.setAutoClosing(true);
}
clip.setFramePosition(0);
if (loop) {
if (DEBUG || Printer.trace)Printer.trace("JavaSoundAudioClip: clip.loop()");
clip.loop(Clip.LOOP_CONTINUOUSLY);
} else {
if (DEBUG || Printer.trace)Printer.trace("JavaSoundAudioClip: clip.start()");
clip.start();
}
clipLooping = loop;
if (DEBUG || Printer.debug)Printer.debug("Clip should be playing/looping");

} else if (datapusher != null ) {
datapusher.start(loop);
if (DEBUG || Printer.debug)Printer.debug("Stream should be playing/looping");

@ -5987,7 +5987,8 @@ public abstract class Component implements ImageObserver, MenuContainer,
* {@code InputMethodRequests} instance.
* If listener {@code l} is {@code null},
* no exception is thrown and no action is performed.
* <p>Refer to <a href="{@docRoot}/java/awt/doc-files/AWTThreadIssues.html#ListenersThreads"
* <p>Refer to
* <a href="{@docRoot}/java.desktop/java/awt/doc-files/AWTThreadIssues.html#ListenersThreads"
* >AWT Threading Issues</a> for details on AWT's threading model.
*
* @param l the input method listener

@ -90,7 +90,8 @@ public interface Shape {
* representation.
*
* <p>
* Note that the <a href="{@docRoot}/java/awt/Shape.html#def_insideness">
* Note that the
* <a href="{@docRoot}/java.desktop/java/awt/Shape.html#def_insideness">
* definition of insideness</a> can lead to situations where points
* on the defining outline of the {@code shape} may not be considered
* contained in the returned {@code bounds} object, but only in cases
@ -135,7 +136,8 @@ public interface Shape {
* store the dimensions.
*
* <p>
* Note that the <a href="{@docRoot}/java/awt/Shape.html#def_insideness">
* Note that the
* <a href="{@docRoot}/java.desktop/java/awt/Shape.html#def_insideness">
* definition of insideness</a> can lead to situations where points
* on the defining outline of the {@code shape} may not be considered
* contained in the returned {@code bounds} object, but only in cases
@ -169,7 +171,7 @@ public interface Shape {
/**
* Tests if the specified coordinates are inside the boundary of the
* {@code Shape}, as described by the
* <a href="{@docRoot}/java/awt/Shape.html#def_insideness">
* <a href="{@docRoot}/java.desktop/java/awt/Shape.html#def_insideness">
* definition of insideness</a>.
* @param x the specified X coordinate to be tested
* @param y the specified Y coordinate to be tested
@ -183,7 +185,7 @@ public interface Shape {
/**
* Tests if a specified {@link Point2D} is inside the boundary
* of the {@code Shape}, as described by the
* <a href="{@docRoot}/java/awt/Shape.html#def_insideness">
* <a href="{@docRoot}/java.desktop/java/awt/Shape.html#def_insideness">
* definition of insideness</a>.
* @param p the specified {@code Point2D} to be tested
* @return {@code true} if the specified {@code Point2D} is

@ -198,9 +198,7 @@ class IIOAttr extends IIOMetadataNode implements Attr {

/**
* A class representing a node in a meta-data tree, which implements
* the <a
* href="../../../../api/org/w3c/dom/Element.html">
* {@code org.w3c.dom.Element}</a> interface and additionally allows
* the {@link Element org.w3c.dom.Element} interface and additionally allows
* for the storage of non-textual objects via the
* {@code getUserObject} and {@code setUserObject} methods.
*

@ -151,7 +151,7 @@ public class JButton extends AbstractButton implements Accessible {
* @see UIDefaults#getUI
*/
@BeanProperty(bound = false, expert = true, description
= "A string that specifies the name of the L&F class.")
= "A string that specifies the name of the L&F class.")
public String getUIClassID() {
return uiClassID;
}

@ -235,7 +235,7 @@ public class JCheckBox extends JToggleButton implements Accessible {
* @see UIDefaults#getUI
*/
@BeanProperty(bound = false, expert = true, description
= "A string that specifies the name of the L&F class")
= "A string that specifies the name of the L&F class")
public String getUIClassID() {
return uiClassID;
}

@ -861,7 +861,7 @@ public class JFileChooser extends JComponent implements Accessible {
* @since 1.3
*/
@BeanProperty(preferred = true, description
= "Sets whether the approve & cancel buttons are shown.")
= "Sets whether the approve & cancel buttons are shown.")
public void setControlButtonsAreShown(boolean b) {
if(controlsShown == b) {
return;
@ -1838,7 +1838,7 @@ public class JFileChooser extends JComponent implements Accessible {
* @see UIDefaults#getUI
*/
@BeanProperty(bound = false, expert = true, description
= "A string that specifies the name of the L&F class.")
= "A string that specifies the name of the L&F class.")
public String getUIClassID() {
return uiClassID;
}

@ -127,7 +127,7 @@ public class JPanel extends JComponent implements Accessible
}

/**
* Returns the look and feel (L&amp;F) object that renders this component.
* Returns the look and feel (L&F) object that renders this component.
*
* @return the PanelUI object that renders this component
* @since 1.4
@ -159,7 +159,7 @@ public class JPanel extends JComponent implements Accessible
* @see UIDefaults#getUI
*/
@BeanProperty(bound = false, expert = true, description
= "A string that specifies the name of the L&F class.")
= "A string that specifies the name of the L&F class.")
public String getUIClassID() {
return uiClassID;
}

@ -201,7 +201,7 @@ public class JRadioButton extends JToggleButton implements Accessible {
* @see UIDefaults#getUI
*/
@BeanProperty(bound = false, expert = true, description
= "A string that specifies the name of the L&F class.")
= "A string that specifies the name of the L&F class.")
public String getUIClassID() {
return uiClassID;
}

@ -377,7 +377,7 @@ public class JSplitPane extends JComponent implements Accessible
* @return the <code>SplitPaneUI</code> object that renders this component
*/
@BeanProperty(bound = false, expert = true, description
= "The L&F object that renders this component.")
= "The L&F object that renders this component.")
public SplitPaneUI getUI() {
return (SplitPaneUI)ui;
}
@ -404,7 +404,7 @@ public class JSplitPane extends JComponent implements Accessible
* @see UIDefaults#getUI
*/
@BeanProperty(bound = false, expert = true, description
= "A string that specifies the name of the L&F class.")
= "A string that specifies the name of the L&F class.")
public String getUIClassID() {
return uiClassID;
}
@ -824,7 +824,7 @@ public class JSplitPane extends JComponent implements Accessible
* <code>null</code>
*/
@BeanProperty(bound = false, description
= "The minimum location of the divider from the L&F.")
= "The minimum location of the divider from the L&F.")
public int getMinimumDividerLocation() {
SplitPaneUI ui = getUI();


@ -195,7 +195,7 @@ public class JToggleButton extends AbstractButton implements Accessible {
* @see UIDefaults#getUI
*/
@BeanProperty(bound = false, description
= "A string that specifies the name of the L&F class")
= "A string that specifies the name of the L&F class")
public String getUIClassID() {
return uiClassID;
}

@ -1,4 +1,4 @@
## Harfbuzz v1.8.1
## Harfbuzz v1.8.2

### Harfbuzz License

@ -489,10 +489,10 @@ hb_blob_t::try_make_writable (void)

#if defined(_WIN32) || defined(__CYGWIN__)
# include <windows.h>
#endif

#ifndef _O_BINARY
# define _O_BINARY 0
#else
# ifndef _O_BINARY
# define _O_BINARY 0
# endif
#endif

#ifndef MAP_NORESERVE
@ -517,7 +517,7 @@ _hb_mapped_file_destroy (hb_mapped_file_t *file)
UnmapViewOfFile (file->contents);
CloseHandle (file->mapping);
#else
free (file->contents);
assert (0); // If we don't have mmap we shouldn't reach here
#endif

free (file);
@ -534,77 +534,103 @@ _hb_mapped_file_destroy (hb_mapped_file_t *file)
hb_blob_t *
hb_blob_create_from_file (const char *file_name)
{
// Adopted from glib's gmappedfile.c with Matthias Clasen and
// Allison Lortie permission but changed a lot to suit our need.
bool writable = false;
hb_memory_mode_t mm = HB_MEMORY_MODE_READONLY_MAY_MAKE_WRITABLE;
/* Adopted from glib's gmappedfile.c with Matthias Clasen and
Allison Lortie permission but changed a lot to suit our need. */
#if defined(HAVE_MMAP) && !defined(HB_NO_MMAP)
hb_mapped_file_t *file = (hb_mapped_file_t *) calloc (1, sizeof (hb_mapped_file_t));
if (unlikely (!file)) return hb_blob_get_empty ();

#ifdef HAVE_MMAP
int fd = open (file_name, (writable ? O_RDWR : O_RDONLY) | _O_BINARY, 0);
# define CLOSE close
int fd = open (file_name, O_RDONLY | _O_BINARY, 0);
if (unlikely (fd == -1)) goto fail_without_close;

struct stat st;
if (unlikely (fstat (fd, &st) == -1)) goto fail;

// See https://github.com/GNOME/glib/blob/f9faac7/glib/gmappedfile.c#L139-L142
if (unlikely (st.st_size == 0 && S_ISREG (st.st_mode))) goto fail;

file->length = (unsigned long) st.st_size;
file->contents = (char *) mmap (nullptr, file->length,
writable ? PROT_READ|PROT_WRITE : PROT_READ,
file->contents = (char *) mmap (nullptr, file->length, PROT_READ,
MAP_PRIVATE | MAP_NORESERVE, fd, 0);

if (unlikely (file->contents == MAP_FAILED)) goto fail;

#elif defined(_WIN32) || defined(__CYGWIN__)
HANDLE fd = CreateFile (file_name,
writable ? GENERIC_READ|GENERIC_WRITE : GENERIC_READ,
FILE_SHARE_READ, nullptr, OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED, nullptr);
# define CLOSE CloseHandle
close (fd);

return hb_blob_create (file->contents, file->length,
HB_MEMORY_MODE_READONLY_MAY_MAKE_WRITABLE, (void *) file,
(hb_destroy_func_t) _hb_mapped_file_destroy);

fail:
close (fd);
fail_without_close:
free (file);

#elif (defined(_WIN32) || defined(__CYGWIN__)) && !defined(HB_NO_MMAP)
hb_mapped_file_t *file = (hb_mapped_file_t *) calloc (1, sizeof (hb_mapped_file_t));
if (unlikely (!file)) return hb_blob_get_empty ();

HANDLE fd = CreateFile (file_name, GENERIC_READ, FILE_SHARE_READ, nullptr,
OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL|FILE_FLAG_OVERLAPPED,
nullptr);

if (unlikely (fd == INVALID_HANDLE_VALUE)) goto fail_without_close;

file->length = (unsigned long) GetFileSize (fd, nullptr);
file->mapping = CreateFileMapping (fd, nullptr,
writable ? PAGE_WRITECOPY : PAGE_READONLY,
0, 0, nullptr);
file->mapping = CreateFileMapping (fd, nullptr, PAGE_READONLY, 0, 0, nullptr);
if (unlikely (file->mapping == nullptr)) goto fail;

file->contents = (char *) MapViewOfFile (file->mapping,
writable ? FILE_MAP_COPY : FILE_MAP_READ,
0, 0, 0);
file->contents = (char *) MapViewOfFile (file->mapping, FILE_MAP_READ, 0, 0, 0);
if (unlikely (file->contents == nullptr)) goto fail;

#else
mm = HB_MEMORY_MODE_WRITABLE;

FILE *fd = fopen (file_name, "rb");
# define CLOSE fclose
if (unlikely (!fd)) goto fail_without_close;

fseek (fd, 0, SEEK_END);
file->length = ftell (fd);
rewind (fd);
file->contents = (char *) malloc (file->length);
if (unlikely (!file->contents)) goto fail;

if (unlikely (fread (file->contents, 1, file->length, fd) != file->length))
goto fail;

#endif

CLOSE (fd);
return hb_blob_create (file->contents, file->length, mm, (void *) file,
CloseHandle (fd);
return hb_blob_create (file->contents, file->length,
HB_MEMORY_MODE_READONLY_MAY_MAKE_WRITABLE, (void *) file,
(hb_destroy_func_t) _hb_mapped_file_destroy);

fail:
CLOSE (fd);
#undef CLOSE
CloseHandle (fd);
fail_without_close:
free (file);

#endif

/* The following tries to read a file without knowing its size beforehand
It's used as a fallback for systems without mmap or to read from pipes */
unsigned long len = 0, allocated = BUFSIZ * 16;
char *data = (char *) malloc (allocated);
if (unlikely (data == nullptr)) return hb_blob_get_empty ();

FILE *fp = fopen (file_name, "rb");
if (unlikely (fp == nullptr)) goto fread_fail_without_close;

while (!feof (fp))
{
if (allocated - len < BUFSIZ)
{
allocated *= 2;
/* Don't allocate and go more than ~536MB, our mmap reader still
can cover files like that but lets limit our fallback reader */
if (unlikely (allocated > (2 << 28))) goto fread_fail;
char *new_data = (char *) realloc (data, allocated);
if (unlikely (new_data == nullptr)) goto fread_fail;
data = new_data;
}

unsigned long addition = fread (data + len, 1, allocated - len, fp);

int err = ferror (fp);
#ifdef EINTR // armcc doesn't have it
if (unlikely (err == EINTR)) continue;
#endif
if (unlikely (err)) goto fread_fail;

len += addition;
}

return hb_blob_create (data, len, HB_MEMORY_MODE_WRITABLE, data,
(hb_destroy_func_t) free);

fread_fail:
fclose (fp);
fread_fail_without_close:
free (data);
return hb_blob_get_empty ();
}

@ -286,6 +286,197 @@ struct TTCHeader
} u;
};

/*
* Mac Resource Fork
*/

struct ResourceRefItem
{
inline bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
// actual data sanitization is done on ResourceForkHeader sanitizer
return_trace (likely (c->check_struct (this)));
}

HBINT16 id; /* Resource ID, is really should be signed? */
HBINT16 nameOffset; /* Offset from beginning of resource name list
* to resource name, minus means there is no */
HBUINT8 attr; /* Resource attributes */
HBUINT24 dataOffset; /* Offset from beginning of resource data to
* data for this resource */
HBUINT32 reserved; /* Reserved for handle to resource */
public:
DEFINE_SIZE_STATIC (12);
};

struct ResourceTypeItem
{
inline bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
// RefList sanitization is done on ResourceMap sanitizer
return_trace (likely (c->check_struct (this)));
}

inline unsigned int get_resource_count () const
{
return numRes + 1;
}

inline bool is_sfnt () const
{
return type == HB_TAG ('s','f','n','t');
}

inline const ResourceRefItem& get_ref_item (const void *base,
unsigned int i) const
{
return (base+refList)[i];
}

protected:
Tag type; /* Resource type */
HBUINT16 numRes; /* Number of resource this type in map minus 1 */
OffsetTo<UnsizedArrayOf<ResourceRefItem> >
refList; /* Offset from beginning of resource type list
* to reference list for this type */
public:
DEFINE_SIZE_STATIC (8);
};

struct ResourceMap
{
inline bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (unlikely (!c->check_struct (this)))
return_trace (false);
for (unsigned int i = 0; i < get_types_count (); ++i)
{
const ResourceTypeItem& type = get_type (i);
if (unlikely (!type.sanitize (c)))
return_trace (false);
for (unsigned int j = 0; j < type.get_resource_count (); ++j)
if (unlikely (!get_ref_item (type, j).sanitize (c)))
return_trace (false);
}
return_trace (true);
}

inline const ResourceTypeItem& get_type (unsigned int i) const
{
// Why offset from the second byte of the object? I'm not sure
return ((&reserved[2])+typeList)[i];
}

inline unsigned int get_types_count () const
{
return nTypes + 1;
}

inline const ResourceRefItem &get_ref_item (const ResourceTypeItem &type,
unsigned int i) const
{
return type.get_ref_item (&(this+typeList), i);
}

inline const PString& get_name (const ResourceRefItem &item,
unsigned int i) const
{
if (item.nameOffset == -1)
return Null (PString);

return StructAtOffset<PString> (this, nameList + item.nameOffset);
}

protected:
HBUINT8 reserved[16]; /* Reserved for copy of resource header */
LOffsetTo<ResourceMap>
reserved1; /* Reserved for handle to next resource map */
HBUINT16 reserved2; /* Reserved for file reference number */
HBUINT16 attr; /* Resource fork attribute */
OffsetTo<UnsizedArrayOf<ResourceTypeItem> >
typeList; /* Offset from beginning of map to
* resource type list */
HBUINT16 nameList; /* Offset from beginning of map to
* resource name list */
HBUINT16 nTypes; /* Number of types in the map minus 1 */
public:
DEFINE_SIZE_STATIC (30);
};

struct ResourceForkHeader
{
inline unsigned int get_face_count () const
{
const ResourceMap &resource_map = this+map;
for (unsigned int i = 0; i < resource_map.get_types_count (); ++i)
{
const ResourceTypeItem& type = resource_map.get_type (i);
if (type.is_sfnt ())
return type.get_resource_count ();
}
return 0;
}

inline const LArrayOf<HBUINT8>& get_data (const ResourceTypeItem& type,
unsigned int idx) const
{
const ResourceMap &resource_map = this+map;
unsigned int offset = dataOffset;
offset += resource_map.get_ref_item (type, idx).dataOffset;
return StructAtOffset<LArrayOf<HBUINT8> > (this, offset);
}

inline const OpenTypeFontFace& get_face (unsigned int idx) const
{
const ResourceMap &resource_map = this+map;
for (unsigned int i = 0; i < resource_map.get_types_count (); ++i)
{
const ResourceTypeItem& type = resource_map.get_type (i);
if (type.is_sfnt () && idx < type.get_resource_count ())
return (OpenTypeFontFace&) get_data (type, idx).arrayZ;
}
return Null (OpenTypeFontFace);
}

inline bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (unlikely (!c->check_struct (this)))
return_trace (false);

const ResourceMap &resource_map = this+map;
if (unlikely (!resource_map.sanitize (c)))
return_trace (false);

for (unsigned int i = 0; i < resource_map.get_types_count (); ++i)
{
const ResourceTypeItem& type = resource_map.get_type (i);
for (unsigned int j = 0; j < type.get_resource_count (); ++j)
{
const LArrayOf<HBUINT8>& data = get_data (type, j);
if (unlikely (!(data.sanitize (c) &&
((OpenTypeFontFace&) data.arrayZ).sanitize (c))))
return_trace (false);
}
}

return_trace (true);
}

protected:
HBUINT32 dataOffset; /* Offset from beginning of resource fork
* to resource data */
LOffsetTo<ResourceMap>
map; /* Offset from beginning of resource fork
* to resource map */
HBUINT32 dataLen; /* Length of resource data */
HBUINT32 mapLen; /* Length of resource map */
public:
DEFINE_SIZE_STATIC (16);
};

/*
* OpenType Font File
@ -299,6 +490,7 @@ struct OpenTypeFontFile
CFFTag = HB_TAG ('O','T','T','O'), /* OpenType with Postscript outlines */
TrueTypeTag = HB_TAG ( 0 , 1 , 0 , 0 ), /* OpenType with TrueType outlines */
TTCTag = HB_TAG ('t','t','c','f'), /* TrueType Collection */
DFontTag = HB_TAG ( 0 , 0 , 1 , 0 ), /* DFont Mac Resource Fork */
TrueTag = HB_TAG ('t','r','u','e'), /* Obsolete Apple TrueType */
Typ1Tag = HB_TAG ('t','y','p','1') /* Obsolete Apple Type1 font in SFNT container */
};
@ -313,6 +505,7 @@ struct OpenTypeFontFile
case Typ1Tag:
case TrueTypeTag: return 1;
case TTCTag: return u.ttcHeader.get_face_count ();
// case DFontTag: return u.rfHeader.get_face_count ();
default: return 0;
}
}
@ -327,6 +520,7 @@ struct OpenTypeFontFile
case Typ1Tag:
case TrueTypeTag: return u.fontFace;
case TTCTag: return u.ttcHeader.get_face (i);
// case DFontTag: return u.rfHeader.get_face (i);
default: return Null(OpenTypeFontFace);
}
}
@ -353,6 +547,7 @@ struct OpenTypeFontFile
case Typ1Tag:
case TrueTypeTag: return_trace (u.fontFace.sanitize (c));
case TTCTag: return_trace (u.ttcHeader.sanitize (c));
// case DFontTag: return_trace (u.rfHeader.sanitize (c));
default: return_trace (true);
}
}
@ -362,6 +557,7 @@ struct OpenTypeFontFile
Tag tag; /* 4-byte identifier. */
OpenTypeFontFace fontFace;
TTCHeader ttcHeader;
ResourceForkHeader rfHeader;
} u;
public:
DEFINE_SIZE_UNION (4, tag);

@ -1033,6 +1033,7 @@ struct ArrayOf
DEFINE_SIZE_ARRAY (sizeof (LenType), arrayZ);
};
template <typename Type> struct LArrayOf : ArrayOf<Type, HBUINT32> {};
typedef ArrayOf<HBUINT8, HBUINT8> PString;

/* Array of Offset's */
template <typename Type, typename OffsetType=HBUINT16>

@ -832,7 +832,12 @@ struct CoverageFormat2
c = &c_;
coverage = 0;
i = 0;
j = c->rangeRecord.len ? c_.rangeRecord[0].start : 0;
j = c->rangeRecord.len ? c->rangeRecord[0].start : 0;
if (unlikely (c->rangeRecord[0].start > c->rangeRecord[0].end))
{
/* Broken table. Skip. */
i = c->rangeRecord.len;
}
}
inline bool more (void) { return i < c->rangeRecord.len; }
inline void next (void)
@ -842,7 +847,14 @@ struct CoverageFormat2
i++;
if (more ())
{
hb_codepoint_t old = j;
j = c->rangeRecord[i].start;
if (unlikely (j <= old))
{
/* Broken table. Skip. Important to avoid DoS. */
i = c->rangeRecord.len;
return;
}
coverage = c->rangeRecord[i].value;
}
return;
@ -855,7 +867,8 @@ struct CoverageFormat2

private:
const struct CoverageFormat2 *c;
unsigned int i, j, coverage;
unsigned int i, coverage;
hb_codepoint_t j;
};
private:

@ -1074,10 +1074,13 @@ struct MarkBasePosFormat1
if (!skippy_iter.prev ()) return_trace (false);
/* We only want to attach to the first of a MultipleSubst sequence.
* https://github.com/harfbuzz/harfbuzz/issues/740
* Reject others. */
* Reject others...
* ...but stop if we find a mark in the MultipleSubst sequence:
* https://github.com/harfbuzz/harfbuzz/issues/1020 */
if (!_hb_glyph_info_multiplied (&buffer->info[skippy_iter.idx]) ||
0 == _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) ||
(skippy_iter.idx == 0 ||
_hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx - 1]) ||
_hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx]) !=
_hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx - 1]) ||
_hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) !=

@ -668,8 +668,9 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan,
*
* Reports suggest that in some scripts Uniscribe does this only if there
* is *not* a Halant after last consonant already (eg. Kannada), while it
* does it unconditionally in other scripts (eg. Malayalam). We don't
* currently know about other scripts, so we single out Malayalam for now.
* does it unconditionally in other scripts (eg. Malayalam, Bengali). We
* don't currently know about other scripts, so we whitelist Malayalam and
* Bengali for now.
*
* Kannada test case:
* U+0C9A,U+0CCD,U+0C9A,U+0CCD
@ -679,10 +680,16 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan,
* Malayalam test case:
* U+0D38,U+0D4D,U+0D31,U+0D4D,U+0D31,U+0D4D
* With lohit-ttf-20121122/Lohit-Malayalam.ttf
*
* Bengali test case
* U+0998,U+09CD,U+09AF,U+09CD
* With Windows XP vrinda.ttf
* https://github.com/harfbuzz/harfbuzz/issues/1073
*/
if (indic_plan->is_old_spec)
{
bool disallow_double_halants = buffer->props.script != HB_SCRIPT_MALAYALAM;
bool disallow_double_halants = buffer->props.script != HB_SCRIPT_MALAYALAM &&
buffer->props.script != HB_SCRIPT_BENGALI;
for (unsigned int i = base + 1; i < end; i++)
if (info[i].indic_category() == OT_H)
{

@ -372,22 +372,25 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan,
break;
}

/* Note! syllable() is a one-byte field. */
for (unsigned int i = base; i < end; i++)
if (info[i].syllable() != 255)
{
unsigned int max = i;
unsigned int j = start + info[i].syllable();
while (j != i)
if (unlikely (end - start >= 127))
buffer->merge_clusters (start, end);
else
/* Note! syllable() is a one-byte field. */
for (unsigned int i = base; i < end; i++)
if (info[i].syllable() != 255)
{
max = MAX (max, j);
unsigned int next = start + info[j].syllable();
info[j].syllable() = 255; /* So we don't process j later again. */
j = next;
unsigned int max = i;
unsigned int j = start + info[i].syllable();
while (j != i)
{
max = MAX (max, j);
unsigned int next = start + info[j].syllable();
info[j].syllable() = 255; /* So we don't process j later again. */
j = next;
}
if (i != max)
buffer->merge_clusters (i, max + 1);
}
if (i != max)
buffer->merge_clusters (i, max + 1);
}

/* Put syllable back in. */
for (unsigned int i = start; i < end; i++)

@ -1228,13 +1228,14 @@ struct hb_bytes_t
/* fallback for round() */
#if !defined (HAVE_ROUND) && !defined (HAVE_DECL_ROUND)
static inline double
round (double x)
_hb_round (double x)
{
if (x >= 0)
return floor (x + 0.5);
else
return ceil (x - 0.5);
}
#define round(x) _hb_round(x)
#endif

@ -38,9 +38,9 @@ HB_BEGIN_DECLS

#define HB_VERSION_MAJOR 1
#define HB_VERSION_MINOR 8
#define HB_VERSION_MICRO 1
#define HB_VERSION_MICRO 2

#define HB_VERSION_STRING "1.8.1"
#define HB_VERSION_STRING "1.8.2"

#define HB_VERSION_ATLEAST(major,minor,micro) \
((major)*10000+(minor)*100+(micro) <= \

@ -29,6 +29,16 @@
#include <dlfcn.h>
#include <cups/cups.h>
#include <cups/ppd.h>
/*
* CUPS #define's __attribute__(x) to be empty unless __GNUC__ is defined.
* However OpenJDK officially uses the SunStudio compiler on Solaris.
* We need to #undef this else it breaks use of this keyword used by JNIEXPORT.
* See: https://github.com/apple/cups/issues/5349
*/
#ifdef __SUNPRO_C
#undef __attribute__
#endif


//#define CUPS_DEBUG

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -170,7 +170,7 @@ final class Win32ShellFolder2 extends ShellFolder {
}

// Known Folder data
static class KnownFolderDefinition {
static final class KnownFolderDefinition {
String guid;
int category;
String name;
@ -187,7 +187,10 @@ final class Win32ShellFolder2 extends ShellFolder {
String ftidType;
String path;
String saveLocation;
static final List<KnownFolderDefinition> libraries = getLibraries();
}

static final class KnownLibraries {
static final List<KnownFolderDefinition> INSTANCE = getLibraries();
}

static class FolderDisposer implements sun.java2d.DisposerRecord {
@ -625,7 +628,7 @@ final class Win32ShellFolder2 extends ShellFolder {
// this is a temp fix until java.io starts support Libraries
if( path != null && path.startsWith("::{") &&
path.toLowerCase().endsWith(".library-ms")) {
for (KnownFolderDefinition kf : KnownFolderDefinition.libraries) {
for (KnownFolderDefinition kf : KnownLibraries.INSTANCE) {
if (path.toLowerCase().endsWith(
"\\" + kf.relativePath.toLowerCase()) &&
path.toUpperCase().startsWith(

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1398,7 +1398,7 @@ JNIEXPORT jobjectArray JNICALL Java_sun_awt_shell_Win32ShellFolder2_loadKnownFol
DEFINE_FIELD_ID(field_defenitionFlags, cl, "defenitionFlags", "I");
DEFINE_FIELD_ID(field_ftidType, cl, "ftidType", "Ljava/lang/String;");

jobjectArray result;
jobjectArray result = NULL;
KNOWNFOLDERID* pFoldersIds = NULL;
UINT count = 0;
if (SUCCEEDED(pkfm->GetFolderIds(&pFoldersIds, &count))) {

@ -27,6 +27,7 @@ package jdk.internal.net.http;

import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.ArrayDeque;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import javax.net.ssl.SNIHostName;
@ -89,11 +90,30 @@ abstract class AbstractAsyncSSLConnection extends HttpConnection

final SSLEngine getEngine() { return engine; }

private static boolean contains(String[] rr, String target) {
for (String s : rr)
if (target.equalsIgnoreCase(s))
return true;
return false;
}

private static SSLParameters createSSLParameters(HttpClientImpl client,
ServerName serverName,
String[] alpn) {
SSLParameters sslp = client.sslParameters();
SSLParameters sslParameters = Utils.copySSLParameters(sslp);
// filter out unwanted protocols, if h2 only
if (alpn != null && alpn.length != 0 && !contains(alpn, "http/1.1")) {
ArrayDeque<String> l = new ArrayDeque<>();
for (String proto : sslParameters.getProtocols()) {
if (!proto.startsWith("SSL") && !proto.endsWith("v1.1") && !proto.endsWith("v1")) {
l.add(proto);
}
}
String[] a1 = l.toArray(new String[0]);
sslParameters.setProtocols(a1);
}

if (!disableHostnameVerification)
sslParameters.setEndpointIdentificationAlgorithm("HTTPS");
if (alpn != null) {
@ -112,10 +132,12 @@ abstract class AbstractAsyncSSLConnection extends HttpConnection
return sslParameters;
}


private static SSLEngine createEngine(SSLContext context, String serverName, int port,
SSLParameters sslParameters) {
SSLEngine engine = context.createSSLEngine(serverName, port);
engine.setUseClientMode(true);

engine.setSSLParameters(sslParameters);
return engine;
}

@ -316,7 +316,7 @@ final class Exchange<T> {
proxyResponse.version, true);
return MinimalFuture.completedFuture(syntheticResponse);
} else if (t != null) {
if (debug.on()) debug.log("checkFor407: no response - %s", t);
if (debug.on()) debug.log("checkFor407: no response - %s", (Object)t);
return MinimalFuture.failedFuture(t);
} else {
if (debug.on()) debug.log("checkFor407: all clear");

@ -386,8 +386,11 @@ class Http1AsyncReceiver {
// we have a flow List<ByteBuffer> upstream.
Http1AsyncDelegateSubscription subscription =
new Http1AsyncDelegateSubscription(scheduler, cancel, onSubscriptionError);
pending.onSubscribe(subscription);
this.delegate = delegate = pending;
try {
pending.onSubscribe(subscription);
} finally {
this.delegate = delegate = pending;
}
final Object captured = delegate;
if (debug.on())
debug.log("delegate is now " + captured
@ -485,10 +488,11 @@ class Http1AsyncReceiver {
error = ex;
}
}
final Throwable t = (recorded == null ? ex : recorded);
if (debug.on())
debug.log("recorded " + t + "\n\t delegate: " + delegate
+ "\t\t queue.isEmpty: " + queue.isEmpty(), ex);

final Throwable t = (recorded == null ? ex : recorded);
if (debug.on())
debug.log("recorded " + t + "\n\t delegate: " + delegate
+ "\t\t queue.isEmpty: " + queue.isEmpty(), ex);
if (Log.errors()) {
Log.logError("HTTP/1 read subscriber recorded error: {0} - {1}", describe(), t);
}

@ -257,6 +257,14 @@ class Http1Exchange<T> extends ExchangeImpl<T> {
.thenCompose(unused -> {
CompletableFuture<Void> cf = new MinimalFuture<>();
try {
asyncReceiver.whenFinished.whenComplete((r,t) -> {
if (t != null) {
if (debug.on())
debug.log("asyncReceiver finished (failed=%s)", (Object)t);
if (!headersSentCF.isDone())
headersSentCF.completeAsync(() -> this, executor);
}
});
connectFlows(connection);

if (debug.on()) debug.log("requestAction.headers");
@ -282,7 +290,8 @@ class Http1Exchange<T> extends ExchangeImpl<T> {

private void cancelIfFailed(Flow.Subscription s) {
asyncReceiver.whenFinished.whenCompleteAsync((r,t) -> {
if (debug.on()) debug.log("asyncReceiver finished (failed=%s)", t);
if (debug.on())
debug.log("asyncReceiver finished (failed=%s)", (Object)t);
if (t != null) {
s.cancel();
// Don't complete exceptionally here as 't'

@ -673,7 +673,11 @@ class Http2Connection {
client2.deleteConnection(this);
List<Stream<?>> c = new LinkedList<>(streams.values());
for (Stream<?> s : c) {
s.connectionClosing(t);
try {
s.connectionClosing(t);
} catch (Throwable e) {
Log.logError("Failed to close stream {0}: {1}", s.streamid, e);
}
}
connection.close();
}
@ -738,6 +742,9 @@ class Http2Connection {
}

if (!(frame instanceof ResetFrame)) {
if (frame instanceof DataFrame) {
dropDataFrame((DataFrame)frame);
}
if (isServerInitiatedStream(streamid)) {
if (streamid < nextPushStream) {
// trailing data on a cancelled push promise stream,
@ -776,6 +783,27 @@ class Http2Connection {
}
}

final void dropDataFrame(DataFrame df) {
if (closed) return;
if (debug.on()) {
debug.log("Dropping data frame for stream %d (%d payload bytes)",
df.streamid(), df.payloadLength());
}
ensureWindowUpdated(df);
}

final void ensureWindowUpdated(DataFrame df) {
try {
if (closed) return;
int length = df.payloadLength();
if (length > 0) {
windowUpdater.update(length);
}
} catch(Throwable t) {
Log.logError("Unexpected exception while updating window: {0}", (Object)t);
}
}

private <T> void handlePushPromise(Stream<T> parent, PushPromiseFrame pp)
throws IOException
{
@ -984,7 +1012,6 @@ class Http2Connection {
connection.channel().getLocalAddress(),
connection.address());
SettingsFrame sf = new SettingsFrame(clientSettings);
int initialWindowSize = sf.getParameter(INITIAL_WINDOW_SIZE);
ByteBuffer buf = framesEncoder.encodeConnectionPreface(PREFACE_BYTES, sf);
Log.logFrames(sf, "OUT");
// send preface bytes and SettingsFrame together
@ -997,9 +1024,20 @@ class Http2Connection {
Log.logTrace("Settings Frame sent");

// send a Window update for the receive buffer we are using
// minus the initial 64 K specified in protocol
final int len = windowUpdater.initialWindowSize - initialWindowSize;
if (len > 0) {
// minus the initial 64 K -1 specified in protocol:
// RFC 7540, Section 6.9.2:
// "[...] the connection flow-control window is set to the default
// initial window size until a WINDOW_UPDATE frame is received."
//
// Note that the default initial window size, not to be confused
// with the initial window size, is defined by RFC 7540 as
// 64K -1.
final int len = windowUpdater.initialWindowSize - DEFAULT_INITIAL_WINDOW_SIZE;
if (len != 0) {
if (Log.channel()) {
Log.logChannel("Sending initial connection window update frame: {0} ({1} - {2})",
len, windowUpdater.initialWindowSize, DEFAULT_INITIAL_WINDOW_SIZE);
}
windowUpdater.sendWindowUpdate(len);
}
// there will be an ACK to the windows update - which should
@ -1132,6 +1170,7 @@ class Http2Connection {

private Stream<?> registerNewStream(OutgoingHeaders<Stream<?>> oh) {
Stream<?> stream = oh.getAttachment();
assert stream.streamid == 0;
int streamid = nextstreamid;
nextstreamid += 2;
stream.registerStream(streamid);

@ -329,7 +329,18 @@ final class HttpClientImpl extends HttpClient implements Trackable {

private static SSLParameters getDefaultParams(SSLContext ctx) {
SSLParameters params = ctx.getSupportedSSLParameters();
params.setProtocols(new String[]{"TLSv1.2"});
String[] protocols = params.getProtocols();
boolean found13 = false;
for (String proto : protocols) {
if (proto.equals("TLSv1.3")) {
found13 = true;
break;
}
}
if (found13)
params.setProtocols(new String[] {"TLSv1.3", "TLSv1.2"});
else
params.setProtocols(new String[] {"TLSv1.2"});
return params;
}

@ -360,7 +360,6 @@ final class SocketTube implements FlowTube {
}
} catch (Throwable t) {
signalError(t);
subscription.cancel();
}
}

@ -424,6 +423,8 @@ final class SocketTube implements FlowTube {
}
completed = true;
readPublisher.signalError(error);
Flow.Subscription subscription = this.subscription;
if (subscription != null) subscription.cancel();
}

// A repeatable WriteEvent which is paused after firing and can
@ -468,7 +469,11 @@ final class SocketTube implements FlowTube {

@Override
public void cancel() {
if (cancelled) return;
if (debug.on()) debug.log("write: cancel");
if (Log.channel()) {
Log.logChannel("Cancelling write subscription");
}
dropSubscription();
upstreamSubscription.cancel();
}
@ -503,9 +508,7 @@ final class SocketTube implements FlowTube {
} catch (Throwable t) {
if (debug.on())
debug.log("write: error while requesting more: " + t);
cancelled = true;
signalError(t);
subscription.cancel();
} finally {
debugState("leaving requestMore: ");
}

@ -185,6 +185,7 @@ class Stream<T> extends ExchangeImpl<T> {
int size = Utils.remaining(dsts, Integer.MAX_VALUE);
if (size == 0 && finished) {
inputQ.remove();
connection.ensureWindowUpdated(df); // must update connection window
Log.logTrace("responseSubscriber.onComplete");
if (debug.on()) debug.log("incoming: onComplete");
sched.stop();
@ -197,7 +198,12 @@ class Stream<T> extends ExchangeImpl<T> {
inputQ.remove();
Log.logTrace("responseSubscriber.onNext {0}", size);
if (debug.on()) debug.log("incoming: onNext(%d)", size);
subscriber.onNext(dsts);
try {
subscriber.onNext(dsts);
} catch (Throwable t) {
connection.dropDataFrame(df); // must update connection window
throw t;
}
if (consumed(df)) {
Log.logTrace("responseSubscriber.onComplete");
if (debug.on()) debug.log("incoming: onComplete");
@ -215,6 +221,8 @@ class Stream<T> extends ExchangeImpl<T> {
}
} catch (Throwable throwable) {
errorRef.compareAndSet(null, throwable);
} finally {
if (sched.isStopped()) drainInputQueue();
}

Throwable t = errorRef.get();
@ -223,20 +231,35 @@ class Stream<T> extends ExchangeImpl<T> {
try {
if (!onCompleteCalled) {
if (debug.on())
debug.log("calling subscriber.onError: %s", (Object)t);
debug.log("calling subscriber.onError: %s", (Object) t);
subscriber.onError(t);
} else {
if (debug.on())
debug.log("already completed: dropping error %s", (Object)t);
debug.log("already completed: dropping error %s", (Object) t);
}
} catch (Throwable x) {
Log.logError("Subscriber::onError threw exception: {0}", (Object)t);
Log.logError("Subscriber::onError threw exception: {0}", (Object) t);
} finally {
cancelImpl(t);
drainInputQueue();
}
}
}

// must only be called from the scheduler schedule() loop.
// ensure that all received data frames are accounted for
// in the connection window flow control if the scheduler
// is stopped before all the data is consumed.
private void drainInputQueue() {
Http2Frame frame;
while ((frame = inputQ.poll()) != null) {
if (frame instanceof DataFrame) {
connection.dropDataFrame((DataFrame)frame);
}
}
}


// Callback invoked after the Response BodySubscriber has consumed the
// buffers contained in a DataFrame.
// Returns true if END_STREAM is reached, false otherwise.
@ -245,15 +268,19 @@ class Stream<T> extends ExchangeImpl<T> {
// The entire DATA frame payload is included in flow control,
// including the Pad Length and Padding fields if present
int len = df.payloadLength();
boolean endStream = df.getFlag(DataFrame.END_STREAM);
if (len == 0) return endStream;

connection.windowUpdater.update(len);

if (!df.getFlag(DataFrame.END_STREAM)) {
if (!endStream) {
// Don't send window update on a stream which is
// closed or half closed.
windowUpdater.update(len);
return false; // more data coming
}
return true; // end of stream

// true: end of stream; false: more data coming
return endStream;
}

boolean deRegister() {
@ -500,8 +527,8 @@ class Stream<T> extends ExchangeImpl<T> {
{
int amount = frame.getUpdate();
if (amount <= 0) {
Log.logTrace("Resetting stream: {0} %d, Window Update amount: %d\n",
streamid, streamid, amount);
Log.logTrace("Resetting stream: {0}, Window Update amount: {1}",
streamid, amount);
connection.resetStream(streamid, ResetFrame.FLOW_CONTROL_ERROR);
} else {
assert streamid != 0;
@ -1126,7 +1153,7 @@ class Stream<T> extends ExchangeImpl<T> {
connection.resetStream(streamid, ResetFrame.CANCEL);
}
}
} catch (IOException ex) {
} catch (Throwable ex) {
Log.logError(ex);
}
}
@ -1289,6 +1316,18 @@ class Stream<T> extends ExchangeImpl<T> {
int getStreamId() {
return streamid;
}

@Override
String dbgString() {
String dbg = dbgString;
if (dbg != null) return dbg;
if (streamid == 0) {
return connection.dbgString() + ":WindowUpdateSender(stream: ?)";
} else {
dbg = connection.dbgString() + ":WindowUpdateSender(stream: " + streamid + ")";
return dbgString = dbg;
}
}
}

/**

@ -25,6 +25,7 @@

package jdk.internal.net.http;

import jdk.internal.net.http.common.FlowTube;
import jdk.internal.net.http.common.Logger;
import jdk.internal.net.http.frame.SettingsFrame;
import jdk.internal.net.http.frame.WindowUpdateFrame;
@ -66,8 +67,9 @@ abstract class WindowUpdateSender {
abstract int getStreamId();

void update(int delta) {
if (debug.on()) debug.log("update: %d", delta);
if (received.addAndGet(delta) > limit) {
int rcv = received.addAndGet(delta);
if (debug.on()) debug.log("update: %d, received: %d, limit: %d", delta, rcv, limit);
if (rcv > limit) {
synchronized (this) {
int tosend = received.get();
if( tosend > limit) {
@ -83,8 +85,18 @@ abstract class WindowUpdateSender {
connection.sendUnorderedFrame(new WindowUpdateFrame(getStreamId(), delta));
}

volatile String dbgString;
String dbgString() {
return "WindowUpdateSender(stream: " + getStreamId() + ")";
String dbg = dbgString;
if (dbg != null) return dbg;
FlowTube tube = connection.connection.getConnectionFlow();
if (tube == null) {
return "WindowUpdateSender(stream: " + getStreamId() + ")";
} else {
int streamId = getStreamId();
dbg = connection.dbgString() + ":WindowUpdateSender(stream: " + streamId + ")";
return streamId == 0 ? dbg : (dbgString = dbg);
}
}

}

@ -33,6 +33,9 @@ import javax.net.ssl.SSLEngineResult.HandshakeStatus;
import javax.net.ssl.SSLEngineResult.Status;
import javax.net.ssl.SSLException;
import java.io.IOException;
import java.lang.ref.Reference;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
@ -93,6 +96,8 @@ public class SSLFlowDelegate {
// When handshake is in progress trying to wrap may produce no bytes.
private static final ByteBuffer NOTHING = ByteBuffer.allocate(0);
private static final String monProp = Utils.getProperty("jdk.internal.httpclient.monitorFlowDelegate");
private static final boolean isMonitored =
monProp != null && (monProp.equals("") || monProp.equalsIgnoreCase("true"));

final Executor exec;
final Reader reader;
@ -100,6 +105,7 @@ public class SSLFlowDelegate {
final SSLEngine engine;
final String tubeName; // hack
final CompletableFuture<String> alpnCF; // completes on initial handshake
final Monitorable monitor = isMonitored ? this::monitor : null; // prevent GC until SSLFD is stopped
volatile boolean close_notify_received;
final CompletableFuture<Void> readerCF;
final CompletableFuture<Void> writerCF;
@ -152,8 +158,7 @@ public class SSLFlowDelegate {
// Writer to the downWriter.
connect(downReader, downWriter);

if (monProp != null && (monProp.equals("") || monProp.equalsIgnoreCase("true")))
Monitor.add(this::monitor);
if (isMonitored) Monitor.add(monitor);
}

/**
@ -202,6 +207,7 @@ public class SSLFlowDelegate {
public String monitor() {
StringBuilder sb = new StringBuilder();
sb.append("SSL: id ").append(id);
sb.append(" ").append(dbgString());
sb.append(" HS state: " + states(handshakeState));
sb.append(" Engine state: " + engine.getHandshakeStatus().toString());
if (stateList != null) {
@ -293,8 +299,10 @@ public class SSLFlowDelegate {

@Override
public String toString() {
return "READER: " + super.toString() + " readBuf: " + readBuf.toString()
+ " count: " + count.toString();
return "READER: " + super.toString() + ", readBuf: " + readBuf.toString()
+ ", count: " + count.toString() + ", scheduler: "
+ (scheduler.isStopped() ? "stopped" : "running")
+ ", status: " + lastUnwrapStatus;
}

private void reallocReadBuf() {
@ -335,6 +343,7 @@ public class SSLFlowDelegate {
}
if (complete) {
this.completing = complete;
minBytesRequired = 0;
}
}
}
@ -395,13 +404,23 @@ public class SSLFlowDelegate {
// not enough data in the read buffer...
// no need to try to unwrap again unless we get more bytes
// than minBytesRequired = len in the read buffer.
minBytesRequired = len;
synchronized (readBufferLock) {
minBytesRequired = len;
// more bytes could already have been added...
assert readBuf.remaining() >= len;
// check if we have received some data, and if so
// we can just re-spin the loop
if (readBuf.remaining() > len) continue;
else if (this.completing) {
if (debug.on()) {
debugr.log("BUFFER_UNDERFLOW with EOF," +
" %d bytes non decrypted.", len);
}
// The channel won't send us any more data, and
// we are in underflow: we need to fail.
throw new IOException("BUFFER_UNDERFLOW with EOF, "
+ len + " bytes non decrypted.");
}
}
// request more data and return.
requestMore();
@ -429,6 +448,7 @@ public class SSLFlowDelegate {
} catch (IOException ex) {
errorCommon(ex);
handleError(ex);
return;
}
if (handshaking && !complete)
return;
@ -452,12 +472,13 @@ public class SSLFlowDelegate {
}
}

private volatile Status lastUnwrapStatus;
EngineResult unwrapBuffer(ByteBuffer src) throws IOException {
ByteBuffer dst = getAppBuffer();
int len = src.remaining();
while (true) {
SSLEngineResult sslResult = engine.unwrap(src, dst);
switch (sslResult.getStatus()) {
switch (lastUnwrapStatus = sslResult.getStatus()) {
case BUFFER_OVERFLOW:
// may happen if app size buffer was changed, or if
// our 'adaptiveBufferSize' guess was too small for
@ -507,7 +528,9 @@ public class SSLFlowDelegate {
}

public static class Monitor extends Thread {
final List<Monitorable> list;
final List<WeakReference<Monitorable>> list;
final List<FinalMonitorable> finalList;
final ReferenceQueue<Monitorable> queue = new ReferenceQueue<>();
static Monitor themon;

static {
@ -515,19 +538,61 @@ public class SSLFlowDelegate {
themon.start(); // uncomment to enable Monitor
}

// An instance used to temporarily store the
// last observable state of a monitorable object.
// When Monitor.remove(o) is called, we replace
// 'o' with a FinalMonitorable whose reference
// will be enqueued after the last observable state
// has been printed.
final class FinalMonitorable implements Monitorable {
final String finalState;
FinalMonitorable(Monitorable o) {
finalState = o.getInfo();
finalList.add(this);
}
@Override
public String getInfo() {
finalList.remove(this);
return finalState;
}
}

Monitor() {
super("Monitor");
setDaemon(true);
list = Collections.synchronizedList(new LinkedList<>());
finalList = new ArrayList<>(); // access is synchronized on list above
}

void addTarget(Monitorable o) {
list.add(o);
list.add(new WeakReference<>(o, queue));
}
void removeTarget(Monitorable o) {
// It can take a long time for GC to clean up references.
// Calling Monitor.remove() early helps removing noise from the
// logs/
synchronized (list) {
Iterator<WeakReference<Monitorable>> it = list.iterator();
while (it.hasNext()) {
Monitorable m = it.next().get();
if (m == null) it.remove();
if (o == m) {
it.remove();
break;
}
}
FinalMonitorable m = new FinalMonitorable(o);
addTarget(m);
Reference.reachabilityFence(m);
}
}

public static void add(Monitorable o) {
themon.addTarget(o);
}
public static void remove(Monitorable o) {
themon.removeTarget(o);
}

@Override
public void run() {
@ -536,7 +601,14 @@ public class SSLFlowDelegate {
while (true) {
Thread.sleep(20 * 1000);
synchronized (list) {
for (Monitorable o : list) {
Reference<? extends Monitorable> expired;
while ((expired = queue.poll()) != null) list.remove(expired);
for (WeakReference<Monitorable> ref : list) {
Monitorable o = ref.get();
if (o == null) continue;
if (o instanceof FinalMonitorable) {
ref.enqueue();
}
System.out.println(o.getInfo());
System.out.println("-------------------------");
}
@ -733,6 +805,7 @@ public class SSLFlowDelegate {
// downstream. Otherwise, we send the writeBuffer downstream
// and will allocate a new one next time.
volatile ByteBuffer writeBuffer;
private volatile Status lastWrappedStatus;
@SuppressWarnings("fallthrough")
EngineResult wrapBuffers(ByteBuffer[] src) throws SSLException {
long len = Utils.remaining(src);
@ -747,7 +820,7 @@ public class SSLFlowDelegate {
while (true) {
SSLEngineResult sslResult = engine.wrap(src, dst);
if (debugw.on()) debugw.log("SSLResult: " + sslResult);
switch (sslResult.getStatus()) {
switch (lastWrappedStatus = sslResult.getStatus()) {
case BUFFER_OVERFLOW:
// Shouldn't happen. We allocated buffer with packet size
// get it again if net buffer size was changed
@ -815,8 +888,10 @@ public class SSLFlowDelegate {

@Override
public String toString() {
return "WRITER: " + super.toString() +
" writeList size " + Integer.toString(writeList.size());
return "WRITER: " + super.toString()
+ ", writeList size: " + Integer.toString(writeList.size())
+ ", scheduler: " + (scheduler.isStopped() ? "stopped" : "running")
+ ", status: " + lastWrappedStatus;
//" writeList: " + writeList.toString();
}
}
@ -839,6 +914,7 @@ public class SSLFlowDelegate {
stopped = true;
reader.stop();
writer.stop();
if (isMonitored) Monitor.remove(monitor);
}

private Void stopOnError(Throwable currentlyUnused) {
@ -953,6 +1029,10 @@ public class SSLFlowDelegate {
case NEED_UNWRAP_AGAIN:
// do nothing else
// receiving-side data will trigger unwrap
if (caller == WRITER) {
reader.schedule();
return false;
}
break;
default:
throw new InternalError("Unexpected handshake status:"

@ -406,6 +406,21 @@ public class SSLTube implements FlowTube {
}
}

private void complete(DelegateWrapper subscriberImpl, Throwable t) {
try {
if (t == null) subscriberImpl.onComplete();
else subscriberImpl.onError(t);
if (debug.on()) {
debug.log("subscriber completed %s"
+ ((t == null) ? "normally" : ("with error: " + t)));
}
} finally {
// Error or EOF while reading:
// cancel write side after completing read side
writeSubscription.cancel();
}
}

private void onNewSubscription(DelegateWrapper subscriberImpl,
Flow.Subscription subscription) {
assert subscriberImpl != null;
@ -432,13 +447,13 @@ public class SSLTube implements FlowTube {
if (debug.on())
debug.log("onNewSubscription: subscriberImpl:%s, invoking onError:%s",
subscriberImpl, failed);
subscriberImpl.onError(failed);
complete(subscriberImpl, failed);
} else if (completed) {
if (debug.on())
debug.log("onNewSubscription: subscriberImpl:%s, invoking onCompleted",
subscriberImpl);
finished = true;
subscriberImpl.onComplete();
complete(subscriberImpl, null);
}
}

@ -463,7 +478,7 @@ public class SSLTube implements FlowTube {
subscriberImpl = subscribed;
}
if (subscriberImpl != null) {
subscriberImpl.onError(failed);
complete(subscriberImpl, failed);
} else {
if (debug.on())
debug.log("%s: delegate null, stored %s", this, failed);
@ -485,14 +500,22 @@ public class SSLTube implements FlowTube {
return !(hs == NOT_HANDSHAKING || hs == FINISHED);
}

private boolean handshakeFailed() {
private String handshakeFailed() {
// sslDelegate can be null if we reach here
// during the initial handshake, as that happens
// within the SSLFlowDelegate constructor.
// In that case we will want to raise an exception.
return handshaking()
if (handshaking()
&& (sslDelegate == null
|| !sslDelegate.closeNotifyReceived());
|| !sslDelegate.closeNotifyReceived())) {
return "Remote host terminated the handshake";
}
// The initial handshake may not have been started yet.
// In which case - if we are completed before the initial handshake
// is started, we consider this a handshake failure as well.
if ("SSL_NULL_WITH_NULL_NULL".equals(engine.getSession().getCipherSuite()))
return "Remote host closed the channel";
return null;
}

@Override
@ -503,17 +526,18 @@ public class SSLTube implements FlowTube {
subscriberImpl = subscribed;
}

if (handshakeFailed()) {
String handshakeFailed = handshakeFailed();
if (handshakeFailed != null) {
if (debug.on())
debug.log("handshake: %s, inbound done: %s outbound done: %s",
debug.log("handshake: %s, inbound done: %s, outbound done: %s: %s",
engine.getHandshakeStatus(),
engine.isInboundDone(),
engine.isOutboundDone());
onErrorImpl(new SSLHandshakeException(
"Remote host terminated the handshake"));
engine.isOutboundDone(),
handshakeFailed);
onErrorImpl(new SSLHandshakeException(handshakeFailed));
} else if (subscriberImpl != null) {
onCompleteReceived = finished = true;
subscriberImpl.onComplete();
complete(subscriberImpl, null);
} else {
onCompleteReceived = true;
}

Some files were not shown because too many files have changed in this diff.