8229919: Support JNI Critical functions in object pinning API on x86_32 platforms

Reviewed-by: rkennke, adinn
Author: Zhengyu Gu, 2019-10-15 22:22:23 -04:00
Parent: 9dd506f713
Commit: 0046f1ea4f
3 changed files with 145 additions and 8 deletions
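
For context (not part of this commit): under -XX:+CriticalJNINatives, HotSpot calls a native method through its JavaCritical_ variant, which drops the JNIEnv/jclass parameters and passes each array argument as a (length, element-pointer) pair. That raw pointer is only safe while the GC cannot move the array, which is exactly what the pinning added below provides. A minimal sketch of the convention; the Example class and sum method are hypothetical:

// Java side (hypothetical): class Example { static native long sum(int[] arr); }
#include <jni.h>

// Regular JNI entry point, always required as the fallback.
extern "C" JNIEXPORT jlong JNICALL
Java_Example_sum(JNIEnv* env, jclass, jintArray arr) {
  jint len = env->GetArrayLength(arr);
  jint* body = env->GetIntArrayElements(arr, NULL);
  jlong s = 0;
  for (jint i = 0; i < len; i++) s += body[i];
  env->ReleaseIntArrayElements(arr, body, JNI_ABORT);
  return s;
}

// Critical variant: no JNIEnv/jclass; the int[] arrives as (length, raw base).
// The wrapper in this patch pins the array and passes its element base here,
// so this function must not block, allocate, or call back into the VM.
extern "C" JNIEXPORT jlong JNICALL
JavaCritical_Example_sum(jint len, jint* body) {
  jlong s = 0;
  for (jint i = 0; i < len; i++) s += body[i];
  return s;
}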

src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp

@@ -1304,6 +1304,97 @@ static void save_or_restore_arguments(MacroAssembler* masm,
}
}
// Registers need to be saved for runtime call
static Register caller_saved_registers[] = {
rcx, rdx, rsi, rdi
};
// Save caller saved registers except r1 and r2
static void save_registers_except(MacroAssembler* masm, Register r1, Register r2) {
int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
for (int index = 0; index < reg_len; index ++) {
Register this_reg = caller_saved_registers[index];
if (this_reg != r1 && this_reg != r2) {
__ push(this_reg);
}
}
}
// Restore caller saved registers except r1 and r2
static void restore_registers_except(MacroAssembler* masm, Register r1, Register r2) {
int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
for (int index = reg_len - 1; index >= 0; index --) {
Register this_reg = caller_saved_registers[index];
if (this_reg != r1 && this_reg != r2) {
__ pop(this_reg);
}
}
}
// Pin object, return pinned object or null in rax
static void gen_pin_object(MacroAssembler* masm,
Register thread, VMRegPair reg) {
__ block_comment("gen_pin_object {");
Label is_null;
Register tmp_reg = rax;
VMRegPair tmp(tmp_reg->as_VMReg());
if (reg.first()->is_stack()) {
// Load the arg up from the stack
simple_move32(masm, reg, tmp);
reg = tmp;
} else {
__ movl(tmp_reg, reg.first()->as_Register());
}
__ testptr(reg.first()->as_Register(), reg.first()->as_Register());
__ jccb(Assembler::equal, is_null);
// Save registers that may be used by runtime call
Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
save_registers_except(masm, arg, thread);
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
thread, reg.first()->as_Register());
// Restore saved registers
restore_registers_except(masm, arg, thread);
__ bind(is_null);
__ block_comment("} gen_pin_object");
}
// Unpin object
static void gen_unpin_object(MacroAssembler* masm,
Register thread, VMRegPair reg) {
__ block_comment("gen_unpin_object {");
Label is_null;
// temp register
__ push(rax);
Register tmp_reg = rax;
VMRegPair tmp(tmp_reg->as_VMReg());
simple_move32(masm, reg, tmp);
__ testptr(rax, rax);
__ jccb(Assembler::equal, is_null);
// Save registers that may be used by runtime call
Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
save_registers_except(masm, arg, thread);
__ call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
thread, rax);
// Restore saved registers
restore_registers_except(masm, arg, thread);
__ bind(is_null);
__ pop(rax);
__ block_comment("} gen_unpin_object");
}
// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
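
The stubs above funnel into SharedRuntime::pin_object and SharedRuntime::unpin_object. Those runtime entries are shared code introduced with the object pinning API (JDK-8199868) and are not part of this diff; roughly, they are thin delegations to the active heap (a hedged sketch — the exact asserts may differ):

// Hedged sketch of the runtime entries called above (sharedRuntime.cpp).
JRT_LEAF(oopDesc*, SharedRuntime::pin_object(JavaThread* thread, oopDesc* obj))
  assert(Universe::heap()->supports_object_pinning(), "only reached when pinning is supported");
  oop o(obj);
  o = Universe::heap()->pin_object(thread, o);
  assert(o != NULL, "pinned oop must not be null");
  return o;
JRT_END

JRT_LEAF(void, SharedRuntime::unpin_object(JavaThread* thread, oopDesc* obj))
  oop o(obj);
  Universe::heap()->unpin_object(thread, o);
JRT_END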
@@ -1837,7 +1928,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ get_thread(thread);
- if (is_critical_native) {
+ if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
oop_handle_offset, oop_maps, in_regs, in_sig_bt);
}
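
The new guard keys off CollectedHeap::supports_object_pinning(): GCs that can hold individual objects in place (Shenandoah via region pinning, and trivially Epsilon, which never moves objects) skip the GCLocker-style check and take the pin/unpin path instead. A hedged sketch of the heap hooks behind the guard, as declared in collectedHeap.hpp:

// Hedged sketch of the CollectedHeap virtuals behind the guard; the default
// is "no pinning", so only GCs that override these take the new path.
virtual bool supports_object_pinning() const { return false; }

// Pin obj so it cannot move until the matching unpin_object; returns the oop.
virtual oop pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

virtual void unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}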
@@ -1875,6 +1966,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
//
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
// Inbound arguments that need to be pinned for critical natives
GrowableArray<int> pinned_args(total_in_args);
// Current stack slot for storing register based array argument
int pinned_slot = oop_handle_offset;
// Mark location of rbp,
// map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
@@ -1886,7 +1982,28 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
switch (in_sig_bt[i]) {
case T_ARRAY:
if (is_critical_native) {
- unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
VMRegPair in_arg = in_regs[i];
if (Universe::heap()->supports_object_pinning()) {
// gen_pin_object handles save and restore
// of any clobbered registers
gen_pin_object(masm, thread, in_arg);
pinned_args.append(i);
// rax has pinned array
VMRegPair result_reg(rax->as_VMReg());
if (!in_arg.first()->is_stack()) {
assert(pinned_slot <= stack_slots, "overflow");
simple_move32(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
pinned_slot += VMRegImpl::slots_per_word;
} else {
// Write back pinned value, it will be used to unpin this argument
__ movptr(Address(rbp, reg2offset_in(in_arg.first())), result_reg.first()->as_Register());
}
// We have the array in register, use it
in_arg = result_reg;
}
unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
c_arg++;
break;
}
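
After pinning, the pinned oop returned in rax replaces the original argument and is handed to unpack_array_argument, which emits code to split the array into the (length, element-base) pair a critical native receives; a null array is passed as (0, NULL). A hedged C++ model of that per-argument transform — model_unpack is a hypothetical helper for illustration, not a HotSpot function:

// C++ model of what the emitted assembly computes for each T_ARRAY argument.
struct UnpackedArray {
  jint  length;  // first element of the expanded pair
  void* body;    // element base inside the pinned array, second of the pair
};

static UnpackedArray model_unpack(arrayOop pinned, BasicType elem_bt) {
  UnpackedArray out;
  if (pinned == NULL) {
    // Null arrays are legal: the critical native sees (0, NULL).
    out.length = 0;
    out.body   = NULL;
  } else {
    out.length = pinned->length();        // arrayOopDesc::length()
    out.body   = pinned->base(elem_bt);   // arrayOopDesc::base(BasicType)
  }
  return out;
}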
@@ -2079,6 +2196,26 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
default : ShouldNotReachHere();
}
// unpin pinned arguments
pinned_slot = oop_handle_offset;
if (pinned_args.length() > 0) {
// save return value that may be overwritten otherwise.
save_native_result(masm, ret_type, stack_slots);
for (int index = 0; index < pinned_args.length(); index ++) {
int i = pinned_args.at(index);
assert(pinned_slot <= stack_slots, "overflow");
if (!in_regs[i].first()->is_stack()) {
int offset = pinned_slot * VMRegImpl::stack_slot_size;
__ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
pinned_slot += VMRegImpl::slots_per_word;
}
// gen_unpin_object handles save and restore
// of any other clobbered registers
gen_unpin_object(masm, thread, in_regs[i]);
}
restore_native_result(masm, ret_type, stack_slots);
}
// Switch thread to "native transition" state before reading the synchronization state.
// This additional state is necessary because reading and testing the synchronization
// state is not atomic w.r.t. GC, as this scenario demonstrates:

test/hotspot/jtreg/gc/CriticalNativeArgs.java

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
+ * Copyright (c) 2018, 2019, Red Hat, Inc. and/or its affiliates.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
@@ -28,7 +28,7 @@ package gc;
* @key gc
* @bug 8199868
* @library /
- * @requires (os.arch == "x86_64" | os.arch == "amd64") & vm.gc.Epsilon & !vm.graal.enabled
+ * @requires (os.arch == "x86_64" | os.arch == "amd64" | os.arch == "x86" | os.arch == "i386") & vm.gc.Epsilon & !vm.graal.enabled
* @summary test argument unpacking nmethod wrapper of critical native method
* @run main/othervm/native -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xcomp -Xmx256M -XX:+CriticalJNINatives gc.CriticalNativeArgs
*/
@@ -38,7 +38,7 @@ package gc;
* @key gc
* @bug 8199868
* @library /
- * @requires (os.arch == "x86_64" | os.arch == "amd64") & vm.gc.Shenandoah & !vm.graal.enabled
+ * @requires (os.arch == "x86_64" | os.arch == "amd64" | os.arch == "x86" | os.arch == "i386") & vm.gc.Shenandoah & !vm.graal.enabled
* @summary test argument unpacking nmethod wrapper of critical native method
* @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:+ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.CriticalNativeArgs
* @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:-ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.CriticalNativeArgs

test/hotspot/jtreg/gc/stress/CriticalNativeStress.java

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
+ * Copyright (c) 2018, 2019, Red Hat, Inc. and/or its affiliates.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
@@ -31,7 +31,7 @@ import gc.CriticalNative;
* @key gc
* @bug 8199868
* @library /
- * @requires (os.arch == "x86_64" | os.arch == "amd64") & vm.gc.Epsilon & !vm.graal.enabled
+ * @requires (os.arch == "x86_64" | os.arch == "amd64" | os.arch == "x86" | os.arch == "i386") & vm.gc.Epsilon & !vm.graal.enabled
* @summary test argument pinning by nmethod wrapper of critical native method
* @run main/othervm/native -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC -Xcomp -Xmx1G -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
*/
@@ -41,7 +41,7 @@ import gc.CriticalNative;
* @key gc
* @bug 8199868
* @library /
- * @requires (os.arch == "x86_64" | os.arch == "amd64") & vm.gc.Shenandoah & !vm.graal.enabled
+ * @requires (os.arch == "x86_64" | os.arch == "amd64" | os.arch == "x86" | os.arch == "i386") & vm.gc.Shenandoah & !vm.graal.enabled
* @summary test argument pinning by nmethod wrapper of critical native method
* @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:-ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress
* @run main/othervm/native -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseShenandoahGC -XX:ShenandoahGCMode=passive -XX:+ShenandoahDegeneratedGC -Xcomp -Xmx512M -XX:+CriticalJNINatives gc.stress.CriticalNativeStress