8251462: Simplify compilation policy
Reviewed-by: cjplummer, kvn
Igor Veresov committed Jan 28, 2021
1 parent 71128cf commit 1519632
Showing 98 changed files with 2,320 additions and 3,795 deletions.
4 changes: 2 additions & 2 deletions src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1411,7 +1411,7 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
// membar it's possible for a simple Dekker test to fail if loads
// use LD;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and C1 compiles the loads in another.
- if (!is_c1_or_interpreter_only()) {
+ if (!CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
__ membar();
}
__ volatile_load_mem_reg(address, result, info);
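The hunk above keeps a leading barrier before a volatile field load unless the whole VM is known to be C1-only (no AOT, no JVMCI): a C2-compiled volatile store uses STLR, and a C1 volatile load that is only followed by a DMB is not ordered against it, so a Dekker-style test can observe both flags as unset. As a rough analogy only (standalone C++ with std::atomic, not HotSpot code), the sketch below models that hazard; the seq_cst fence plays the role of the membar guarded by `CompilerConfig::is_c1_only_no_aot_or_jvmci()`.

```cpp
// Illustrative stand-alone C++ (not HotSpot code): the Dekker-style pattern
// the comment refers to, modeled with std::atomic.
#include <atomic>
#include <thread>
#include <cstdio>

std::atomic<int> x{0}, y{0};
int r1 = -1, r2 = -1;

void t1_body() {
  x.store(1, std::memory_order_release);               // like a C2 volatile store (STLR)
  std::atomic_thread_fence(std::memory_order_seq_cst); // the "membar" under discussion
  r1 = y.load(std::memory_order_relaxed);              // like a C1 "LD; DMB" volatile load
}

void t2_body() {
  y.store(1, std::memory_order_release);
  std::atomic_thread_fence(std::memory_order_seq_cst);
  r2 = x.load(std::memory_order_relaxed);
}

int main() {
  std::thread a(t1_body), b(t2_body);
  a.join(); b.join();
  // With the fences, r1 == 0 && r2 == 0 is impossible; remove them and that
  // outcome becomes legal, and it is observable on AArch64 hardware.
  std::printf("r1=%d r2=%d\n", r1, r2);
}
```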
6 changes: 3 additions & 3 deletions src/hotspot/cpu/aarch64/c1_globals_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -32,7 +32,7 @@
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)

- #ifndef TIERED
+ #ifndef COMPILER2
define_pd_global(bool, BackgroundCompilation, true );
define_pd_global(bool, InlineIntrinsics, true );
define_pd_global(bool, PreferInterpreterNativeStubs, false);
@@ -56,7 +56,7 @@ define_pd_global(uintx, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
- #endif // !TIERED
+ #endif // !COMPILER2
define_pd_global(bool, UseTypeProfile, false);

define_pd_global(bool, OptimizeSinglePrecision, true );
4 changes: 2 additions & 2 deletions src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,7 +39,7 @@ define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, true);
define_pd_global(bool, UseOnStackReplacement, true);
define_pd_global(bool, ProfileInterpreter, true);
- define_pd_global(bool, TieredCompilation, trueInTiered);
+ define_pd_global(bool, TieredCompilation, COMPILER1_PRESENT(true) NOT_COMPILER1(false));
define_pd_global(intx, CompileThreshold, 10000);

define_pd_global(intx, OnStackReplacePercentage, 140);
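The new `TieredCompilation` default in this hunk is built from HotSpot's optional-component macros rather than the old `trueInTiered` constant. A simplified sketch of the expansion (the real definitions live in HotSpot's `utilities/macros.hpp`):

```cpp
// Simplified rendering of the HotSpot macros used above (see
// src/hotspot/share/utilities/macros.hpp for the real definitions).
#ifdef COMPILER1
  #define COMPILER1_PRESENT(code) code
  #define NOT_COMPILER1(code)
#else
  #define COMPILER1_PRESENT(code)
  #define NOT_COMPILER1(code) code
#endif

// In a build that contains C1:
//   COMPILER1_PRESENT(true) NOT_COMPILER1(false)   ==>   true
// In a C2-only build:
//   COMPILER1_PRESENT(true) NOT_COMPILER1(false)   ==>   false
// so TieredCompilation defaults to true only when both compilers are built in.
```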
@@ -51,7 +51,7 @@ void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {

ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, result);

- if (is_c1_or_interpreter_only()) {
+ if (CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
// The membar here is necessary to prevent reordering between the
// release store in the CAS above and a subsequent volatile load.
// However for tiered compilation C1 inserts a full barrier before
4 changes: 2 additions & 2 deletions src/hotspot/cpu/aarch64/globals_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -36,7 +36,7 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for im
define_pd_global(bool, TrapBasedNullChecks, false);
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast

- define_pd_global(uintx, CodeCacheSegmentSize, 64 TIERED_ONLY(+64)); // Tiered compilation has large code-entry alignment.
+ define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);
115 changes: 24 additions & 91 deletions src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp
@@ -589,82 +589,31 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(
//
// rmethod: method
//
- void TemplateInterpreterGenerator::generate_counter_incr(
- Label* overflow,
- Label* profile_method,
- Label* profile_method_continue) {
+ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
Label done;
- // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
- if (TieredCompilation) {
- int increment = InvocationCounter::count_increment;
- Label no_mdo;
- if (ProfileInterpreter) {
- // Are we profiling?
- __ ldr(r0, Address(rmethod, Method::method_data_offset()));
- __ cbz(r0, no_mdo);
- // Increment counter in the MDO
- const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
- in_bytes(InvocationCounter::counter_offset()));
- const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
- __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
- __ b(done);
- }
- __ bind(no_mdo);
- // Increment counter in MethodCounters
- const Address invocation_counter(rscratch2,
- MethodCounters::invocation_counter_offset() +
- InvocationCounter::counter_offset());
- __ get_method_counters(rmethod, rscratch2, done);
- const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
- __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
- __ bind(done);
- } else { // not TieredCompilation
- const Address backedge_counter(rscratch2,
- MethodCounters::backedge_counter_offset() +
- InvocationCounter::counter_offset());
- const Address invocation_counter(rscratch2,
- MethodCounters::invocation_counter_offset() +
- InvocationCounter::counter_offset());
-
- __ get_method_counters(rmethod, rscratch2, done);
-
- if (ProfileInterpreter) { // %%% Merge this into MethodData*
- __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
- __ addw(r1, r1, 1);
- __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
- }
- // Update standard invocation counters
- __ ldrw(r1, invocation_counter);
- __ ldrw(r0, backedge_counter);
-
- __ addw(r1, r1, InvocationCounter::count_increment);
- __ andw(r0, r0, InvocationCounter::count_mask_value);
-
- __ strw(r1, invocation_counter);
- __ addw(r0, r0, r1); // add both counters
-
- // profile_method is non-null only for interpreted method so
- // profile_method != NULL == !native_call
-
- if (ProfileInterpreter && profile_method != NULL) {
- // Test to see if we should create a method data oop
- __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
- __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
- __ cmpw(r0, rscratch2);
- __ br(Assembler::LT, *profile_method_continue);
-
- // if no method data exists, go to profile_method
- __ test_method_data_pointer(rscratch2, *profile_method);
- }
-
- {
- __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
- __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
- __ cmpw(r0, rscratch2);
- __ br(Assembler::HS, *overflow);
- }
- __ bind(done);
+ int increment = InvocationCounter::count_increment;
+ Label no_mdo;
+ if (ProfileInterpreter) {
+ // Are we profiling?
+ __ ldr(r0, Address(rmethod, Method::method_data_offset()));
+ __ cbz(r0, no_mdo);
+ // Increment counter in the MDO
+ const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
+ in_bytes(InvocationCounter::counter_offset()));
+ const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
+ __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
+ __ b(done);
+ }
+ __ bind(no_mdo);
+ // Increment counter in MethodCounters
+ const Address invocation_counter(rscratch2,
+ MethodCounters::invocation_counter_offset() +
+ InvocationCounter::counter_offset());
+ __ get_method_counters(rmethod, rscratch2, done);
+ const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
+ __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
+ __ bind(done);
}
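The retained body above is the former tiered path: when interpreter profiling is on and an MDO exists, the invocation counter in the MethodData is bumped; otherwise the one in MethodCounters is; either way `increment_mask_and_jump` masks the new value and branches to `overflow` on an all-zero result (the `Assembler::EQ` condition it is passed). A loose C++ pseudocode rendering of that control flow, with illustrative names and mask value rather than HotSpot source:

```cpp
// Loose pseudocode for the control flow generated above; names and the mask
// value are illustrative, not taken from HotSpot.
struct CounterIncrSketch {
  int  mdo_invocation_counter = 0;      // counter stored in the MethodData (MDO)
  int  mc_invocation_counter  = 0;      // counter stored in MethodCounters
  int  invoke_mask            = 0x1ff8; // frequency mask (illustrative value)
  bool profiling_with_mdo     = true;   // ProfileInterpreter && an MDO is present

  // Returns true when the generated code would branch to the overflow label.
  bool increment_and_check(int increment) {
    int* counter = profiling_with_mdo ? &mdo_invocation_counter
                                      : &mc_invocation_counter;
    *counter += increment;                // bump the chosen counter
    return (*counter & invoke_mask) == 0; // EQ -> take the overflow branch
  }
};
```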

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
@@ -1205,7 +1154,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// increment invocation count & check for overflow
Label invocation_counter_overflow;
if (inc_counter) {
- generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+ generate_counter_incr(&invocation_counter_overflow);
}

Label continue_after_compile;
@@ -1649,15 +1598,8 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

// increment invocation count & check for overflow
Label invocation_counter_overflow;
- Label profile_method;
- Label profile_method_continue;
if (inc_counter) {
- generate_counter_incr(&invocation_counter_overflow,
- &profile_method,
- &profile_method_continue);
- if (ProfileInterpreter) {
- __ bind(profile_method_continue);
- }
+ generate_counter_incr(&invocation_counter_overflow);
}

Label continue_after_compile;
@@ -1709,15 +1651,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {

// invocation counter overflow
if (inc_counter) {
- if (ProfileInterpreter) {
- // We have decided to profile this method in the interpreter
- __ bind(profile_method);
- __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
- __ set_method_data_pointer_for_bcp();
- // don't think we need this
- __ get_method(r1);
- __ b(profile_method_continue);
- }
// Handle overflow of counter and compile method
__ bind(invocation_counter_overflow);
generate_counter_overflow(continue_after_compile);