diff --git a/compiler/src/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/HotSpotAllocationSnippets.java b/compiler/src/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/HotSpotAllocationSnippets.java
index 90cee9422e94..dab32469e387 100644
--- a/compiler/src/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/HotSpotAllocationSnippets.java
+++ b/compiler/src/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/HotSpotAllocationSnippets.java
@@ -352,7 +352,7 @@ static boolean useNullAllocationStubs(@InjectedParameter GraalHotSpotVMConfig co
     }
 
     @Override
-    protected final Object callNewArrayStub(Word hub, int length, int fillStartOffset) {
+    protected final Object callNewArrayStub(Word hub, int length) {
         KlassPointer klassPtr = KlassPointer.fromWord(hub);
         if (useNullAllocationStubs(INJECTED_VMCONFIG)) {
             return nonNullOrDeopt(newArrayOrNull(NEW_ARRAY_OR_NULL, klassPtr, length));
diff --git a/compiler/src/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/AllocationSnippets.java b/compiler/src/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/AllocationSnippets.java
index b8cb494e5189..c3cba7fe5a55 100644
--- a/compiler/src/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/AllocationSnippets.java
+++ b/compiler/src/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/AllocationSnippets.java
@@ -93,11 +93,11 @@ public Object allocateArrayImpl(Word hub,
         if (useTLAB() && probability(FAST_PATH_PROBABILITY, shouldAllocateInTLAB(allocationSize, true)) && probability(FAST_PATH_PROBABILITY, newTop.belowOrEqual(end))) {
             writeTlabTop(thread, newTop);
             emitPrefetchAllocate(newTop, true);
-            result = formatArray(hub, allocationSize, length, top, fillContents, fillStartOffset, emitMemoryBarrier, maybeUnroll, supportsBulkZeroing, supportsOptimizedFilling,
+            result = formatArray(hub, allocationSize, length, top, fillContents, emitMemoryBarrier, fillStartOffset, maybeUnroll, supportsBulkZeroing, supportsOptimizedFilling,
                             profilingData.snippetCounters);
         } else {
             profilingData.snippetCounters.stub.inc();
-            result = callNewArrayStub(hub, length, fillStartOffset);
+            result = callNewArrayStub(hub, length);
         }
         profileAllocation(profilingData, allocationSize);
         return verifyOop(result);
@@ -124,8 +124,7 @@ protected UnsignedWord arrayAllocationSize(int length, int arrayBaseOffset, int
     public static long arrayAllocationSize(int length, int arrayBaseOffset, int log2ElementSize, int alignment) {
         long size = ((length & 0xFFFFFFFFL) << log2ElementSize) + arrayBaseOffset + (alignment - 1);
         long mask = ~(alignment - 1);
-        long result = size & mask;
-        return result;
+        return size & mask;
     }
 
     /**
@@ -268,11 +267,7 @@ public Object formatObject(Word hub,
                     AllocationSnippetCounters snippetCounters) {
         initializeObjectHeader(memory, hub, false);
         int headerSize = instanceHeaderSize();
-        if (fillContents == FillContent.WITH_ZEROES) {
-            zeroMemory(memory, headerSize, size, constantSize, false, false, false, snippetCounters);
-        } else if (REPLACEMENTS_ASSERTIONS_ENABLED && fillContents == FillContent.WITH_GARBAGE_IF_ASSERTIONS_ENABLED) {
-            fillWithGarbage(memory, headerSize, size, constantSize, false, false, snippetCounters);
-        }
+        fillContents(memory, fillContents, headerSize, size, constantSize, false, false, false, snippetCounters);
         emitMemoryBarrierIf(emitMemoryBarrier);
         return memory.toObjectNonNull();
     }
@@ -285,23 +280,33 @@ public Object formatArray(Word hub,
                     int length,
                     Word memory,
                     FillContent fillContents,
-                    int fillStartOffset,
                     boolean emitMemoryBarrier,
+                    int fillStartOffset,
                     boolean maybeUnroll,
                     boolean supportsBulkZeroing,
                     boolean supportsOptimizedFilling,
                     AllocationSnippetCounters snippetCounters) {
-        memory.writeInt(arrayLengthOffset(), length, LocationIdentity.init());
-        // Store hub last as the concurrent garbage collectors assume length is valid if hub field
-        // is not null.
+        /*
+         * For TLAB allocations, the initialization order does not matter. Therefore, it is also not
+         * necessary to use STORE_RELEASE semantics when storing the hub into the newly allocated
+         * object. This is a major difference to the slow-path allocation where the initialization
+         * order and the STORE_RELEASE semantics are crucial for concurrent GCs (the slow-path
+         * allocation can directly allocate in the old generation).
+         */
         initializeObjectHeader(memory, hub, true);
+        memory.writeInt(arrayLengthOffset(), length, LocationIdentity.init());
+        fillContents(memory, fillContents, fillStartOffset, allocationSize, false, maybeUnroll, supportsBulkZeroing, supportsOptimizedFilling, snippetCounters);
+        emitMemoryBarrierIf(emitMemoryBarrier);
+        return memory.toObjectNonNull();
+    }
+
+    private void fillContents(Word memory, FillContent fillContents, int startOffset, UnsignedWord endOffset, boolean isEndOffsetConstant, boolean maybeUnroll, boolean supportsBulkZeroing,
+                    boolean supportsOptimizedFilling, AllocationSnippetCounters snippetCounters) {
         if (fillContents == FillContent.WITH_ZEROES) {
-            zeroMemory(memory, fillStartOffset, allocationSize, false, maybeUnroll, supportsBulkZeroing, supportsOptimizedFilling, snippetCounters);
+            zeroMemory(memory, startOffset, endOffset, isEndOffsetConstant, maybeUnroll, supportsBulkZeroing, supportsOptimizedFilling, snippetCounters);
         } else if (REPLACEMENTS_ASSERTIONS_ENABLED && fillContents == FillContent.WITH_GARBAGE_IF_ASSERTIONS_ENABLED) {
-            fillWithGarbage(memory, fillStartOffset, allocationSize, false, maybeUnroll, supportsOptimizedFilling, snippetCounters);
+            fillWithGarbage(memory, startOffset, endOffset, isEndOffsetConstant, maybeUnroll, supportsOptimizedFilling, snippetCounters);
         }
-        emitMemoryBarrierIf(emitMemoryBarrier);
-        return memory.toObjectNonNull();
     }
 
     protected void emitMemoryBarrierIf(boolean emitMemoryBarrier) {
@@ -351,7 +356,7 @@ public void emitPrefetchAllocate(Word address, boolean isArray) {
 
     protected abstract Object callNewInstanceStub(Word hub);
 
-    protected abstract Object callNewArrayStub(Word hub, int length, int fillStartOffset);
+    protected abstract Object callNewArrayStub(Word hub, int length);
 
     protected abstract Object callNewMultiArrayStub(Word hub, int rank, Word dims);
 
diff --git a/compiler/src/org.graalvm.compiler.truffle.runtime.serviceprovider/src/org/graalvm/compiler/truffle/runtime/serviceprovider/TruffleRuntimeServices.java b/compiler/src/org.graalvm.compiler.truffle.runtime.serviceprovider/src/org/graalvm/compiler/truffle/runtime/serviceprovider/TruffleRuntimeServices.java
index c3ec92c09f6e..9eb208e072e9 100644
--- a/compiler/src/org.graalvm.compiler.truffle.runtime.serviceprovider/src/org/graalvm/compiler/truffle/runtime/serviceprovider/TruffleRuntimeServices.java
+++ b/compiler/src/org.graalvm.compiler.truffle.runtime.serviceprovider/src/org/graalvm/compiler/truffle/runtime/serviceprovider/TruffleRuntimeServices.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,17 @@ public final class TruffleRuntimeServices {
      * @param service the service whose provider is being requested
      */
     public static Iterable load(Class service) {
-        return ServiceLoader.load(service);
+        Class lookupClass = TruffleRuntimeServices.class;
+        ModuleLayer moduleLayer = lookupClass.getModule().getLayer();
+        Iterable services;
+        if (moduleLayer != null) {
+            services = ServiceLoader.load(moduleLayer, service);
+        } else {
+            services = ServiceLoader.load(service, lookupClass.getClassLoader());
+        }
+        if (!services.iterator().hasNext()) {
+            services = ServiceLoader.load(service);
+        }
+        return services;
     }
 }
diff --git a/sdk/llvm-patches/README.md b/sdk/llvm-patches/README.md
index 0a0a880ffa8d..bcd9fcbad696 100644
--- a/sdk/llvm-patches/README.md
+++ b/sdk/llvm-patches/README.md
@@ -4,4 +4,4 @@ LLVM Upstream Patches
 This directory contains patches which were used to build this LLVM distribution
 but are not yet upstream or have been backported to LLVM 14.
 To build this LLVM distribution yourself, apply the patches
-on top of an LLVM [14.0.3](https://github.com/llvm/llvm-project/tree/llvmorg-14.0.3) source tree.
+on top of an LLVM [14.0.6](https://github.com/llvm/llvm-project/tree/llvmorg-14.0.6) source tree.
diff --git a/sdk/llvm-patches/native-image/0001-GR-17692-Statepoints-Support-for-compressed-pointers.patch b/sdk/llvm-patches/native-image/0001-GR-17692-Statepoints-Support-for-compressed-pointers.patch
index dce94bd7a812..4f99d2e61ef1 100644
--- a/sdk/llvm-patches/native-image/0001-GR-17692-Statepoints-Support-for-compressed-pointers.patch
+++ b/sdk/llvm-patches/native-image/0001-GR-17692-Statepoints-Support-for-compressed-pointers.patch
@@ -1,7 +1,7 @@
-From 0122431d63266f033e4fd8abf78e950c124f027f Mon Sep 17 00:00:00 2001
+From b9902a9d9f57ea78d4b11bdfd7295738ef3547fb Mon Sep 17 00:00:00 2001
 From: Loic Ottet
 Date: Mon, 23 Sep 2019 16:55:33 +0200
-Subject: [PATCH 1/2] [GR-17692] [Statepoints] Support for compressed pointers
+Subject: [PATCH 1/3] [GR-17692] [Statepoints] Support for compressed pointers
  in the statepoint emission pass
 
 ---
@@ -424,5 +424,5 @@ index b795ad3899bc..5a5d1d67e5f4 100644
      Out.insert(LiveOut.begin(), LiveOut.end());
  }
 --
-2.33.1
+2.36.0
 
diff --git a/sdk/llvm-patches/native-image/0002-GR-23578-AArch64-Introduce-option-to-force-placement.patch b/sdk/llvm-patches/native-image/0002-GR-23578-AArch64-Introduce-option-to-force-placement.patch
index 597eec641244..5f298fd971e1 100644
--- a/sdk/llvm-patches/native-image/0002-GR-23578-AArch64-Introduce-option-to-force-placement.patch
+++ b/sdk/llvm-patches/native-image/0002-GR-23578-AArch64-Introduce-option-to-force-placement.patch
@@ -1,7 +1,7 @@
-From 772a7a659e7f2f231920cff828cff00fab198af0 Mon Sep 17 00:00:00 2001
+From ca6facf3f34d646783d082776378da40d06e9035 Mon Sep 17 00:00:00 2001
 From: Loic Ottet
 Date: Tue, 8 Sep 2020 13:03:06 +0200
-Subject: [PATCH 2/2] [GR-23578][AArch64] Introduce option to force placement
+Subject: [PATCH 2/3] [GR-23578][AArch64] Introduce option to force placement
  of the frame record on top of the stack frame
 
 ---
@@ -34,5 +34,5 @@ index d1b901e58d27..96038fbb8f28 100644
 
  }
 --
-2.33.1
+2.36.0
 
diff --git a/sdk/llvm-patches/native-image/0003-GR-36952-RISCV-Add-support-for-RISC-V-Stackmaps-Stat.patch b/sdk/llvm-patches/native-image/0003-GR-36952-RISCV-Add-support-for-RISC-V-Stackmaps-Stat.patch
new file mode 100644
index
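The TruffleRuntimeServices change above, like the HomeFinder and Engine.searchServiceLoader changes later in this diff, replaces a bare ServiceLoader.load(service) call with a lookup that prefers the module layer of the requesting class, falls back to that class's own class loader, and only retries with the context class loader when nothing was found. A minimal standalone sketch of that lookup order, assuming nothing beyond the JDK ServiceLoader API; the ServiceLookup class and its parameters are illustrative names, not part of the change:

```java
import java.util.ServiceLoader;

final class ServiceLookup {

    /** Sketch of the lookup order used in this diff; lookupClass stands in for TruffleRuntimeServices, HomeFinder, etc. */
    static <S> Iterable<S> load(Class<S> service, Class<?> lookupClass) {
        ModuleLayer moduleLayer = lookupClass.getModule().getLayer();
        Iterable<S> services;
        if (moduleLayer != null) {
            // Named-module case: resolve providers from the layer the lookup class belongs to.
            services = ServiceLoader.load(moduleLayer, service);
        } else {
            // Unnamed-module case: use the class loader that defined the lookup class.
            services = ServiceLoader.load(service, lookupClass.getClassLoader());
        }
        if (!services.iterator().hasNext()) {
            // Last resort: the thread context class loader, which the old code used unconditionally.
            services = ServiceLoader.load(service);
        }
        return services;
    }
}
```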
000000000000..215dcc32078d --- /dev/null +++ b/sdk/llvm-patches/native-image/0003-GR-36952-RISCV-Add-support-for-RISC-V-Stackmaps-Stat.patch @@ -0,0 +1,846 @@ +From c7a4a53c32ccf93dd118fb6c13f2ecf02baeadf4 Mon Sep 17 00:00:00 2001 +From: Sacha Coppey +Date: Fri, 3 Jun 2022 11:41:20 +0200 +Subject: [PATCH 3/3] [GR-36952][RISCV] Add support for RISC-V + Stackmaps/Statepoints/Patchpoints + +--- + llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp | 188 ++++++++- + llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 13 + + llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 21 +- + llvm/test/CodeGen/RISCV/rv64-patchpoint.ll | 68 ++++ + .../RISCV/rv64-stackmap-frame-setup.ll | 22 + + llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll | 18 + + llvm/test/CodeGen/RISCV/rv64-stackmap.ll | 384 ++++++++++++++++++ + 7 files changed, 710 insertions(+), 4 deletions(-) + create mode 100644 llvm/test/CodeGen/RISCV/rv64-patchpoint.ll + create mode 100644 llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll + create mode 100644 llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll + create mode 100644 llvm/test/CodeGen/RISCV/rv64-stackmap.ll + +diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp +index 9fed6e7baadc..3f51693ad338 100644 +--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp ++++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp +@@ -13,6 +13,7 @@ + + #include "MCTargetDesc/RISCVInstPrinter.h" + #include "MCTargetDesc/RISCVMCExpr.h" ++#include "MCTargetDesc/RISCVMatInt.h" + #include "MCTargetDesc/RISCVTargetStreamer.h" + #include "RISCV.h" + #include "RISCVTargetMachine.h" +@@ -23,8 +24,10 @@ + #include "llvm/CodeGen/MachineFunctionPass.h" + #include "llvm/CodeGen/MachineInstr.h" + #include "llvm/CodeGen/MachineModuleInfo.h" ++#include "llvm/CodeGen/StackMaps.h" + #include "llvm/MC/MCAsmInfo.h" + #include "llvm/MC/MCInst.h" ++#include "llvm/MC/MCInstBuilder.h" + #include "llvm/MC/MCStreamer.h" + #include "llvm/MC/MCSymbol.h" + #include "llvm/MC/TargetRegistry.h" +@@ -38,15 +41,26 @@ STATISTIC(RISCVNumInstrsCompressed, + + namespace { + class RISCVAsmPrinter : public AsmPrinter { ++ StackMaps SM; + const MCSubtargetInfo *STI; + + public: + explicit RISCVAsmPrinter(TargetMachine &TM, + std::unique_ptr Streamer) +- : AsmPrinter(TM, std::move(Streamer)), STI(TM.getMCSubtargetInfo()) {} ++ : AsmPrinter(TM, std::move(Streamer)), SM(*this), ++ STI(TM.getMCSubtargetInfo()) {} + + StringRef getPassName() const override { return "RISCV Assembly Printer"; } + ++ void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM, ++ const MachineInstr &MI); ++ ++ void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM, ++ const MachineInstr &MI); ++ ++ void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM, ++ const MachineInstr &MI); ++ + bool runOnMachineFunction(MachineFunction &MF) override; + + void emitInstruction(const MachineInstr *MI) override; +@@ -73,6 +87,160 @@ private: + }; + } + ++void RISCVAsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM, ++ const MachineInstr &MI) { ++ unsigned NOPBytes = STI->getFeatureBits()[RISCV::FeatureStdExtC] ? 2 : 4; ++ unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes(); ++ ++ auto &Ctx = OutStreamer.getContext(); ++ MCSymbol *MILabel = Ctx.createTempSymbol(); ++ OutStreamer.emitLabel(MILabel); ++ ++ SM.recordStackMap(*MILabel, MI); ++ assert(NumNOPBytes % NOPBytes == 0 && "Invalid number of NOP bytes requested!"); ++ ++ // Scan ahead to trim the shadow. 
++ const MachineBasicBlock &MBB = *MI.getParent(); ++ MachineBasicBlock::const_iterator MII(MI); ++ ++MII; ++ while (NumNOPBytes > 0) { ++ if (MII == MBB.end() || MII->isCall() || ++ MII->getOpcode() == RISCV::DBG_VALUE || ++ MII->getOpcode() == TargetOpcode::PATCHPOINT || ++ MII->getOpcode() == TargetOpcode::STACKMAP) ++ break; ++ ++MII; ++ NumNOPBytes -= 4; ++ } ++ ++ // Emit nops. ++ emitNops(NumNOPBytes / NOPBytes); ++} ++ ++// Lower a patchpoint of the form: ++// [], , , , ++void RISCVAsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM, ++ const MachineInstr &MI) { ++ unsigned NOPBytes = STI->getFeatureBits()[RISCV::FeatureStdExtC] ? 2 : 4; ++ ++ auto &Ctx = OutStreamer.getContext(); ++ MCSymbol *MILabel = Ctx.createTempSymbol(); ++ OutStreamer.emitLabel(MILabel); ++ SM.recordPatchPoint(*MILabel, MI); ++ ++ PatchPointOpers Opers(&MI); ++ ++ const MachineOperand &CalleeMO = Opers.getCallTarget(); ++ unsigned EncodedBytes = 0; ++ ++ if (CalleeMO.isImm()) { ++ uint64_t CallTarget = CalleeMO.getImm(); ++ if (CallTarget) { ++ assert((CallTarget & 0xFFFF'FFFF'FFFF) == CallTarget && ++ "High 16 bits of call target should be zero."); ++ // Materialize the jump address: ++ ++ RISCVMatInt::InstSeq Seq = ++ RISCVMatInt::generateInstSeq(CallTarget, STI->getFeatureBits()); ++ assert(!Seq.empty()); ++ ++ Register SrcReg = RISCV::X0; ++ Register DstReg = RISCV::X1; ++ for (RISCVMatInt::Inst &Inst : Seq) { ++ if (Inst.Opc == RISCV::LUI) { ++ EmitToStreamer( ++ OutStreamer, ++ MCInstBuilder(RISCV::LUI).addReg(DstReg).addImm(Inst.Imm)); ++ } else if (Inst.Opc == RISCV::ADD_UW) { ++ EmitToStreamer(OutStreamer, MCInstBuilder(RISCV::ADD_UW) ++ .addReg(DstReg) ++ .addReg(SrcReg) ++ .addReg(RISCV::X0)); ++ } else if (Inst.Opc == RISCV::SH1ADD || Inst.Opc == RISCV::SH2ADD || ++ Inst.Opc == RISCV::SH3ADD) { ++ EmitToStreamer( ++ OutStreamer, ++ MCInstBuilder(Inst.Opc).addReg(DstReg).addReg(SrcReg).addReg( ++ SrcReg)); ++ } else { ++ EmitToStreamer( ++ OutStreamer, ++ MCInstBuilder(Inst.Opc).addReg(DstReg).addReg(SrcReg).addImm( ++ Inst.Imm)); ++ } ++ EncodedBytes += 4; ++ // Only the first instruction has X0 as its source. ++ SrcReg = DstReg; ++ } ++ EmitToStreamer(OutStreamer, MCInstBuilder(RISCV::JALR) ++ .addReg(RISCV::X1) ++ .addReg(RISCV::X1) ++ .addImm(0)); ++ EncodedBytes += 4; ++ } ++ } else if (CalleeMO.isGlobal()) { ++ MCOperand CallTargetMCOp; ++ llvm::LowerRISCVMachineOperandToMCOperand(CalleeMO, CallTargetMCOp, *this); ++ EmitToStreamer(OutStreamer, ++ MCInstBuilder(RISCV::PseudoCALL).addOperand(CallTargetMCOp)); ++ EncodedBytes += 8; ++ } ++ ++ // Emit padding. ++ unsigned NumBytes = Opers.getNumPatchBytes(); ++ assert(NumBytes >= EncodedBytes && ++ "Patchpoint can't request size less than the length of a call."); ++ assert((NumBytes - EncodedBytes) % NOPBytes == 0 && ++ "Invalid number of NOP bytes requested!"); ++ emitNops((NumBytes - EncodedBytes) / NOPBytes); ++} ++ ++void RISCVAsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM, ++ const MachineInstr &MI) { ++ unsigned NOPBytes = STI->getFeatureBits()[RISCV::FeatureStdExtC] ? 
2 : 4; ++ ++ StatepointOpers SOpers(&MI); ++ if (unsigned PatchBytes = SOpers.getNumPatchBytes()) { ++ assert(PatchBytes % NOPBytes == 0 && "Invalid number of NOP bytes requested!"); ++ emitNops(PatchBytes / NOPBytes); ++ } else { ++ // Lower call target and choose correct opcode ++ const MachineOperand &CallTarget = SOpers.getCallTarget(); ++ MCOperand CallTargetMCOp; ++ switch (CallTarget.getType()) { ++ case MachineOperand::MO_GlobalAddress: ++ case MachineOperand::MO_ExternalSymbol: ++ llvm::LowerRISCVMachineOperandToMCOperand(CallTarget, CallTargetMCOp, ++ *this); ++ EmitToStreamer( ++ OutStreamer, ++ MCInstBuilder(RISCV::PseudoCALL).addOperand(CallTargetMCOp)); ++ break; ++ case MachineOperand::MO_Immediate: ++ CallTargetMCOp = MCOperand::createImm(CallTarget.getImm()); ++ EmitToStreamer(OutStreamer, MCInstBuilder(RISCV::JAL) ++ .addReg(RISCV::X1) ++ .addOperand(CallTargetMCOp)); ++ break; ++ case MachineOperand::MO_Register: ++ CallTargetMCOp = MCOperand::createReg(CallTarget.getReg()); ++ EmitToStreamer(OutStreamer, MCInstBuilder(RISCV::JALR) ++ .addReg(RISCV::X1) ++ .addOperand(CallTargetMCOp) ++ .addImm(0)); ++ break; ++ default: ++ llvm_unreachable("Unsupported operand type in statepoint call target"); ++ break; ++ } ++ } ++ ++ auto &Ctx = OutStreamer.getContext(); ++ MCSymbol *MILabel = Ctx.createTempSymbol(); ++ OutStreamer.emitLabel(MILabel); ++ SM.recordStatepoint(*MILabel, MI); ++} ++ + #define GEN_COMPRESS_INSTR + #include "RISCVGenCompressInstEmitter.inc" + void RISCVAsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) { +@@ -93,8 +261,20 @@ void RISCVAsmPrinter::emitInstruction(const MachineInstr *MI) { + return; + + MCInst TmpInst; +- if (!lowerRISCVMachineInstrToMCInst(MI, TmpInst, *this)) +- EmitToStreamer(*OutStreamer, TmpInst); ++ switch (MI->getOpcode()) { ++ default: ++ if (!lowerRISCVMachineInstrToMCInst(MI, TmpInst, *this)) ++ EmitToStreamer(*OutStreamer, TmpInst); ++ return; ++ case TargetOpcode::STACKMAP: ++ return LowerSTACKMAP(*OutStreamer, SM, *MI); ++ ++ case TargetOpcode::PATCHPOINT: ++ return LowerPATCHPOINT(*OutStreamer, SM, *MI); ++ ++ case TargetOpcode::STATEPOINT: ++ return LowerSTATEPOINT(*OutStreamer, SM, *MI); ++ } + } + + bool RISCVAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, +@@ -188,6 +368,8 @@ void RISCVAsmPrinter::emitEndOfAsmFile(Module &M) { + + if (TM.getTargetTriple().isOSBinFormatELF()) + RTS.finishAttributeSection(); ++ ++ emitStackMaps(SM); + } + + void RISCVAsmPrinter::emitAttributes() { +diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +index 274b86593e0f..6adb772343f8 100644 +--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp ++++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +@@ -8728,6 +8728,19 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, + return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget); + case RISCV::PseudoQuietFLT_D: + return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget); ++ case TargetOpcode::STATEPOINT: ++ // STATEPOINT is a pseudo instruction which has no implicit defs/uses ++ // while jal call instruction (where statepoint will be lowered at the end) ++ // has implicit def. Add this implicit dead def here as a workaround. 
++ MI.addOperand(*MI.getMF(), MachineOperand::CreateReg(RISCV::X1, true, true, ++ false, true)); ++ LLVM_FALLTHROUGH; ++ case TargetOpcode::STACKMAP: ++ case TargetOpcode::PATCHPOINT: ++ if (!Subtarget.is64Bit()) ++ report_fatal_error("STACKMAP, PATCHPOINT and STATEPOINT are only " ++ "supported on 64-bit targets"); ++ return emitPatchPoint(MI, BB); + } + } + +diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +index 55f4a19b79eb..f2db9c8b39fc 100644 +--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp ++++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +@@ -25,6 +25,7 @@ + #include "llvm/CodeGen/MachineInstrBuilder.h" + #include "llvm/CodeGen/MachineRegisterInfo.h" + #include "llvm/CodeGen/RegisterScavenging.h" ++#include "llvm/CodeGen/StackMaps.h" + #include "llvm/MC/MCInstBuilder.h" + #include "llvm/MC/TargetRegistry.h" + #include "llvm/Support/ErrorHandling.h" +@@ -987,7 +988,25 @@ unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { + if (isCompressibleInst(MI, &ST, MRI, STI)) + return 2; + } +- return get(Opcode).getSize(); ++ ++ switch (Opcode) { ++ case TargetOpcode::STACKMAP: ++ // The upper bound for a stackmap intrinsic is the full length of its shadow ++ return StackMapOpers(&MI).getNumPatchBytes(); ++ case TargetOpcode::PATCHPOINT: ++ // The size of the patchpoint intrinsic is the number of bytes requested ++ return PatchPointOpers(&MI).getNumPatchBytes(); ++ case TargetOpcode::STATEPOINT: { ++ // The size of the statepoint intrinsic is the number of bytes requested ++ unsigned NumBytes = StatepointOpers(&MI).getNumPatchBytes(); ++ // A statepoint is at least a PSEUDOCALL ++ if (NumBytes < 8) ++ NumBytes = 8; ++ return NumBytes; ++ } ++ default: ++ return get(Opcode).getSize(); ++ } + } + + bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const { +diff --git a/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll b/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll +new file mode 100644 +index 000000000000..1e2fe065b7ed +--- /dev/null ++++ b/llvm/test/CodeGen/RISCV/rv64-patchpoint.ll +@@ -0,0 +1,68 @@ ++; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ++; RUN: llc -mtriple=riscv64 -debug-entry-values -enable-misched=0 < %s | FileCheck %s ++ ++; Trivial patchpoint codegen ++; ++define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) { ++; CHECK-LABEL: trivial_patchpoint_codegen: ++; CHECK: # %bb.0: # %entry ++; CHECK-NEXT: addi sp, sp, -16 ++; CHECK-NEXT: .cfi_def_cfa_offset 16 ++; CHECK-NEXT: sd s0, 8(sp) # 8-byte Folded Spill ++; CHECK-NEXT: sd s1, 0(sp) # 8-byte Folded Spill ++; CHECK-NEXT: .cfi_offset s0, -8 ++; CHECK-NEXT: .cfi_offset s1, -16 ++; CHECK-NEXT: mv s0, a0 ++; CHECK-NEXT: .Ltmp0: ++; CHECK-NEXT: lui ra, 3563 ++; CHECK-NEXT: addiw ra, ra, -577 ++; CHECK-NEXT: slli ra, ra, 12 ++; CHECK-NEXT: addi ra, ra, -259 ++; CHECK-NEXT: slli ra, ra, 12 ++; CHECK-NEXT: addi ra, ra, -1282 ++; CHECK-NEXT: jalr ra ++; CHECK-NEXT: mv s1, a0 ++; CHECK-NEXT: mv a0, s0 ++; CHECK-NEXT: mv a1, s1 ++; CHECK-NEXT: .Ltmp1: ++; CHECK-NEXT: lui ra, 3563 ++; CHECK-NEXT: addiw ra, ra, -577 ++; CHECK-NEXT: slli ra, ra, 12 ++; CHECK-NEXT: addi ra, ra, -259 ++; CHECK-NEXT: slli ra, ra, 12 ++; CHECK-NEXT: addi ra, ra, -1281 ++; CHECK-NEXT: jalr ra ++; CHECK-NEXT: mv a0, s1 ++; CHECK-NEXT: ld s0, 8(sp) # 8-byte Folded Reload ++; CHECK-NEXT: ld s1, 0(sp) # 8-byte Folded Reload ++; CHECK-NEXT: addi sp, sp, 16 ++; CHECK-NEXT: ret ++entry: ++ %resolveCall2 = inttoptr i64 244837814094590 to i8* ++ 
%result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 2, i32 28, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4) ++ %resolveCall3 = inttoptr i64 244837814094591 to i8* ++ tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 3, i32 28, i8* %resolveCall3, i32 2, i64 %p1, i64 %result) ++ ret i64 %result ++} ++ ++; Test small patchpoints that don't emit calls. ++define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) { ++; CHECK-LABEL: small_patchpoint_codegen: ++; CHECK: # %bb.0: # %entry ++; CHECK-NEXT: .cfi_def_cfa_offset 0 ++; CHECK-NEXT: .Ltmp2: ++; CHECK-NEXT: nop ++; CHECK-NEXT: nop ++; CHECK-NEXT: nop ++; CHECK-NEXT: nop ++; CHECK-NEXT: nop ++; CHECK-NEXT: ret ++entry: ++ %result = tail call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2) ++ ret void ++} ++ ++declare void @llvm.experimental.stackmap(i64, i32, ...) ++declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...) ++declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...) ++ +diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll +new file mode 100644 +index 000000000000..e1bed39a500b +--- /dev/null ++++ b/llvm/test/CodeGen/RISCV/rv64-stackmap-frame-setup.ll +@@ -0,0 +1,22 @@ ++; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py ++; RUN: llc -o - -verify-machineinstrs -mtriple=riscv64 -stop-after machine-sink %s | FileCheck %s --check-prefix=ISEL ++ ++define void @caller_meta_leaf() { ++ ; ISEL-LABEL: name: caller_meta_leaf ++ ; ISEL: bb.0.entry: ++ ; ISEL-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 13 ++ ; ISEL-NEXT: SD killed [[ADDI]], %stack.0.metadata, 0 :: (store (s64) into %ir.metadata) ++ ; ISEL-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 ++ ; ISEL-NEXT: STACKMAP 4, 0, 0, %stack.0.metadata, 0 :: (load (s64) from %stack.0.metadata) ++ ; ISEL-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $x2, implicit $x2 ++ ; ISEL-NEXT: PseudoRET ++entry: ++ %metadata = alloca i64, i32 3, align 8 ++ store i64 11, i64* %metadata ++ store i64 12, i64* %metadata ++ store i64 13, i64* %metadata ++ call void (i64, i32, ...) @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata) ++ ret void ++} ++ ++declare void @llvm.experimental.stackmap(i64, i32, ...) +diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll +new file mode 100644 +index 000000000000..6edc88de9232 +--- /dev/null ++++ b/llvm/test/CodeGen/RISCV/rv64-stackmap-nops.ll +@@ -0,0 +1,18 @@ ++; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ++; RUN: llc < %s -mtriple=riscv64 | FileCheck %s ++ ++define void @test_shadow_optimization() { ++; CHECK-LABEL: test_shadow_optimization: ++; CHECK: # %bb.0: # %entry ++; CHECK-NEXT: .cfi_def_cfa_offset 0 ++; CHECK-NEXT: .Ltmp0: ++; CHECK-NEXT: nop ++; CHECK-NEXT: nop ++; CHECK-NEXT: nop ++; CHECK-NEXT: ret ++entry: ++ tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 0, i32 16) ++ ret void ++} ++ ++declare void @llvm.experimental.stackmap(i64, i32, ...) 
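The LowerSTACKMAP and LowerPATCHPOINT lowerings earlier in this patch pad the reserved patch area with NOPs whose width depends on whether the compressed (C) extension is enabled: a 2-byte c.nop versus a 4-byte nop. A rough restatement of the patchpoint padding arithmetic, written in Java for consistency with the rest of this changeset; the class and method names are illustrative only:

```java
final class PatchPadding {

    /**
     * Illustrative mirror of the padding computed in LowerPATCHPOINT above: the bytes not consumed
     * by the materialized call sequence are filled with NOPs whose width depends on the C extension.
     */
    static int nopCount(int numPatchBytes, int encodedBytes, boolean hasStdExtC) {
        int nopBytes = hasStdExtC ? 2 : 4;
        if (numPatchBytes < encodedBytes) {
            throw new IllegalArgumentException("patchpoint smaller than the emitted call sequence");
        }
        if ((numPatchBytes - encodedBytes) % nopBytes != 0) {
            throw new IllegalArgumentException("padding is not a multiple of the NOP width");
        }
        return (numPatchBytes - encodedBytes) / nopBytes;
    }
}
```

In the rv64-patchpoint.ll test above, the 28 reserved bytes are fully consumed by the six-instruction address materialization plus the jalr, so no nops appear, while small_patchpoint_codegen reserves 20 bytes with a null target and is padded with five 4-byte nops.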
+diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll +new file mode 100644 +index 000000000000..e1dde9733731 +--- /dev/null ++++ b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll +@@ -0,0 +1,384 @@ ++; RUN: llc -mtriple=riscv64 < %s | FileCheck %s ++ ++; CHECK-LABEL: .section .llvm_stackmaps ++; CHECK-NEXT: __LLVM_StackMaps: ++; Header ++; CHECK-NEXT: .byte 3 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 0 ++; Num Functions ++; CHECK-NEXT: .word 12 ++; Num LargeConstants ++; CHECK-NEXT: .word 2 ++; Num Callsites ++; CHECK-NEXT: .word 16 ++ ++; Functions and stack size ++; CHECK-NEXT: .quad constantargs ++; CHECK-NEXT: .quad 0 ++; CHECK-NEXT: .quad 1 ++; CHECK-NEXT: .quad osrinline ++; CHECK-NEXT: .quad 32 ++; CHECK-NEXT: .quad 1 ++; CHECK-NEXT: .quad osrcold ++; CHECK-NEXT: .quad 0 ++; CHECK-NEXT: .quad 1 ++; CHECK-NEXT: .quad propertyRead ++; CHECK-NEXT: .quad 16 ++; CHECK-NEXT: .quad 1 ++; CHECK-NEXT: .quad propertyWrite ++; CHECK-NEXT: .quad 0 ++; CHECK-NEXT: .quad 1 ++; CHECK-NEXT: .quad jsVoidCall ++; CHECK-NEXT: .quad 0 ++; CHECK-NEXT: .quad 1 ++; CHECK-NEXT: .quad jsIntCall ++; CHECK-NEXT: .quad 0 ++; CHECK-NEXT: .quad 1 ++; CHECK-NEXT: .quad liveConstant ++; CHECK-NEXT: .quad 0 ++; CHECK-NEXT: .quad 1 ++; CHECK-NEXT: .quad spilledValue ++; CHECK-NEXT: .quad 144 ++; CHECK-NEXT: .quad 1 ++; CHECK-NEXT: .quad directFrameIdx ++; CHECK-NEXT: .quad 48 ++; CHECK-NEXT: .quad 2 ++; CHECK-NEXT: .quad longid ++; CHECK-NEXT: .quad 0 ++; CHECK-NEXT: .quad 4 ++; CHECK-NEXT: .quad needsStackRealignment ++; CHECK-NEXT: .quad -1 ++; CHECK-NEXT: .quad 1 ++ ++; Num LargeConstants ++; CHECK-NEXT: .quad 4294967295 ++; CHECK-NEXT: .quad 4294967296 ++ ++; Constant arguments ++; ++; CHECK-NEXT: .quad 1 ++; CHECK-NEXT: .word .L{{.*}}-constantargs ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 4 ++; SmallConstant ++; CHECK-NEXT: .byte 4 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 65535 ++; SmallConstant ++; CHECK-NEXT: .byte 4 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 65536 ++; SmallConstant ++; CHECK-NEXT: .byte 5 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++; LargeConstant at index 0 ++; CHECK-NEXT: .byte 5 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 1 ++ ++define void @constantargs() { ++entry: ++ %0 = inttoptr i64 244837814094590 to i8* ++ tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 1, i32 28, i8* %0, i32 0, i64 65535, i64 65536, i64 4294967295, i64 4294967296) ++ ret void ++} ++ ++; Inline OSR Exit ++; ++; CHECK: .word .L{{.*}}-osrinline ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 2 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++define void @osrinline(i64 %a, i64 %b) { ++entry: ++ ; Runtime void->void call. ++ call void inttoptr (i64 244837814094590 to void ()*)() ++ ; Followed by inline OSR patchpoint with 12-byte shadow and 2 live vars. ++ call void (i64, i32, ...) 
@llvm.experimental.stackmap(i64 3, i32 12, i64 %a, i64 %b) ++ ret void ++} ++ ++; Cold OSR Exit ++; ++; 2 live variables in register. ++; ++; CHECK: .word .L{{.*}}-osrcold ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 2 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++define void @osrcold(i64 %a, i64 %b) { ++entry: ++ %test = icmp slt i64 %a, %b ++ br i1 %test, label %ret, label %cold ++cold: ++ ; OSR patchpoint with 28-byte nop-slide and 2 live vars. ++ %thunk = inttoptr i64 244837814094590 to i8* ++ call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4, i32 28, i8* %thunk, i32 0, i64 %a, i64 %b) ++ unreachable ++ret: ++ ret void ++} ++ ++; Property Read ++; CHECK-LABEL: .word .L{{.*}}-propertyRead ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 2 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++define i64 @propertyRead(i64* %obj) { ++entry: ++ %resolveRead = inttoptr i64 244837814094590 to i8* ++ %result = call anyregcc i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 5, i32 28, i8* %resolveRead, i32 1, i64* %obj) ++ %add = add i64 %result, 3 ++ ret i64 %add ++} ++ ++; Property Write ++; CHECK: .word .L{{.*}}-propertyWrite ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 2 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++define void @propertyWrite(i64 %dummy1, i64* %obj, i64 %dummy2, i64 %a) { ++entry: ++ %resolveWrite = inttoptr i64 244837814094590 to i8* ++ call anyregcc void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 6, i32 28, i8* %resolveWrite, i32 2, i64* %obj, i64 %a) ++ ret void ++} ++ ++; Void JS Call ++; ++; 2 live variables in registers. ++; ++; CHECK: .word .L{{.*}}-jsVoidCall ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 2 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++define void @jsVoidCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) { ++entry: ++ %resolveCall = inttoptr i64 244837814094590 to i8* ++ call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 7, i32 28, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2) ++ ret void ++} ++ ++; i64 JS Call ++; ++; 2 live variables in registers. 
++; ++; CHECK: .word .L{{.*}}-jsIntCall ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 2 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++; CHECK-NEXT: .byte 1 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half {{[0-9]+}} ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 0 ++define i64 @jsIntCall(i64 %dummy1, i64* %obj, i64 %arg, i64 %l1, i64 %l2) { ++entry: ++ %resolveCall = inttoptr i64 244837814094590 to i8* ++ %result = call i64 (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.i64(i64 8, i32 28, i8* %resolveCall, i32 2, i64* %obj, i64 %arg, i64 %l1, i64 %l2) ++ %add = add i64 %result, 3 ++ ret i64 %add ++} ++ ++; Map a constant value. ++; ++; CHECK: .word .L{{.*}}-liveConstant ++; CHECK-NEXT: .half 0 ++; 1 location ++; CHECK-NEXT: .half 1 ++; Loc 0: SmallConstant ++; CHECK-NEXT: .byte 4 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word 33 ++ ++define void @liveConstant() { ++ tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 15, i32 8, i32 33) ++ ret void ++} ++ ++; Spilled stack map values. ++; ++; Verify 28 stack map entries. ++; ++; CHECK-LABEL: .word .L{{.*}}-spilledValue ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .half 28 ++; ++; Check that at least one is a spilled entry from RBP. ++; Location: Indirect RBP + ... ++; CHECK: .byte 3 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half 2 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word ++define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) { ++entry: ++ call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 28, i8* null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i64 %l25, i64 %l26, i64 %l27) ++ ret void ++} ++ ++; Directly map an alloca's address. ++; ++; Callsite 16 ++; CHECK-LABEL: .word .L{{.*}}-directFrameIdx ++; CHECK-NEXT: .half 0 ++; 1 location ++; CHECK-NEXT: .half 1 ++; Loc 0: Direct RBP - ofs ++; CHECK-NEXT: .byte 2 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half 2 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word ++ ++; Callsite 17 ++; CHECK-LABEL: .word .L{{.*}}-directFrameIdx ++; CHECK-NEXT: .half 0 ++; 2 locations ++; CHECK-NEXT: .half 2 ++; Loc 0: Direct RBP - ofs ++; CHECK-NEXT: .byte 2 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half 2 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word ++; Loc 1: Direct RBP - ofs ++; CHECK-NEXT: .byte 2 ++; CHECK-NEXT: .byte 0 ++; CHECK-NEXT: .half 8 ++; CHECK-NEXT: .half 2 ++; CHECK-NEXT: .half 0 ++; CHECK-NEXT: .word ++define void @directFrameIdx() { ++entry: ++ %metadata1 = alloca i64, i32 3, align 8 ++ store i64 11, i64* %metadata1 ++ store i64 12, i64* %metadata1 ++ store i64 13, i64* %metadata1 ++ call void (i64, i32, ...) 
@llvm.experimental.stackmap(i64 16, i32 0, i64* %metadata1) ++ %metadata2 = alloca i8, i32 4, align 8 ++ %metadata3 = alloca i16, i32 4, align 8 ++ call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 17, i32 4, i8* null, i32 0, i8* %metadata2, i16* %metadata3) ++ ret void ++} ++ ++; Test a 64-bit ID. ++; ++; CHECK: .quad 4294967295 ++; CHECK-LABEL: .word .L{{.*}}-longid ++; CHECK: .quad 4294967296 ++; CHECK-LABEL: .word .L{{.*}}-longid ++; CHECK: .quad 9223372036854775807 ++; CHECK-LABEL: .word .L{{.*}}-longid ++; CHECK: .quad -1 ++; CHECK-LABEL: .word .L{{.*}}-longid ++define void @longid() { ++entry: ++ tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967295, i32 0, i8* null, i32 0) ++ tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 4294967296, i32 0, i8* null, i32 0) ++ tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 9223372036854775807, i32 0, i8* null, i32 0) ++ tail call void (i64, i32, i8*, i32, ...) @llvm.experimental.patchpoint.void(i64 -1, i32 0, i8* null, i32 0) ++ ret void ++} ++ ++; A stack frame which needs to be realigned at runtime (to meet alignment ++; criteria for values on the stack) does not have a fixed frame size. ++; CHECK-LABEL: .word .L{{.*}}-needsStackRealignment ++; CHECK-NEXT: .half 0 ++; 0 locations ++; CHECK-NEXT: .half 0 ++define void @needsStackRealignment() { ++ %val = alloca i64, i32 3, align 128 ++ tail call void (...) @escape_values(i64* %val) ++; Note: Adding any non-constant to the stackmap would fail because we ++; expected to be able to address off the frame pointer. In a realigned ++; frame, we must use the stack pointer instead. This is a separate bug. ++ tail call void (i64, i32, ...) @llvm.experimental.stackmap(i64 0, i32 0) ++ ret void ++} ++declare void @escape_values(...) ++ ++declare void @llvm.experimental.stackmap(i64, i32, ...) ++declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...) ++declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...) 
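Earlier in this patch, RISCVInstrInfo::getInstSizeInBytes is extended to report sizes for the new pseudo instructions: a STACKMAP or PATCHPOINT occupies the number of patch bytes it requested, while a STATEPOINT is at least as large as the PseudoCALL it may expand to (8 bytes, auipc plus jalr). A sketch of that size rule, again in Java for consistency with the rest of this changeset; the enum and method are illustrative, not part of the patch:

```java
final class PseudoInstrSize {

    enum Pseudo { STACKMAP, PATCHPOINT, STATEPOINT }

    /** Illustrative restatement of the getInstSizeInBytes cases added above. */
    static int sizeInBytes(Pseudo opcode, int numPatchBytes) {
        switch (opcode) {
            case STACKMAP:
                // Upper bound: the full length of the stackmap's shadow.
                return numPatchBytes;
            case PATCHPOINT:
                // Exactly the number of bytes the patchpoint reserved.
                return numPatchBytes;
            case STATEPOINT:
                // A statepoint is at least a PseudoCALL expansion (8 bytes).
                return Math.max(numPatchBytes, 8);
            default:
                throw new AssertionError(opcode);
        }
    }
}
```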
+-- +2.36.0 + diff --git a/sdk/llvm-patches/native-image/README.md b/sdk/llvm-patches/native-image/README.md index 2b58425e5ce0..87801beafbe7 100644 --- a/sdk/llvm-patches/native-image/README.md +++ b/sdk/llvm-patches/native-image/README.md @@ -8,3 +8,4 @@ or in preparation for such review: * [Statepoints] Support for compressed pointers in the statepoint emission pass _(review in preparation)_ * [AArch64] Introduce option to force placement of the frame record on top of the stack frame _(review in preparation)_ +* [RISCV] Add support for RISC-V Stackmaps/Statepoints/Patchpoints ([review](https://reviews.llvm.org/D123496)) diff --git a/sdk/mx.sdk/suite.py b/sdk/mx.sdk/suite.py index aaf4537fd8f5..f48470686454 100644 --- a/sdk/mx.sdk/suite.py +++ b/sdk/mx.sdk/suite.py @@ -179,33 +179,37 @@ ], }, "LLVM_ORG" : { - "version" : "14.0.3-2-g772a7a659e-bgd661ed0276", + "version" : "14.0.6-3-gc7a4a53c32-bgc5e298fd27", "host" : "https://lafo.ssw.uni-linz.ac.at/pub/llvm-org", "os_arch" : { "linux" : { "amd64" : { "urls" : ["{host}/llvm-llvmorg-{version}-linux-amd64.tar.gz"], - "sha1" : "f32dc5497d9dd679f9ef42fd26918c3ad659b8e3", + "sha1" : "4cd45eff8e914189dd8bebcfaf46271c412c57fa", }, "aarch64" : { "urls" : ["{host}/llvm-llvmorg-{version}-linux-aarch64.tar.gz"], - "sha1" : "8bf8d3f96b6be5d361e809d1928c3b6c46791dfe", - } + "sha1" : "bf95d0cb96d29d061e2106f221f9535d38d37daf", + }, + "riscv64": { + "urls" : ["{host}/llvm-llvmorg-{version}-linux-riscv64.tar.gz"], + "sha1" : "4a75da563e277f5d222778f2b814b8e5f7e82609", + }, }, "darwin" : { "amd64" : { "urls" : ["{host}/llvm-llvmorg-{version}-darwin-amd64.tar.gz"], - "sha1" : "3c8677b9802fb4b98584b719b75161385c794013", + "sha1" : "61960f183a08436c16652c6bc8b0de67468899fb", }, "aarch64" : { "urls" : ["{host}/llvm-llvmorg-{version}-darwin-aarch64.tar.gz"], - "sha1" : "9e759cc4d31392d20a84596c6216952fc27054b8", + "sha1" : "f8a11403a5a975ff7eb231c785979a37000cbf62", } }, "windows" : { "amd64" : { "urls" : ["{host}/llvm-llvmorg-{version}-windows-amd64.tar.gz"], - "sha1" : "5ec8f96f0d77c0ba24b2e0f6b507252b5f116916", + "sha1" : "3e4cae36505ba566a983b3beacdb3523ccbf114e", } }, "": { @@ -217,19 +221,19 @@ "license" : "Apache-2.0-LLVM", }, "LLVM_ORG_COMPILER_RT_LINUX" : { - "version" : "14.0.3-2-g772a7a659e-bgd661ed0276", + "version" : "14.0.6-3-gc7a4a53c32-bgc5e298fd27", "host" : "https://lafo.ssw.uni-linz.ac.at/pub/llvm-org", # we really want linux-amd64, also on non-linux and non-amd64 platforms for cross-compilation "urls" : ["{host}/compiler-rt-llvmorg-{version}-linux-amd64.tar.gz"], - "sha1" : "95e3f75f819c510111d8c460d74a7a9186b59e5a", + "sha1" : "e214d63812b9276880e5c4e1849a493653f1e269", "license" : "Apache-2.0-LLVM", }, "LLVM_ORG_SRC" : { - "version" : "14.0.3-2-g772a7a659e-bgd661ed0276", + "version" : "14.0.6-3-gc7a4a53c32-bgc5e298fd27", "host" : "https://lafo.ssw.uni-linz.ac.at/pub/llvm-org", "packedResource" : True, "urls" : ["{host}/llvm-src-llvmorg-{version}.tar.gz"], - "sha1" : "f762ff022e26af02b0fbb3176d7280bc4809f562", + "sha1" : "4b631ecd732e38d491ff6f41da796e393cb1d874", "license" : "Apache-2.0-LLVM", }, }, diff --git a/sdk/src/org.graalvm.home/src/org/graalvm/home/HomeFinder.java b/sdk/src/org.graalvm.home/src/org/graalvm/home/HomeFinder.java index c4e805f9d5b1..0e4b854e5fe1 100644 --- a/sdk/src/org.graalvm.home/src/org/graalvm/home/HomeFinder.java +++ b/sdk/src/org.graalvm.home/src/org/graalvm/home/HomeFinder.java @@ -98,8 +98,19 @@ public static HomeFinder getInstance() { if (ImageInfo.inImageCode() && 
ImageSingletons.contains(HomeFinder.class)) { return ImageSingletons.lookup(HomeFinder.class); } - final ServiceLoader serviceLoader = ServiceLoader.load(HomeFinder.class); - final Iterator iterator = serviceLoader.iterator(); + Class lookupClass = HomeFinder.class; + ModuleLayer moduleLayer = lookupClass.getModule().getLayer(); + Iterable services; + if (moduleLayer != null) { + services = ServiceLoader.load(moduleLayer, HomeFinder.class); + } else { + services = ServiceLoader.load(HomeFinder.class, lookupClass.getClassLoader()); + } + Iterator iterator = services.iterator(); + if (!iterator.hasNext()) { + services = ServiceLoader.load(HomeFinder.class); + iterator = services.iterator(); + } try { return iterator.next(); } catch (NoSuchElementException e) { diff --git a/sdk/src/org.graalvm.nativeimage/src/org/graalvm/nativeimage/ImageInfo.java b/sdk/src/org.graalvm.nativeimage/src/org/graalvm/nativeimage/ImageInfo.java index 94a3090fc1fd..6575bb126f1f 100644 --- a/sdk/src/org.graalvm.nativeimage/src/org/graalvm/nativeimage/ImageInfo.java +++ b/sdk/src/org.graalvm.nativeimage/src/org/graalvm/nativeimage/ImageInfo.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * The Universal Permissive License (UPL), Version 1.0 @@ -118,7 +118,7 @@ private ImageInfo() { * @since 19.0 */ public static boolean inImageCode() { - return System.getProperty(PROPERTY_IMAGE_CODE_KEY) != null; + return inImageBuildtimeCode() || inImageRuntimeCode(); } /** diff --git a/sdk/src/org.graalvm.polyglot/src/org/graalvm/polyglot/Engine.java b/sdk/src/org.graalvm.polyglot/src/org/graalvm/polyglot/Engine.java index 66b483912c2d..647a06575b4c 100644 --- a/sdk/src/org.graalvm.polyglot/src/org/graalvm/polyglot/Engine.java +++ b/sdk/src/org.graalvm.polyglot/src/org/graalvm/polyglot/Engine.java @@ -906,7 +906,20 @@ public AbstractPolyglotImpl run() { } private Iterator searchServiceLoader() throws InternalError { - return ServiceLoader.load(AbstractPolyglotImpl.class).iterator(); + Class lookupClass = AbstractPolyglotImpl.class; + ModuleLayer moduleLayer = lookupClass.getModule().getLayer(); + Iterable services; + if (moduleLayer != null) { + services = ServiceLoader.load(moduleLayer, AbstractPolyglotImpl.class); + } else { + services = ServiceLoader.load(AbstractPolyglotImpl.class, lookupClass.getClassLoader()); + } + Iterator iterator = services.iterator(); + if (!iterator.hasNext()) { + services = ServiceLoader.load(AbstractPolyglotImpl.class); + iterator = services.iterator(); + } + return iterator; } private AbstractPolyglotImpl loadAndValidateProviders(Iterator providers) throws AssertionError { diff --git a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/ConstantTypeFlow.java b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/ConstantTypeFlow.java new file mode 100644 index 000000000000..6a43d5aad840 --- /dev/null +++ b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/ConstantTypeFlow.java @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.graal.pointsto.flow; + +import com.oracle.graal.pointsto.PointsToAnalysis; +import com.oracle.graal.pointsto.meta.AnalysisType; +import com.oracle.graal.pointsto.typestate.TypeState; + +import jdk.vm.ci.code.BytecodePosition; + +/** + * Models a flow that introduces a constant in the type flow graph. Depending on the analysis policy + * this could be just the type of the constant, without the object identity. + */ +public class ConstantTypeFlow extends TypeFlow { + + /** The constant state is propagated when the flow is linked in. */ + private final TypeState constantState; + + /** Constant flow has an immutable type state. */ + public ConstantTypeFlow(BytecodePosition position, AnalysisType type, TypeState constantState) { + super(position, type, TypeState.forEmpty()); + this.constantState = constantState; + assert source != null; + assert declaredType == null || declaredType.isInstantiated() : "Type " + declaredType + " not instantiated " + position; + } + + public ConstantTypeFlow(ConstantTypeFlow original, MethodFlowsGraph methodFlows) { + super(original, methodFlows); + this.constantState = original.constantState; + } + + @Override + public TypeFlow copy(PointsToAnalysis bb, MethodFlowsGraph methodFlows) { + return new ConstantTypeFlow(this, methodFlows); + } + + @Override + public void initFlow(PointsToAnalysis bb) { + /* + * Inject state into graphs lazily, only after the type flow graph is pruned. When context + * sensitivity is enabled the default graph is kept clean and used as a template for clones. 
+ */ + addState(bb, constantState); + } + + @Override + public String toString() { + return "ConstantFlow<" + getState() + ">"; + } +} diff --git a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodFlowsGraph.java b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodFlowsGraph.java index 2645e07c9307..e505e8af6012 100644 --- a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodFlowsGraph.java +++ b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodFlowsGraph.java @@ -45,7 +45,6 @@ import com.oracle.graal.pointsto.flow.OffsetStoreTypeFlow.AbstractUnsafeStoreTypeFlow; import com.oracle.graal.pointsto.meta.AnalysisMethod; import com.oracle.graal.pointsto.meta.PointsToAnalysisMethod; -import com.oracle.graal.pointsto.typestate.TypeState; import com.oracle.graal.pointsto.util.AnalysisError; import jdk.vm.ci.code.BytecodePosition; @@ -96,10 +95,6 @@ public > T lookupCloneOf(@SuppressWarnings("unused") Point public void init(final PointsToAnalysis bb) { for (TypeFlow flow : flows()) { - if (flow instanceof NewInstanceTypeFlow) { - NewInstanceTypeFlow newInstance = (NewInstanceTypeFlow) flow; - newInstance.setSourceState(TypeState.forExactType(bb, newInstance.type(), false)); - } if (flow instanceof AbstractUnsafeLoadTypeFlow) { bb.registerUnsafeLoad((AbstractUnsafeLoadTypeFlow) flow); } diff --git a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodFlowsGraphClone.java b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodFlowsGraphClone.java index 8e68d6ddb71e..bbf691650564 100644 --- a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodFlowsGraphClone.java +++ b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodFlowsGraphClone.java @@ -29,12 +29,12 @@ import java.util.ArrayList; import java.util.List; -import com.oracle.graal.pointsto.util.AnalysisError; import org.graalvm.collections.EconomicMap; import com.oracle.graal.pointsto.PointsToAnalysis; import com.oracle.graal.pointsto.flow.context.AnalysisContext; import com.oracle.graal.pointsto.meta.PointsToAnalysisMethod; +import com.oracle.graal.pointsto.util.AnalysisError; public class MethodFlowsGraphClone extends MethodFlowsGraph { @@ -182,7 +182,7 @@ public void linkClones(final PointsToAnalysis bb) { for (TypeFlow originalUse : original.getUses()) { // only clone the original uses assert !(originalUse instanceof AllInstantiatedTypeFlow); - assert !(originalUse.isClone()); + assert !(originalUse.isClone()) : "Original use " + originalUse + " should not be a clone. 
Reached from: " + original; if (nonCloneableFlow(originalUse)) { clone.addUse(bb, originalUse); diff --git a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodTypeFlowBuilder.java b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodTypeFlowBuilder.java index 4d588041bb3e..2d852755b1e2 100644 --- a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodTypeFlowBuilder.java +++ b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/MethodTypeFlowBuilder.java @@ -105,7 +105,6 @@ import org.graalvm.compiler.replacements.nodes.ObjectClone; import org.graalvm.compiler.replacements.nodes.UnaryMathIntrinsicNode; import org.graalvm.compiler.word.WordCastNode; -import com.oracle.svm.util.GuardedAnnotationAccess; import com.oracle.graal.pointsto.PointsToAnalysis; import com.oracle.graal.pointsto.flow.LoadFieldTypeFlow.LoadInstanceFieldTypeFlow; @@ -129,6 +128,8 @@ import com.oracle.graal.pointsto.phases.InlineBeforeAnalysis; import com.oracle.graal.pointsto.results.StaticAnalysisResultsBuilder; import com.oracle.graal.pointsto.typestate.TypeState; +import com.oracle.graal.pointsto.util.AnalysisError; +import com.oracle.svm.util.GuardedAnnotationAccess; import jdk.vm.ci.code.BytecodeFrame; import jdk.vm.ci.code.BytecodePosition; @@ -398,8 +399,8 @@ protected void apply() { if (node.asJavaConstant() == null && constant instanceof VMConstant) { // do nothing } else if (node.asJavaConstant().isNull()) { - TypeFlowBuilder sourceBuilder = TypeFlowBuilder.create(bb, node, SourceTypeFlow.class, () -> { - SourceTypeFlow constantSource = new SourceTypeFlow(sourcePosition(node), TypeState.forNull()); + TypeFlowBuilder sourceBuilder = TypeFlowBuilder.create(bb, node, ConstantTypeFlow.class, () -> { + ConstantTypeFlow constantSource = new ConstantTypeFlow(sourcePosition(node), null, TypeState.forNull()); flowsGraph.addMiscEntryFlow(constantSource); return constantSource; }); @@ -413,8 +414,8 @@ protected void apply() { assert StampTool.isExactType(node); AnalysisType type = (AnalysisType) StampTool.typeOrNull(node); assert type.isInstantiated(); - TypeFlowBuilder sourceBuilder = TypeFlowBuilder.create(bb, node, SourceTypeFlow.class, () -> { - SourceTypeFlow constantSource = new SourceTypeFlow(sourcePosition(node), TypeState.forConstant(this.bb, node.asJavaConstant(), type)); + TypeFlowBuilder sourceBuilder = TypeFlowBuilder.create(bb, node, ConstantTypeFlow.class, () -> { + ConstantTypeFlow constantSource = new ConstantTypeFlow(sourcePosition(node), type, TypeState.forConstant(this.bb, node.asJavaConstant(), type)); flowsGraph.addMiscEntryFlow(constantSource); return constantSource; }); @@ -503,13 +504,15 @@ public TypeFlowBuilder lookup(ValueNode n) { */ ObjectStamp stamp = (ObjectStamp) n.stamp(NodeView.DEFAULT); if (stamp.isEmpty()) { - result = TypeFlowBuilder.create(bb, node, SourceTypeFlow.class, () -> new SourceTypeFlow(sourcePosition(node), TypeState.forEmpty())); - } else if (stamp.isExactType()) { + throw AnalysisError.shouldNotReachHere("Stamp for node " + n + " is empty."); + } + if (stamp.isExactType()) { /* * We are lucky: the stamp tells us which type the node has. 
*/ result = TypeFlowBuilder.create(bb, node, SourceTypeFlow.class, () -> { - SourceTypeFlow src = new SourceTypeFlow(sourcePosition(node), TypeState.forExactType(bb, (AnalysisType) stamp.type(), !stamp.nonNull())); + AnalysisType type = (AnalysisType) stamp.type(); + SourceTypeFlow src = new SourceTypeFlow(sourcePosition(node), type, !stamp.nonNull()); flowsGraph.addMiscEntryFlow(src); return src; }); @@ -520,22 +523,13 @@ public TypeFlowBuilder lookup(ValueNode n) { * to the node's type). Is is a conservative assumption. */ AnalysisType type = (AnalysisType) (stamp.type() == null ? bb.getObjectType() : stamp.type()); - - if (type.isJavaLangObject()) { - /* Return a proxy to the all-instantiated type flow. */ - result = TypeFlowBuilder.create(bb, node, TypeFlow.class, () -> { - TypeFlow proxy = bb.analysisPolicy().proxy(sourcePosition(node), bb.getAllInstantiatedTypeFlow()); - flowsGraph.addMiscEntryFlow(proxy); - return proxy; - }); - } else { - result = TypeFlowBuilder.create(bb, node, TypeFlow.class, () -> { - TypeFlow proxy = bb.analysisPolicy().proxy(sourcePosition(node), type.getTypeFlow(bb, true)); - flowsGraph.addMiscEntryFlow(proxy); - return proxy; - }); - } + result = TypeFlowBuilder.create(bb, node, TypeFlow.class, () -> { + TypeFlow proxy = bb.analysisPolicy().proxy(sourcePosition(node), type.getTypeFlow(bb, true)); + flowsGraph.addMiscEntryFlow(proxy); + return proxy; + }); } + flows.put(node, result); } return result; diff --git a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/NewInstanceTypeFlow.java b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/NewInstanceTypeFlow.java index d1b1d000e895..ac01ca92aebf 100644 --- a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/NewInstanceTypeFlow.java +++ b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/NewInstanceTypeFlow.java @@ -36,7 +36,11 @@ import jdk.vm.ci.code.BytecodePosition; -public class NewInstanceTypeFlow extends SourceTypeFlowBase { +/** + * Models a flow that introduces an instantiated type in the type flow graph. The type can originate + * from a new-instance, new-array, new-multi-array, box, arrays-copy-of, etc. + */ +public class NewInstanceTypeFlow extends TypeFlow { @SuppressWarnings("rawtypes") // private static final AtomicReferenceFieldUpdater HEAP_OBJECTS_CACHE_UPDATER = // @@ -49,23 +53,29 @@ public class NewInstanceTypeFlow extends SourceTypeFlowBase { * NewInstanceTypeFlow per context of a method, however, depending of the analysis policy, * multiple NewInstanceTypeFlows can generate objects with the same allocation context. */ - protected volatile ConcurrentMap heapObjectsCache; - - /** Source flow has an immutable type state. */ - protected final AnalysisType type; + volatile ConcurrentMap heapObjectsCache; public NewInstanceTypeFlow(BytecodePosition position, AnalysisType type) { - this(position, type, TypeState.forNull()); + /* The actual type state is set lazily in initFlow(). */ + super(position, type, TypeState.forEmpty()); + assert source != null; + assert declaredType.isInstantiated() : "Type " + declaredType + " not instantiated " + position; } - protected NewInstanceTypeFlow(BytecodePosition position, AnalysisType type, TypeState typeState) { - super(position, type, typeState); - this.type = type; + @Override + public void initFlow(PointsToAnalysis bb) { + if (!isClone()) { + /* + * Inject state into graphs lazily, only after the type flow graph is pruned. 
When + * context sensitivity is enabled the default graph is kept clean and used as a template + * for clones. For clones the state is provided by createCloneState(), on creation. + */ + addState(bb, TypeState.forExactType(bb, declaredType, false)); + } } - protected NewInstanceTypeFlow(PointsToAnalysis bb, NewInstanceTypeFlow original, MethodFlowsGraph methodFlows) { - super(bb, original, methodFlows, original.cloneSourceState(bb, methodFlows)); - this.type = original.type; + NewInstanceTypeFlow(PointsToAnalysis bb, NewInstanceTypeFlow original, MethodFlowsGraph methodFlows) { + super(original, methodFlows, original.createCloneState(bb, methodFlows)); } @Override @@ -74,12 +84,12 @@ public TypeFlow copy(PointsToAnalysis bb, MethodFlowsGraph met } /** Create the type state for a clone. */ - protected TypeState cloneSourceState(PointsToAnalysis bb, MethodFlowsGraph methodFlows) { + TypeState createCloneState(PointsToAnalysis bb, MethodFlowsGraph methodFlows) { AnalysisContext allocationContext = bb.analysisPolicy().allocationContext(bb, methodFlows); - if (bb.analysisPolicy().isContextSensitiveAllocation(bb, type, allocationContext)) { + if (bb.analysisPolicy().isContextSensitiveAllocation(bb, declaredType, allocationContext)) { /* - * If the analysis is context sensitive create a new heap object for the new context, or + * If the analysis is context-sensitive create a new heap object for the new context, or * return an existing one. The original NewInstanceTypeFlow is the one that stores the * (Context->HeapObject) mapping. */ @@ -91,7 +101,7 @@ protected TypeState cloneSourceState(PointsToAnalysis bb, MethodFlowsGraph metho * allocation site) we use just the type of the object wrapped into the AbstractObject * base class. There is no cloning in this case. */ - return TypeState.forExactType(bb, type, false); + return TypeState.forExactType(bb, declaredType, false); } } @@ -104,17 +114,13 @@ private AnalysisObject createHeapObject(PointsToAnalysis bb, AnalysisContext obj AnalysisObject result = heapObjectsCache.get(objContext); if (result == null) { - AnalysisObject newValue = bb.analysisPolicy().createHeapObject(bb, type, source, objContext); + AnalysisObject newValue = bb.analysisPolicy().createHeapObject(bb, declaredType, source, objContext); AnalysisObject oldValue = heapObjectsCache.putIfAbsent(objContext, newValue); result = oldValue != null ? 
oldValue : newValue; } return result; } - public AnalysisType type() { - return type; - } - @Override public String toString() { return "NewInstanceFlow<" + getState() + ">"; diff --git a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/SourceTypeFlow.java b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/SourceTypeFlow.java index 8a2c061edbf4..a88b7425f45b 100644 --- a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/SourceTypeFlow.java +++ b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/SourceTypeFlow.java @@ -24,33 +24,77 @@ */ package com.oracle.graal.pointsto.flow; +import org.graalvm.compiler.nodes.extended.BytecodeExceptionNode; +import org.graalvm.compiler.nodes.extended.JavaReadNode; + import com.oracle.graal.pointsto.PointsToAnalysis; +import com.oracle.graal.pointsto.meta.AnalysisType; import com.oracle.graal.pointsto.typestate.TypeState; +import com.oracle.graal.pointsto.util.AnalysisError; import jdk.vm.ci.code.BytecodePosition; -public class SourceTypeFlow extends SourceTypeFlowBase { +/** + * Models a flow that *conditionally* introduces a type in the type flow graph only if it is marked + * as instantiated, e.g., by scanning a constant of that type or parsing an allocation bytecode. + * This flow is used for nodes that produce an object but don't register its type as instantiated + * themselves, e.g., like {@link JavaReadNode} or {@link BytecodeExceptionNode}. Also LoadHubNode, + * GetClassNode, LoadVMThreadLocalNode. + * + * The type state of this source is "empty" or "null" until the declared type is marked as + * instantiated, depending on the null state of the node stamp. When this flow is initialized it + * registers a callback with its declared type such that when the type is marked as instantiated it + * propagates the source state. If the declared type is already instantiated when the source flow is + * initialized then the callback is immediately triggered. + * + * If the type is really never instantiated, i.e., {@link AnalysisType#isInstantiated()} is still + * false at the end of the static analysis, then the callback is never triggered. That is correct, + * because in this case that type can never be produced by this flow (and the only possible value is + * {@code null}, if the stamp can be null, or empty). + */ +public final class SourceTypeFlow extends TypeFlow { - /** - * Source flow has an immutable type state. - */ - public SourceTypeFlow(BytecodePosition position, TypeState state) { - super(position, state); + public SourceTypeFlow(BytecodePosition position, AnalysisType type, boolean canBeNull) { + super(position, type, canBeNull); } - public SourceTypeFlow(PointsToAnalysis bb, SourceTypeFlow original, MethodFlowsGraph methodFlows) { - super(bb, original, methodFlows); + public SourceTypeFlow(SourceTypeFlow original, MethodFlowsGraph methodFlows) { + super(original, methodFlows); } @Override public TypeFlow copy(PointsToAnalysis bb, MethodFlowsGraph methodFlows) { - return new SourceTypeFlow(bb, this, methodFlows); + return new SourceTypeFlow(this, methodFlows); + } + + @Override + public void initFlow(PointsToAnalysis bb) { + /* Propagate the source state when the type is marked as instantiated. 
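+         * If the declared type is already instantiated the callback fires immediately;
+         * otherwise it fires at most once, as soon as the type is first marked as
+         * instantiated. Until then this flow only holds the empty or null-only state
+         * implied by the node's stamp.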
*/ + declaredType.registerInstantiatedCallback(a -> addState(bb, TypeState.forExactType(bb, declaredType, false))); + } + + @Override + public void onObservedSaturated(PointsToAnalysis bb, TypeFlow observed) { + AnalysisError.shouldNotReachHere("NewInstanceTypeFlow cannot saturate."); + } + + @Override + protected void onInputSaturated(PointsToAnalysis bb, TypeFlow input) { + AnalysisError.shouldNotReachHere("NewInstanceTypeFlow cannot saturate."); + } + + @Override + protected void onSaturated(PointsToAnalysis bb) { + AnalysisError.shouldNotReachHere("NewInstanceTypeFlow cannot saturate."); + } + + @Override + public boolean canSaturate() { + return false; } @Override public String toString() { - StringBuilder str = new StringBuilder(); - str.append("SourceFlow<").append(getState()).append(">"); - return str.toString(); + return "SourceFlow<" + getState() + ">"; } } diff --git a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/SourceTypeFlowBase.java b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/SourceTypeFlowBase.java deleted file mode 100644 index e086cee996c1..000000000000 --- a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/SourceTypeFlowBase.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package com.oracle.graal.pointsto.flow; - -import com.oracle.graal.pointsto.PointsToAnalysis; -import com.oracle.graal.pointsto.meta.AnalysisType; -import com.oracle.graal.pointsto.typestate.TypeState; -import com.oracle.graal.pointsto.util.AnalysisError; - -import jdk.vm.ci.code.BytecodePosition; - -/** - * The all-instantiated type state is defined as the maximum state that is allowed. If our type was - * just discovered as instantiated, it is not yet part of the "all instantiated" flow (so that - * updates to the all-instantiated flow can be batched together). - * - * Therefore, the type state of this source is "empty" until the "all instantiated" type state gets - * updated. We discover that by registering ourself as an observer of the all-instantiated type - * flow, and de-registering ourselves as soon as we change our type from "empty" to the actual exact - * type. 
- * - * To temporarily suspend updates containing the type that is not in all-instantiated yet we save - * the state in a temporary, sourceState, which is copied in the type state of the source flow when - * all-instantiated is updated. - * - * After the source state type is added to the all-instantiated state this.state and - * this.sourceState point to the same state object (due to TypeState.addState() union operation - * special case optimization). - * - * If the type is really never instantiated, i.e., AnalysisType.isInstantiated() is still false at - * the end of the static analysis, then the rewrite never happens. That is correct, because in this - * case that type can never be returned by this flow (and the only possible value is null, which is - * set regardless of the type update). - */ -public abstract class SourceTypeFlowBase extends TypeFlow { - - /** - * The source state is a temporary buffer for this flow's type state. The source state is added - * to the flow, and propagated to its uses, only when its exact type is added to the - * all-instantiated type state. - */ - protected TypeState sourceState; - - public SourceTypeFlowBase(BytecodePosition position, TypeState state) { - this(position, state.exactType(), state); - } - - public SourceTypeFlowBase(BytecodePosition position, AnalysisType declaredType, TypeState state) { - super(position, declaredType); - this.sourceState = state; - assert source != null; - } - - public SourceTypeFlowBase(PointsToAnalysis bb, SourceTypeFlowBase original, MethodFlowsGraph methodFlows) { - this(bb, original, methodFlows, original.sourceState); - } - - public SourceTypeFlowBase(@SuppressWarnings("unused") PointsToAnalysis bb, SourceTypeFlowBase original, MethodFlowsGraph methodFlows, TypeState state) { - super(original, methodFlows); - this.sourceState = state; - } - - public void setSourceState(TypeState sourceState) { - this.sourceState = sourceState; - } - - @Override - public void initFlow(PointsToAnalysis bb) { - /* When the clone is linked check if the all-instantiated contains the source state type. */ - if (sourceState.isNull() || sourceState.isEmpty() || bb.getAllInstantiatedTypeFlow().getState().containsType(sourceState.exactType())) { - /* If yes, set the state and propagate it to uses. */ - addState(bb, sourceState); - } else { - /* - * If no, update the can-be-null state of the source flow and register it as an observer - * for all-instantiated. - */ - addState(bb, sourceState.canBeNull() ? TypeState.forNull() : TypeState.forEmpty()); - bb.getAllInstantiatedTypeFlow().addObserver(bb, this); - } - } - - @Override - public void onObservedUpdate(PointsToAnalysis bb) { - /* When the all-instantiated changes it will notify the source flow. */ - if (bb.getAllInstantiatedTypeFlow().getState().containsType(sourceState.exactType())) { - /* The source state type was instantiated. */ - /* Now the source flow can be removed from the all-instantiated observers. */ - bb.getAllInstantiatedTypeFlow().removeObserver(this); - /* Update the state and propagate it to uses. 
*/ - addState(bb, sourceState); - } - } - - @Override - public void onObservedSaturated(PointsToAnalysis bb, TypeFlow observed) { - AnalysisError.shouldNotReachHere("NewInstanceTypeFlow cannot saturate."); - } - - @Override - protected void onInputSaturated(PointsToAnalysis bb, TypeFlow input) { - AnalysisError.shouldNotReachHere("NewInstanceTypeFlow cannot saturate."); - } - - @Override - protected void onSaturated(PointsToAnalysis bb) { - AnalysisError.shouldNotReachHere("NewInstanceTypeFlow cannot saturate."); - } - - @Override - public boolean canSaturate() { - return false; - } - -} diff --git a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/TypeFlow.java b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/TypeFlow.java index 018d7de6ce6f..1ca77a7233f2 100644 --- a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/TypeFlow.java +++ b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/flow/TypeFlow.java @@ -155,7 +155,11 @@ public TypeFlow(T source, AnalysisType declaredType, TypeState state) { * @param graphRef the holder method clone */ public TypeFlow(TypeFlow original, MethodFlowsGraph graphRef) { - this(original.getSource(), original.getDeclaredType(), TypeState.forEmpty(), original.getSlot(), true, graphRef); + this(original, graphRef, TypeState.forEmpty()); + } + + public TypeFlow(TypeFlow original, MethodFlowsGraph graphRef, TypeState cloneState) { + this(original.getSource(), original.getDeclaredType(), cloneState, original.getSlot(), true, graphRef); this.usedAsAParameter = original.usedAsAParameter; this.usedAsAReceiver = original.usedAsAReceiver; PointsToStats.registerTypeFlowRetainReason(this, original); diff --git a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/meta/AnalysisElement.java b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/meta/AnalysisElement.java index 349cd1a45d23..f71a838a7964 100644 --- a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/meta/AnalysisElement.java +++ b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/meta/AnalysisElement.java @@ -51,18 +51,18 @@ public abstract class AnalysisElement { @SuppressWarnings("unused") private volatile Object elementReachableNotifications; - public void registerReachabilityNotification(ElementReachableNotification notification) { + public void registerReachabilityNotification(ElementNotification notification) { ConcurrentLightHashSet.addElement(this, reachableNotificationsUpdater, notification); } - public void notifyReachabilityCallback(AnalysisUniverse universe, ElementReachableNotification notification) { + public void notifyReachabilityCallback(AnalysisUniverse universe, ElementNotification notification) { notification.notifyCallback(universe, this); ConcurrentLightHashSet.removeElement(this, reachableNotificationsUpdater, notification); } protected void notifyReachabilityCallbacks(AnalysisUniverse universe, List> futures) { - ConcurrentLightHashSet.forEach(this, reachableNotificationsUpdater, (ElementReachableNotification c) -> futures.add(c.notifyCallback(universe, this))); - ConcurrentLightHashSet.removeElementIf(this, reachableNotificationsUpdater, ElementReachableNotification::isNotified); + ConcurrentLightHashSet.forEach(this, reachableNotificationsUpdater, (ElementNotification c) -> futures.add(c.notifyCallback(universe, this))); + ConcurrentLightHashSet.removeElementIf(this, 
reachableNotificationsUpdater, ElementNotification::isNotified); } public abstract boolean isReachable(); @@ -74,12 +74,12 @@ public boolean isTriggered() { return isReachable(); } - public static final class ElementReachableNotification { + public static final class ElementNotification { private final Consumer callback; private final AtomicReference> notified = new AtomicReference<>(); - public ElementReachableNotification(Consumer callback) { + public ElementNotification(Consumer callback) { this.callback = callback; } @@ -91,7 +91,7 @@ public boolean isNotified() { * Notify the callback exactly once. Note that this callback can be shared by multiple * triggers, the one that triggers it is passed into triggeredElement for debugging. */ - private AnalysisFuture notifyCallback(AnalysisUniverse universe, AnalysisElement triggeredElement) { + AnalysisFuture notifyCallback(AnalysisUniverse universe, AnalysisElement triggeredElement) { assert triggeredElement.isTriggered(); var existing = notified.get(); if (existing != null) { diff --git a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/meta/AnalysisType.java b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/meta/AnalysisType.java index b73fa431ba98..ded0243b010d 100644 --- a/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/meta/AnalysisType.java +++ b/substratevm/src/com.oracle.graal.pointsto/src/com/oracle/graal/pointsto/meta/AnalysisType.java @@ -42,6 +42,7 @@ import org.graalvm.compiler.debug.GraalError; import org.graalvm.compiler.graph.Node; +import org.graalvm.nativeimage.hosted.Feature.DuringAnalysisAccess; import org.graalvm.word.WordBase; import com.oracle.graal.pointsto.BigBang; @@ -93,6 +94,9 @@ public abstract class AnalysisType extends AnalysisElement implements WrappedJav private static final AtomicReferenceFieldUpdater overrideReachableNotificationsUpdater = AtomicReferenceFieldUpdater .newUpdater(AnalysisType.class, Object.class, "overrideReachableNotifications"); + private static final AtomicReferenceFieldUpdater instantiatedNotificationsUpdater = AtomicReferenceFieldUpdater + .newUpdater(AnalysisType.class, Object.class, "typeInstantiatedNotifications"); + private static final AtomicIntegerFieldUpdater isInHeapUpdater = AtomicIntegerFieldUpdater .newUpdater(AnalysisType.class, "isInHeap"); @@ -203,6 +207,8 @@ public enum UsageKind { */ List> scheduledTypeReachableNotifications; + @SuppressWarnings("unused") private volatile Object typeInstantiatedNotifications; + public AnalysisType(AnalysisUniverse universe, ResolvedJavaType javaType, JavaKind storageKind, AnalysisType objectType, AnalysisType cloneableType) { this.universe = universe; this.wrapped = javaType; @@ -483,6 +489,7 @@ public boolean registerAsAllocated(Node node) { protected void onInstantiated(UsageKind usage) { universe.onTypeInstantiated(this, usage); + notifyInstantiatedCallbacks(); processMethodOverrides(); } @@ -575,6 +582,33 @@ public Set getOverrideReachabilityNotificat return ConcurrentLightHashMap.getOrDefault(this, overrideReachableNotificationsUpdater, method, Collections.emptySet()); } + public void registerInstantiatedCallback(Consumer callback) { + if (this.isInstantiated()) { + /* If the type is already instantiated just trigger the callback. 
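+              * Nothing is registered in this branch, so there is no notification object to
+              * clean up later and the callback simply runs synchronously here.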
*/ + callback.accept(universe.getConcurrentAnalysisAccess()); + } else { + ElementNotification notification = new ElementNotification(callback); + ConcurrentLightHashSet.addElement(this, instantiatedNotificationsUpdater, notification); + if (this.isInstantiated()) { + /* + * If the type became instantiated during registration manually trigger the + * callback. + */ + notifyInstantiatedCallback(notification); + } + } + } + + private void notifyInstantiatedCallback(ElementNotification notification) { + notification.notifyCallback(universe, this); + ConcurrentLightHashSet.removeElement(this, instantiatedNotificationsUpdater, notification); + } + + protected void notifyInstantiatedCallbacks() { + ConcurrentLightHashSet.forEach(this, instantiatedNotificationsUpdater, (ElementNotification c) -> c.notifyCallback(universe, this)); + ConcurrentLightHashSet.removeElementIf(this, instantiatedNotificationsUpdater, ElementNotification::isNotified); + } + /** * Iterates all super types for this type, where a super type is defined as any type that is * assignable from this type, feeding each of them to the consumer. diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java index f6bb9eefdb83..69f4b5373efb 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/HeapImpl.java @@ -31,7 +31,6 @@ import org.graalvm.compiler.api.replacements.Fold; import org.graalvm.compiler.core.common.NumUtil; import org.graalvm.compiler.core.common.SuppressFBWarnings; -import org.graalvm.compiler.nodes.gc.BarrierSet; import org.graalvm.compiler.nodes.memory.address.OffsetAddressNode; import org.graalvm.compiler.word.Word; import org.graalvm.nativeimage.CurrentIsolate; @@ -58,7 +57,6 @@ import com.oracle.svm.core.genscavenge.ThreadLocalAllocation.Descriptor; import com.oracle.svm.core.genscavenge.UnalignedHeapChunk.UnalignedHeader; import com.oracle.svm.core.genscavenge.graal.ForcedSerialPostWriteBarrier; -import com.oracle.svm.core.genscavenge.remset.RememberedSet; import com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets; import com.oracle.svm.core.heap.GC; import com.oracle.svm.core.heap.GCCause; @@ -88,8 +86,6 @@ import com.oracle.svm.core.util.UnsignedUtils; import com.oracle.svm.core.util.UserError; -import jdk.vm.ci.meta.MetaAccessProvider; - public final class HeapImpl extends Heap { /** Synchronization means for notifying {@link #refPendingList} waiters without deadlocks. 
*/ private static final VMMutex REF_MUTEX = new VMMutex("referencePendingList"); @@ -468,11 +464,6 @@ boolean walkNativeImageHeapRegions(MemoryWalker.ImageHeapRegionVisitor visitor) (!AuxiliaryImageHeap.isPresent() || AuxiliaryImageHeap.singleton().walkRegions(visitor)); } - @Override - public BarrierSet createBarrierSet(MetaAccessProvider metaAccess) { - return RememberedSet.get().createBarrierSet(metaAccess); - } - @Override public void doReferenceHandling() { if (ReferenceHandler.isExecutedManually()) { diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java index 8fc32d74ac27..1a930ece064d 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ObjectHeaderImpl.java @@ -111,15 +111,6 @@ public static UnsignedWord readHeaderFromObject(Object o) { } } - public static UnsignedWord readHeaderFromObjectCarefully(Object o) { - VMError.guarantee(o != null, "ObjectHeader.readHeaderFromObjectCarefully: o: null"); - UnsignedWord header = readHeaderFromObject(o); - VMError.guarantee(header.notEqual(WordFactory.zero()), "ObjectHeader.readHeaderFromObjectCarefully: header: 0"); - VMError.guarantee(!isProducedHeapChunkZapped(header), "ObjectHeader.readHeaderFromObjectCarefully: header: producedZapValue"); - VMError.guarantee(!isConsumedHeapChunkZapped(header), "ObjectHeader.readHeaderFromObjectCarefully: header: consumedZapValue"); - return header; - } - @Override @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public DynamicHub readDynamicHubFromPointer(Pointer ptr) { diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/PinnedObjectImpl.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/PinnedObjectImpl.java index 22f85383591c..2e896a58770a 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/PinnedObjectImpl.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/PinnedObjectImpl.java @@ -25,16 +25,14 @@ package com.oracle.svm.core.genscavenge; import org.graalvm.compiler.word.Word; -import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.PinnedObject; -import org.graalvm.nativeimage.hosted.Feature; +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; import org.graalvm.nativeimage.impl.PinnedObjectSupport; import org.graalvm.word.Pointer; import org.graalvm.word.PointerBase; import org.graalvm.word.UnsignedWord; -import com.oracle.svm.core.SubstrateOptions; -import com.oracle.svm.core.annotate.AutomaticFeature; import com.oracle.svm.core.Uninterruptible; import com.oracle.svm.core.heap.ObjectHeader; import com.oracle.svm.core.hub.DynamicHub; @@ -43,8 +41,12 @@ import com.oracle.svm.core.thread.VMOperation; /** Support for pinning objects to a memory address with {@link PinnedObject}. 
*/ -final class PinnedObjectImpl implements PinnedObject { - static class PinnedObjectSupportImpl implements PinnedObjectSupport { +public final class PinnedObjectImpl implements PinnedObject { + public static class PinnedObjectSupportImpl implements PinnedObjectSupport { + @Platforms(Platform.HOSTED_ONLY.class) + public PinnedObjectSupportImpl() { + } + @Override public PinnedObject create(Object object) { PinnedObjectImpl result = new PinnedObjectImpl(object); @@ -65,19 +67,6 @@ public boolean isPinned(Object object) { } } - @AutomaticFeature - static class PinnedObjectFeature implements Feature { - @Override - public boolean isInConfiguration(IsInConfigurationAccess access) { - return SubstrateOptions.UseSerialGC.getValue() || SubstrateOptions.UseEpsilonGC.getValue(); - } - - @Override - public void afterRegistration(AfterRegistrationAccess access) { - ImageSingletons.add(PinnedObjectSupport.class, new PinnedObjectSupportImpl()); - } - } - static void pushPinnedObject(PinnedObjectImpl newHead) { // To avoid ABA problems, the application may only push data. All other operations may only // be executed by the GC. diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java index b3f98eb06b2b..3bfd58aa59e9 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/ThreadLocalAllocation.java @@ -27,6 +27,8 @@ import static com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets.TLAB_END_IDENTITY; import static com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets.TLAB_TOP_IDENTITY; +import com.oracle.svm.core.heap.Pod; +import com.oracle.svm.core.thread.Continuation; import org.graalvm.compiler.api.replacements.Fold; import org.graalvm.compiler.replacements.AllocationSnippets.FillContent; import org.graalvm.compiler.word.Word; @@ -54,8 +56,10 @@ import com.oracle.svm.core.genscavenge.graal.nodes.FormatArrayNode; import com.oracle.svm.core.genscavenge.graal.nodes.FormatObjectNode; import com.oracle.svm.core.genscavenge.graal.nodes.FormatPodNode; +import com.oracle.svm.core.genscavenge.graal.nodes.FormatStoredContinuationNode; import com.oracle.svm.core.graal.snippets.DeoptTester; import com.oracle.svm.core.heap.OutOfMemoryUtil; +import com.oracle.svm.core.heap.StoredContinuation; import com.oracle.svm.core.hub.DynamicHub; import com.oracle.svm.core.hub.LayoutEncoding; import com.oracle.svm.core.log.Log; @@ -169,24 +173,6 @@ private static Descriptor getTlab() { return regularTLAB.getAddress(); } - @SubstrateForeignCallTarget(stubCallingConvention = false) - private static Object slowPathNewInstance(Word objectHeader) { - /* - * Avoid stack overflow errors while producing memory chunks, because that could leave the - * heap in an inconsistent state. - */ - StackOverflowCheck.singleton().makeYellowZoneAvailable(); - try { - DynamicHub hub = ObjectHeaderImpl.getObjectHeaderImpl().dynamicHubFromObjectHeader(objectHeader); - - Object result = slowPathNewInstanceWithoutAllocating(hub); - runSlowPathHooks(); - return result; - } finally { - StackOverflowCheck.singleton().protectYellowZone(); - } - } - /** * NOTE: Multiple threads may execute this method concurrently. 
All code that is transitively * reachable from this method may get executed as a side effect of an allocation slow path. To @@ -219,6 +205,24 @@ private static void runSlowPathHooks() { GCImpl.getPolicy().updateSizeParameters(); } + @SubstrateForeignCallTarget(stubCallingConvention = false) + private static Object slowPathNewInstance(Word objectHeader) { + /* + * Avoid stack overflow errors while producing memory chunks, because that could leave the + * heap in an inconsistent state. + */ + StackOverflowCheck.singleton().makeYellowZoneAvailable(); + try { + DynamicHub hub = ObjectHeaderImpl.getObjectHeaderImpl().dynamicHubFromObjectHeader(objectHeader); + + Object result = slowPathNewInstanceWithoutAllocating(hub); + runSlowPathHooks(); + return result; + } finally { + StackOverflowCheck.singleton().protectYellowZone(); + } + } + @RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate in the implementation of allocation.") private static Object slowPathNewInstanceWithoutAllocating(DynamicHub hub) { DeoptTester.disableDeoptTesting(); @@ -234,16 +238,21 @@ private static Object slowPathNewInstanceWithoutAllocating(DynamicHub hub) { } @SubstrateForeignCallTarget(stubCallingConvention = false) - private static Object slowPathNewArray(Word objectHeader, int length, int fillStartOffset) { - return slowPathNewArrayOrPodImpl(objectHeader, length, fillStartOffset, null); + private static Object slowPathNewArray(Word objectHeader, int length) { + return slowPathNewArrayLikeObject(objectHeader, length, null); } @SubstrateForeignCallTarget(stubCallingConvention = false) - private static Object slowPathNewPodInstance(Word objectHeader, int arrayLength, int fillStartOffset, byte[] referenceMap) { - return slowPathNewArrayOrPodImpl(objectHeader, arrayLength, fillStartOffset, referenceMap); + private static Object slowPathNewStoredContinuation(Word objectHeader, int length) { + return slowPathNewArrayLikeObject(objectHeader, length, null); } - private static Object slowPathNewArrayOrPodImpl(Word objectHeader, int length, int fillStartOffset, byte[] podReferenceMap) { + @SubstrateForeignCallTarget(stubCallingConvention = false) + private static Object slowPathNewPodInstance(Word objectHeader, int arrayLength, byte[] referenceMap) { + return slowPathNewArrayLikeObject(objectHeader, arrayLength, referenceMap); + } + + private static Object slowPathNewArrayLikeObject(Word objectHeader, int length, byte[] podReferenceMap) { /* * Avoid stack overflow errors while producing memory chunks, because that could leave the * heap in an inconsistent state. 
@@ -267,7 +276,7 @@ private static Object slowPathNewArrayOrPodImpl(Word objectHeader, int length, i throw OutOfMemoryUtil.reportOutOfMemoryError(outOfMemoryError); } - Object result = slowPathNewArrayOrPodWithoutAllocating(hub, length, size, fillStartOffset, podReferenceMap); + Object result = slowPathNewArrayLikeObject0(hub, length, size, podReferenceMap); runSlowPathHooks(); return result; } finally { @@ -276,7 +285,7 @@ private static Object slowPathNewArrayOrPodImpl(Word objectHeader, int length, i } @RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate in the implementation of allocation.") - private static Object slowPathNewArrayOrPodWithoutAllocating(DynamicHub hub, int length, UnsignedWord size, int fillStartOffset, byte[] podReferenceMap) { + private static Object slowPathNewArrayLikeObject0(DynamicHub hub, int length, UnsignedWord size, byte[] podReferenceMap) { DeoptTester.disableDeoptTesting(); try { HeapImpl.exitIfAllocationDisallowed("ThreadLocalAllocation.slowPathNewArrayOrPodWithoutAllocating", DynamicHub.toClass(hub).getName()); @@ -286,16 +295,16 @@ private static Object slowPathNewArrayOrPodWithoutAllocating(DynamicHub hub, int /* Large arrays go into their own unaligned chunk. */ boolean needsZeroing = !HeapChunkProvider.areUnalignedChunksZeroed(); UnalignedHeapChunk.UnalignedHeader newTlabChunk = HeapImpl.getChunkProvider().produceUnalignedChunk(size); - return allocateLargeArrayOrPodInNewTlab(hub, length, size, fillStartOffset, newTlabChunk, needsZeroing, podReferenceMap); + return allocateLargeArrayLikeObjectInNewTlab(hub, length, size, newTlabChunk, needsZeroing, podReferenceMap); } /* Small arrays go into the regular aligned chunk. */ // We might have allocated in the caller and acquired a TLAB with enough space already // (but we need to check in an uninterruptible method to be safe) - Object array = allocateSmallArrayOrPodInCurrentTlab(hub, length, size, fillStartOffset, podReferenceMap); + Object array = allocateSmallArrayLikeObjectInCurrentTlab(hub, length, size, podReferenceMap); if (array == null) { // We need a new chunk. 
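// The current TLAB did not have enough free memory for this allocation, so get a fresh aligned chunk and allocate there.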
AlignedHeader newTlabChunk = HeapImpl.getChunkProvider().produceAlignedChunk(); - array = allocateSmallArrayOrPodInNewTlab(hub, length, size, fillStartOffset, newTlabChunk, podReferenceMap); + array = allocateSmallArrayLikeObjectInNewTlab(hub, length, size, newTlabChunk, podReferenceMap); } return array; } finally { @@ -311,24 +320,22 @@ private static Object allocateInstanceInNewTlab(DynamicHub hub, AlignedHeader ne } @Uninterruptible(reason = "Holds uninitialized memory.") - private static Object allocateSmallArrayOrPodInCurrentTlab(DynamicHub hub, int length, UnsignedWord size, int fillStartOffset, byte[] podReferenceMap) { + private static Object allocateSmallArrayLikeObjectInCurrentTlab(DynamicHub hub, int length, UnsignedWord size, byte[] podReferenceMap) { if (size.aboveThan(availableTlabMemory(getTlab()))) { return null; } Pointer memory = allocateRawMemoryInTlab(size, getTlab()); - return formatArrayOrPod(memory, hub, length, false, FillContent.WITH_ZEROES, fillStartOffset, podReferenceMap); + return formatArrayLikeObject(memory, hub, length, false, FillContent.WITH_ZEROES, podReferenceMap); } @Uninterruptible(reason = "Holds uninitialized memory.") - private static Object allocateSmallArrayOrPodInNewTlab(DynamicHub hub, int length, UnsignedWord size, int fillStartOffset, AlignedHeader newTlabChunk, byte[] podReferenceMap) { + private static Object allocateSmallArrayLikeObjectInNewTlab(DynamicHub hub, int length, UnsignedWord size, AlignedHeader newTlabChunk, byte[] podReferenceMap) { Pointer memory = allocateRawMemoryInNewTlab(size, newTlabChunk); - return formatArrayOrPod(memory, hub, length, false, FillContent.WITH_ZEROES, fillStartOffset, podReferenceMap); + return formatArrayLikeObject(memory, hub, length, false, FillContent.WITH_ZEROES, podReferenceMap); } @Uninterruptible(reason = "Holds uninitialized memory, modifies TLAB") - private static Object allocateLargeArrayOrPodInNewTlab(DynamicHub hub, int length, UnsignedWord size, int fillStartOffset, - UnalignedHeader newTlabChunk, boolean needsZeroing, byte[] podReferenceMap) { - + private static Object allocateLargeArrayLikeObjectInNewTlab(DynamicHub hub, int length, UnsignedWord size, UnalignedHeader newTlabChunk, boolean needsZeroing, byte[] podReferenceMap) { ThreadLocalAllocation.Descriptor tlab = getTlab(); HeapChunk.setNext(newTlabChunk, tlab.getUnalignedChunk()); @@ -350,16 +357,18 @@ private static Object allocateLargeArrayOrPodInNewTlab(DynamicHub hub, int lengt * any way. */ FillContent fillKind = needsZeroing ? 
FillContent.WITH_ZEROES : FillContent.DO_NOT_FILL; - return formatArrayOrPod(memory, hub, length, true, fillKind, fillStartOffset, podReferenceMap); + return formatArrayLikeObject(memory, hub, length, true, fillKind, podReferenceMap); } @Uninterruptible(reason = "Holds uninitialized memory") - private static Object formatArrayOrPod(Pointer memory, DynamicHub hub, int length, boolean unaligned, FillContent fillContent, int fillStartOffset, byte[] podReferenceMap) { + private static Object formatArrayLikeObject(Pointer memory, DynamicHub hub, int length, boolean unaligned, FillContent fillContent, byte[] podReferenceMap) { Class clazz = DynamicHub.toClass(hub); - if (podReferenceMap != null) { - return FormatPodNode.formatPod(memory, clazz, length, podReferenceMap, false, unaligned, fillContent, fillStartOffset, true); + if (Continuation.isSupported() && clazz == StoredContinuation.class) { + return FormatStoredContinuationNode.formatStoredContinuation(memory, clazz, length, false, unaligned, true); + } else if (Pod.RuntimeSupport.isPresent() && podReferenceMap != null) { + return FormatPodNode.formatPod(memory, clazz, length, podReferenceMap, false, unaligned, fillContent, true); } - return FormatArrayNode.formatArray(memory, clazz, length, false, unaligned, fillContent, fillStartOffset, true); + return FormatArrayNode.formatArray(memory, clazz, length, false, unaligned, fillContent, true); } @Uninterruptible(reason = "Returns uninitialized memory, modifies TLAB", callerMustBe = true) diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSnippets.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSnippets.java index a12f696a904d..1cc5a3143eec 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSnippets.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSnippets.java @@ -51,11 +51,15 @@ import com.oracle.svm.core.genscavenge.graal.nodes.FormatArrayNode; import com.oracle.svm.core.genscavenge.graal.nodes.FormatObjectNode; import com.oracle.svm.core.genscavenge.graal.nodes.FormatPodNode; +import com.oracle.svm.core.genscavenge.graal.nodes.FormatStoredContinuationNode; import com.oracle.svm.core.graal.snippets.NodeLoweringProvider; import com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets; import com.oracle.svm.core.graal.snippets.SubstrateTemplates; +import com.oracle.svm.core.heap.Pod; import com.oracle.svm.core.hub.DynamicHub; import com.oracle.svm.core.hub.LayoutEncoding; +import com.oracle.svm.core.thread.Continuation; +import com.oracle.svm.core.thread.ContinuationSupport; import jdk.vm.ci.meta.JavaKind; @@ -71,27 +75,37 @@ public static Object formatObjectSnippet(Word memory, DynamicHub hub, boolean re } @Snippet - public static Object formatArraySnippet(Word memory, DynamicHub hub, int length, boolean rememberedSet, boolean unaligned, AllocationSnippets.FillContent fillContents, int fillStartOffset, - boolean emitMemoryBarrier, @ConstantParameter boolean supportsBulkZeroing, @ConstantParameter boolean supportsOptimizedFilling, + public static Object formatArraySnippet(Word memory, DynamicHub hub, int length, boolean rememberedSet, boolean unaligned, AllocationSnippets.FillContent fillContents, boolean emitMemoryBarrier, + @ConstantParameter boolean supportsBulkZeroing, @ConstantParameter boolean 
supportsOptimizedFilling, @ConstantParameter AllocationSnippets.AllocationSnippetCounters snippetCounters) { DynamicHub hubNonNull = (DynamicHub) PiNode.piCastNonNull(hub, SnippetAnchorNode.anchor()); int layoutEncoding = hubNonNull.getLayoutEncoding(); UnsignedWord size = LayoutEncoding.getArraySize(layoutEncoding, length); Word objectHeader = encodeAsObjectHeader(hubNonNull, rememberedSet, unaligned); - return alloc().formatArray(objectHeader, size, length, memory, fillContents, fillStartOffset, - emitMemoryBarrier, false, supportsBulkZeroing, supportsOptimizedFilling, snippetCounters); + return alloc().formatArray(objectHeader, size, length, memory, fillContents, emitMemoryBarrier, false, supportsBulkZeroing, supportsOptimizedFilling, snippetCounters); + } + + @Snippet + public static Object formatStoredContinuation(Word memory, DynamicHub hub, int length, boolean rememberedSet, boolean unaligned, @ConstantParameter long ipOffset, + @ConstantParameter boolean emitMemoryBarrier, @ConstantParameter AllocationSnippets.AllocationSnippetCounters snippetCounters) { + DynamicHub hubNonNull = (DynamicHub) PiNode.piCastNonNull(hub, SnippetAnchorNode.anchor()); + int layoutEncoding = hubNonNull.getLayoutEncoding(); + UnsignedWord size = LayoutEncoding.getArraySize(layoutEncoding, length); + Word objectHeader = encodeAsObjectHeader(hubNonNull, rememberedSet, unaligned); + return alloc().formatStoredContinuation(objectHeader, size, length, memory, emitMemoryBarrier, ipOffset, snippetCounters); } @Snippet public static Object formatPodSnippet(Word memory, DynamicHub hub, int arrayLength, byte[] referenceMap, boolean rememberedSet, boolean unaligned, AllocationSnippets.FillContent fillContents, - int fillStartOffset, @ConstantParameter boolean emitMemoryBarrier, @ConstantParameter boolean supportsBulkZeroing, @ConstantParameter boolean supportsOptimizedFilling, + @ConstantParameter boolean emitMemoryBarrier, @ConstantParameter boolean supportsBulkZeroing, @ConstantParameter boolean supportsOptimizedFilling, @ConstantParameter AllocationSnippets.AllocationSnippetCounters snippetCounters) { - DynamicHub hubNonNull = (DynamicHub) PiNode.piCastNonNull(hub, SnippetAnchorNode.anchor()); byte[] refMapNonNull = (byte[]) PiNode.piCastNonNull(referenceMap, SnippetAnchorNode.anchor()); Word objectHeader = encodeAsObjectHeader(hubNonNull, rememberedSet, unaligned); - return alloc().formatPod(objectHeader, hubNonNull, arrayLength, refMapNonNull, memory, fillContents, fillStartOffset, - emitMemoryBarrier, false, supportsBulkZeroing, supportsOptimizedFilling, snippetCounters); + int layoutEncoding = hubNonNull.getLayoutEncoding(); + UnsignedWord allocationSize = LayoutEncoding.getArraySize(layoutEncoding, arrayLength); + return alloc().formatPod(objectHeader, hubNonNull, allocationSize, arrayLength, refMapNonNull, memory, fillContents, emitMemoryBarrier, false, supportsBulkZeroing, supportsOptimizedFilling, + snippetCounters); } private static Word encodeAsObjectHeader(DynamicHub hub, boolean rememberedSet, boolean unaligned) { @@ -107,6 +121,7 @@ public static class Templates extends SubstrateTemplates { private final SubstrateAllocationSnippets.Templates baseTemplates; private final SnippetInfo formatObject; private final SnippetInfo formatArray; + private final SnippetInfo formatStoredContinuation; private final SnippetInfo formatPod; Templates(OptionValues options, Providers providers, SubstrateAllocationSnippets.Templates baseTemplates) { @@ -114,13 +129,19 @@ public static class Templates extends 
SubstrateTemplates { this.baseTemplates = baseTemplates; formatObject = snippet(GenScavengeAllocationSnippets.class, "formatObjectSnippet"); formatArray = snippet(GenScavengeAllocationSnippets.class, "formatArraySnippet"); - formatPod = snippet(GenScavengeAllocationSnippets.class, "formatPodSnippet", NamedLocationIdentity.getArrayLocation(JavaKind.Byte)); + formatStoredContinuation = Continuation.isSupported() ? snippet(GenScavengeAllocationSnippets.class, "formatStoredContinuation") : null; + formatPod = Pod.RuntimeSupport.isPresent() ? snippet(GenScavengeAllocationSnippets.class, "formatPodSnippet", NamedLocationIdentity.getArrayLocation(JavaKind.Byte)) : null; } public void registerLowering(Map, NodeLoweringProvider> lowerings) { lowerings.put(FormatObjectNode.class, new FormatObjectLowering()); lowerings.put(FormatArrayNode.class, new FormatArrayLowering()); - lowerings.put(FormatPodNode.class, new FormatPodLowering()); + if (Continuation.isSupported()) { + lowerings.put(FormatStoredContinuationNode.class, new FormatStoredContinuationLowering()); + } + if (Pod.RuntimeSupport.isPresent()) { + lowerings.put(FormatPodNode.class, new FormatPodLowering()); + } } private class FormatObjectLowering implements NodeLoweringProvider { @@ -155,7 +176,6 @@ public void lower(FormatArrayNode node, LoweringTool tool) { args.add("rememberedSet", node.getRememberedSet()); args.add("unaligned", node.getUnaligned()); args.add("fillContents", node.getFillContents()); - args.add("fillStartOffset", node.getFillStartOffset()); args.add("emitMemoryBarrier", node.getEmitMemoryBarrier()); args.addConst("supportsBulkZeroing", tool.getLowerer().supportsBulkZeroing()); args.addConst("supportsOptimizedFilling", tool.getLowerer().supportsOptimizedFilling(graph.getOptions())); @@ -164,6 +184,26 @@ public void lower(FormatArrayNode node, LoweringTool tool) { } } + private class FormatStoredContinuationLowering implements NodeLoweringProvider { + @Override + public void lower(FormatStoredContinuationNode node, LoweringTool tool) { + StructuredGraph graph = node.graph(); + if (graph.getGuardsStage() != GraphState.GuardsStage.AFTER_FSA) { + return; + } + Arguments args = new Arguments(formatStoredContinuation, graph.getGuardsStage(), tool.getLoweringStage()); + args.add("memory", node.getMemory()); + args.add("hub", node.getHub()); + args.add("length", node.getLength()); + args.add("rememberedSet", node.getRememberedSet()); + args.add("unaligned", node.getUnaligned()); + args.addConst("ipOffset", ContinuationSupport.singleton().getIPOffset()); + args.addConst("emitMemoryBarrier", node.getEmitMemoryBarrier()); + args.addConst("snippetCounters", baseTemplates.getSnippetCounters()); + template(node, args).instantiate(providers.getMetaAccess(), node, SnippetTemplate.DEFAULT_REPLACER, args); + } + } + private class FormatPodLowering implements NodeLoweringProvider { @Override public void lower(FormatPodNode node, LoweringTool tool) { @@ -179,7 +219,6 @@ public void lower(FormatPodNode node, LoweringTool tool) { args.add("rememberedSet", node.getRememberedSet()); args.add("unaligned", node.getUnaligned()); args.add("fillContents", node.getFillContents()); - args.add("fillStartOffset", node.getFillStartOffset()); args.addConst("emitMemoryBarrier", node.getEmitMemoryBarrier()); args.addConst("supportsBulkZeroing", tool.getLowerer().supportsBulkZeroing()); args.addConst("supportsOptimizedFilling", tool.getLowerer().supportsOptimizedFilling(graph.getOptions())); diff --git 
a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java index 26d3acb85d2d..250fbb0ec588 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeAllocationSupport.java @@ -24,6 +24,9 @@ */ package com.oracle.svm.core.genscavenge.graal; +import com.oracle.svm.core.heap.Pod; +import com.oracle.svm.core.snippets.SnippetRuntime.SubstrateForeignCallDescriptor; +import com.oracle.svm.core.thread.Continuation; import org.graalvm.compiler.core.common.spi.ForeignCallDescriptor; import org.graalvm.compiler.word.Word; import org.graalvm.word.UnsignedWord; @@ -35,27 +38,39 @@ import com.oracle.svm.core.snippets.SnippetRuntime; public class GenScavengeAllocationSupport implements GCAllocationSupport { - private static final SnippetRuntime.SubstrateForeignCallDescriptor SLOW_NEW_INSTANCE = SnippetRuntime.findForeignCall(ThreadLocalAllocation.class, "slowPathNewInstance", true); - private static final SnippetRuntime.SubstrateForeignCallDescriptor SLOW_NEW_ARRAY = SnippetRuntime.findForeignCall(ThreadLocalAllocation.class, "slowPathNewArray", true); - private static final SnippetRuntime.SubstrateForeignCallDescriptor SLOW_NEW_POD_INSTANCE = SnippetRuntime.findForeignCall(ThreadLocalAllocation.class, "slowPathNewPodInstance", true); - private static final SnippetRuntime.SubstrateForeignCallDescriptor[] FOREIGN_CALLS = new SnippetRuntime.SubstrateForeignCallDescriptor[]{SLOW_NEW_INSTANCE, SLOW_NEW_ARRAY, SLOW_NEW_POD_INSTANCE}; + private static final SubstrateForeignCallDescriptor SLOW_NEW_INSTANCE = SnippetRuntime.findForeignCall(ThreadLocalAllocation.class, "slowPathNewInstance", true); + private static final SubstrateForeignCallDescriptor SLOW_NEW_ARRAY = SnippetRuntime.findForeignCall(ThreadLocalAllocation.class, "slowPathNewArray", true); + private static final SubstrateForeignCallDescriptor SLOW_NEW_STORED_CONTINUATION = SnippetRuntime.findForeignCall(ThreadLocalAllocation.class, "slowPathNewStoredContinuation", true); + private static final SubstrateForeignCallDescriptor SLOW_NEW_POD_INSTANCE = SnippetRuntime.findForeignCall(ThreadLocalAllocation.class, "slowPathNewPodInstance", true); + private static final SubstrateForeignCallDescriptor[] UNCONDITIONAL_FOREIGN_CALLS = new SubstrateForeignCallDescriptor[]{SLOW_NEW_INSTANCE, SLOW_NEW_ARRAY}; public static void registerForeignCalls(SubstrateForeignCallsProvider foreignCalls) { - foreignCalls.register(FOREIGN_CALLS); + foreignCalls.register(UNCONDITIONAL_FOREIGN_CALLS); + if (Continuation.isSupported()) { + foreignCalls.register(SLOW_NEW_STORED_CONTINUATION); + } + if (Pod.RuntimeSupport.isPresent()) { + foreignCalls.register(SLOW_NEW_POD_INSTANCE); + } } @Override - public ForeignCallDescriptor getSlowNewInstanceStub() { + public ForeignCallDescriptor getNewInstanceStub() { return SLOW_NEW_INSTANCE; } @Override - public ForeignCallDescriptor getSlowNewArrayStub() { + public ForeignCallDescriptor getNewArrayStub() { return SLOW_NEW_ARRAY; } @Override - public ForeignCallDescriptor getSlowNewPodInstanceStub() { + public ForeignCallDescriptor getNewStoredContinuationStub() { + return SLOW_NEW_STORED_CONTINUATION; + } + + @Override + public ForeignCallDescriptor 
getNewPodInstanceStub() { return SLOW_NEW_POD_INSTANCE; } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java index 6d78398c31a5..b034d570dfb7 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/GenScavengeGCFeature.java @@ -33,6 +33,7 @@ import org.graalvm.compiler.phases.util.Providers; import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.hosted.Feature; +import org.graalvm.nativeimage.impl.PinnedObjectSupport; import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.annotate.AutomaticFeature; @@ -43,6 +44,7 @@ import com.oracle.svm.core.genscavenge.ImageHeapInfo; import com.oracle.svm.core.genscavenge.IncrementalGarbageCollectorMXBean; import com.oracle.svm.core.genscavenge.LinearImageHeapLayouter; +import com.oracle.svm.core.genscavenge.PinnedObjectImpl.PinnedObjectSupportImpl; import com.oracle.svm.core.genscavenge.jvmstat.EpsilonGCPerfData; import com.oracle.svm.core.genscavenge.jvmstat.SerialGCPerfData; import com.oracle.svm.core.genscavenge.remset.CardTableBasedRememberedSet; @@ -54,8 +56,9 @@ import com.oracle.svm.core.graal.snippets.GCAllocationSupport; import com.oracle.svm.core.graal.snippets.NodeLoweringProvider; import com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets; +import com.oracle.svm.core.heap.AllocationFeature; +import com.oracle.svm.core.heap.BarrierSetProvider; import com.oracle.svm.core.heap.Heap; -import com.oracle.svm.core.heap.HeapFeature; import com.oracle.svm.core.image.ImageHeapLayouter; import com.oracle.svm.core.jdk.RuntimeFeature; import com.oracle.svm.core.jdk.management.ManagementFeature; @@ -73,20 +76,27 @@ public boolean isInConfiguration(IsInConfigurationAccess access) { @Override public List> getRequiredFeatures() { - return Arrays.asList(RuntimeFeature.class, ManagementFeature.class, PerfDataFeature.class, HeapFeature.class); + return Arrays.asList(RuntimeFeature.class, ManagementFeature.class, PerfDataFeature.class, AllocationFeature.class); } @Override public void afterRegistration(AfterRegistrationAccess access) { + RememberedSet rememberedSet = createRememberedSet(); + ImageSingletons.add(RememberedSet.class, rememberedSet); + ImageSingletons.add(BarrierSetProvider.class, rememberedSet); + } + + @Override + public void duringSetup(DuringSetupAccess access) { HeapImpl heap = new HeapImpl(SubstrateOptions.getPageSize()); ImageSingletons.add(Heap.class, heap); - ImageSingletons.add(RememberedSet.class, createRememberedSet()); ImageSingletons.add(GCAllocationSupport.class, new GenScavengeAllocationSupport()); ManagementSupport managementSupport = ManagementSupport.getSingleton(); managementSupport.addPlatformManagedObjectSingleton(java.lang.management.MemoryMXBean.class, new HeapImplMemoryMXBean()); managementSupport.addPlatformManagedObjectList(com.sun.management.GarbageCollectorMXBean.class, Arrays.asList(new IncrementalGarbageCollectorMXBean(), new CompleteGarbageCollectorMXBean())); + ImageSingletons.add(PinnedObjectSupport.class, new PinnedObjectSupportImpl()); if (ImageSingletons.contains(PerfManager.class)) { ImageSingletons.lookup(PerfManager.class).register(createPerfData()); } diff --git 
a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/nodes/FormatArrayNode.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/nodes/FormatArrayNode.java index b2e9f6f1c9b9..850d77c52b08 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/nodes/FormatArrayNode.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/nodes/FormatArrayNode.java @@ -46,11 +46,9 @@ public class FormatArrayNode extends FixedWithNextNode implements Lowerable { @Input protected ValueNode rememberedSet; @Input protected ValueNode unaligned; @Input protected ValueNode fillContents; - @Input protected ValueNode fillStartOffset; @Input protected ValueNode emitMemoryBarrier; - public FormatArrayNode(ValueNode memory, ValueNode hub, ValueNode length, ValueNode rememberedSet, ValueNode unaligned, ValueNode fillContents, ValueNode fillStartOffset, - ValueNode emitMemoryBarrier) { + public FormatArrayNode(ValueNode memory, ValueNode hub, ValueNode length, ValueNode rememberedSet, ValueNode unaligned, ValueNode fillContents, ValueNode emitMemoryBarrier) { super(TYPE, StampFactory.objectNonNull()); this.memory = memory; this.hub = hub; @@ -58,7 +56,6 @@ public FormatArrayNode(ValueNode memory, ValueNode hub, ValueNode length, ValueN this.rememberedSet = rememberedSet; this.unaligned = unaligned; this.fillContents = fillContents; - this.fillStartOffset = fillStartOffset; this.emitMemoryBarrier = emitMemoryBarrier; } @@ -86,15 +83,10 @@ public ValueNode getFillContents() { return fillContents; } - public ValueNode getFillStartOffset() { - return fillStartOffset; - } - public ValueNode getEmitMemoryBarrier() { return emitMemoryBarrier; } @NodeIntrinsic - public static native Object formatArray(Pointer memory, Class hub, int length, boolean rememberedSet, boolean unaligned, AllocationSnippets.FillContent fillContents, int fillStartOffset, - boolean emitMemoryBarrier); + public static native Object formatArray(Pointer memory, Class hub, int length, boolean rememberedSet, boolean unaligned, AllocationSnippets.FillContent fillContents, boolean emitMemoryBarrier); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/nodes/FormatPodNode.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/nodes/FormatPodNode.java index 3d3acb2d6fa9..d4733dfd1326 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/nodes/FormatPodNode.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/nodes/FormatPodNode.java @@ -47,11 +47,10 @@ public class FormatPodNode extends FixedWithNextNode implements Lowerable { @Input protected ValueNode rememberedSet; @Input protected ValueNode unaligned; @Input protected ValueNode fillContents; - @Input protected ValueNode fillStartOffset; private final boolean emitMemoryBarrier; - public FormatPodNode(ValueNode memory, ValueNode hub, ValueNode arrayLength, ValueNode referenceMap, ValueNode rememberedSet, - ValueNode unaligned, ValueNode fillContents, ValueNode fillStartOffset, boolean emitMemoryBarrier) { + public FormatPodNode(ValueNode memory, ValueNode hub, ValueNode arrayLength, ValueNode referenceMap, ValueNode rememberedSet, ValueNode unaligned, ValueNode fillContents, + boolean emitMemoryBarrier) { super(TYPE, StampFactory.objectNonNull()); 
this.memory = memory; this.hub = hub; @@ -60,7 +59,6 @@ public FormatPodNode(ValueNode memory, ValueNode hub, ValueNode arrayLength, Val this.rememberedSet = rememberedSet; this.unaligned = unaligned; this.fillContents = fillContents; - this.fillStartOffset = fillStartOffset; this.emitMemoryBarrier = emitMemoryBarrier; } @@ -92,15 +90,11 @@ public ValueNode getFillContents() { return fillContents; } - public ValueNode getFillStartOffset() { - return fillStartOffset; - } - public boolean getEmitMemoryBarrier() { return emitMemoryBarrier; } @NodeIntrinsic - public static native Object formatPod(Pointer memory, Class hub, int arrayLength, byte[] referenceMap, boolean rememberedSet, boolean unaligned, - AllocationSnippets.FillContent fillContents, int fillStartOffset, @ConstantNodeParameter boolean emitMemoryBarrier); + public static native Object formatPod(Pointer memory, Class hub, int arrayLength, byte[] referenceMap, boolean rememberedSet, boolean unaligned, AllocationSnippets.FillContent fillContents, + @ConstantNodeParameter boolean emitMemoryBarrier); } diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/nodes/FormatStoredContinuationNode.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/nodes/FormatStoredContinuationNode.java new file mode 100644 index 000000000000..f0aedf5a3630 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/graal/nodes/FormatStoredContinuationNode.java @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2022, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ +package com.oracle.svm.core.genscavenge.graal.nodes; + +import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_16; +import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_16; + +import org.graalvm.compiler.core.common.type.StampFactory; +import org.graalvm.compiler.graph.NodeClass; +import org.graalvm.compiler.nodeinfo.NodeInfo; +import org.graalvm.compiler.nodes.FixedWithNextNode; +import org.graalvm.compiler.nodes.ValueNode; +import org.graalvm.compiler.nodes.spi.Lowerable; +import org.graalvm.word.Pointer; + +@NodeInfo(cycles = CYCLES_16, size = SIZE_16) +public class FormatStoredContinuationNode extends FixedWithNextNode implements Lowerable { + public static final NodeClass TYPE = NodeClass.create(FormatStoredContinuationNode.class); + + @Input protected ValueNode memory; + @Input protected ValueNode hub; + @Input protected ValueNode length; + @Input protected ValueNode rememberedSet; + @Input protected ValueNode unaligned; + private final boolean emitMemoryBarrier; + + public FormatStoredContinuationNode(ValueNode memory, ValueNode hub, ValueNode length, ValueNode rememberedSet, ValueNode unaligned, boolean emitMemoryBarrier) { + super(TYPE, StampFactory.objectNonNull()); + this.memory = memory; + this.hub = hub; + this.length = length; + this.rememberedSet = rememberedSet; + this.unaligned = unaligned; + this.emitMemoryBarrier = emitMemoryBarrier; + } + + public ValueNode getMemory() { + return memory; + } + + public ValueNode getHub() { + return hub; + } + + public ValueNode getLength() { + return length; + } + + public ValueNode getRememberedSet() { + return rememberedSet; + } + + public ValueNode getUnaligned() { + return unaligned; + } + + public boolean getEmitMemoryBarrier() { + return emitMemoryBarrier; + } + + @NodeIntrinsic + public static native Object formatStoredContinuation(Pointer memory, Class hub, int length, boolean rememberedSet, boolean unaligned, @ConstantNodeParameter boolean emitMemoryBarrier); +} diff --git a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/RememberedSet.java b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/RememberedSet.java index 56cb85c184f5..d6b1725a2967 100644 --- a/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/RememberedSet.java +++ b/substratevm/src/com.oracle.svm.core.genscavenge/src/com/oracle/svm/core/genscavenge/remset/RememberedSet.java @@ -27,7 +27,6 @@ import java.util.List; import org.graalvm.compiler.api.replacements.Fold; -import org.graalvm.compiler.nodes.gc.BarrierSet; import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.Platform; import org.graalvm.nativeimage.Platforms; @@ -38,25 +37,21 @@ import com.oracle.svm.core.genscavenge.GreyToBlackObjectVisitor; import com.oracle.svm.core.genscavenge.Space; import com.oracle.svm.core.genscavenge.UnalignedHeapChunk.UnalignedHeader; +import com.oracle.svm.core.heap.BarrierSetProvider; import com.oracle.svm.core.image.ImageHeapObject; import com.oracle.svm.core.util.HostedByteBufferPointer; -import jdk.vm.ci.meta.MetaAccessProvider; - /** * A remembered set keeps track of references between generations (from the old generation to the * young generation, or from the image heap to the runtime heap). During collections, the remembered * set is used to avoid scanning the entire image heap and old generation. 
*/ -public interface RememberedSet { +public interface RememberedSet extends BarrierSetProvider { @Fold static RememberedSet get() { return ImageSingletons.lookup(RememberedSet.class); } - /** Creates the barrier set that the compiler should use for emitting read/write barriers. */ - BarrierSet createBarrierSet(MetaAccessProvider metaAccess); - /** Returns the header size of aligned chunks. */ UnsignedWord getHeaderSizeOfAlignedChunk(); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/UnmanagedMemoryUtil.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/UnmanagedMemoryUtil.java index 7686a4e05329..60f2d98f0547 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/UnmanagedMemoryUtil.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/UnmanagedMemoryUtil.java @@ -26,8 +26,11 @@ import org.graalvm.word.Pointer; import org.graalvm.word.UnsignedWord; +import org.graalvm.word.WordBase; import org.graalvm.word.WordFactory; +import com.oracle.svm.core.config.ConfigurationValues; + /** * The methods in this class are mainly used to fill or copy unmanaged (i.e., non-Java heap) * memory. None of the methods cares about Java semantics like GC barriers or the Java memory model. @@ -146,12 +149,46 @@ public static void copyLongsForward(Pointer from, Pointer to, UnsignedWord size) dst.writeLong(24, l24); offset = next; } + while (offset.belowThan(size)) { to.writeLong(offset, from.readLong(offset)); offset = offset.add(8); } } + @IntrinsicCandidate + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static void copyWordsForward(Pointer from, Pointer to, UnsignedWord size) { + int wordSize = ConfigurationValues.getTarget().wordSize; + int stepSize = 4 * wordSize; + Pointer src = from; + Pointer dst = to; + Pointer srcEnd = src.add(size); + + while (src.add(stepSize).belowOrEqual(srcEnd)) { + WordBase w0 = src.readWord(0 * wordSize); + WordBase w8 = src.readWord(1 * wordSize); + WordBase w16 = src.readWord(2 * wordSize); + WordBase w24 = src.readWord(3 * wordSize); + dst.writeWord(0 * wordSize, w0); + dst.writeWord(1 * wordSize, w8); + dst.writeWord(2 * wordSize, w16); + dst.writeWord(3 * wordSize, w24); + + src = src.add(stepSize); + dst = dst.add(stepSize); + } + + while (src.belowThan(srcEnd)) { + dst.writeWord(WordFactory.zero(), src.readWord(WordFactory.zero())); + src = src.add(wordSize); + dst = dst.add(wordSize); + } + + assert src.equal(srcEnd); + assert dst.equal(to.add(size)); + } + /** * Copy bytes from one memory area to another. 
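For readers unfamiliar with the unrolling idiom used in copyWordsForward above, here is a standalone sketch of the same shape in plain Java (ordinary long[] arrays instead of raw Pointers; all names below are illustrative, not SVM API): four word-sized elements are copied per iteration, and a scalar loop finishes the remainder.

final class UnrolledCopyDemo {
    /** Copies count longs from src[srcPos] to dst[dstPos], four at a time, then one at a time. */
    static void copyWordsForward(long[] src, int srcPos, long[] dst, int dstPos, int count) {
        int i = 0;
        for (; i + 4 <= count; i += 4) {
            long w0 = src[srcPos + i];
            long w1 = src[srcPos + i + 1];
            long w2 = src[srcPos + i + 2];
            long w3 = src[srcPos + i + 3];
            dst[dstPos + i] = w0;
            dst[dstPos + i + 1] = w1;
            dst[dstPos + i + 2] = w2;
            dst[dstPos + i + 3] = w3;
        }
        for (; i < count; i++) {
            dst[dstPos + i] = src[srcPos + i];   // copy the remaining 0-3 words
        }
    }

    public static void main(String[] args) {
        long[] src = {1, 2, 3, 4, 5, 6, 7};
        long[] dst = new long[src.length];
        copyWordsForward(src, 0, dst, 0, src.length);
        assert java.util.Arrays.equals(src, dst);
    }
}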
*/ diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/allocationprofile/AllocationSite.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/allocationprofile/AllocationSite.java index dc9e0903668f..f5356f138174 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/allocationprofile/AllocationSite.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/allocationprofile/AllocationSite.java @@ -163,10 +163,10 @@ public static List getSites() { return sortedSites; } - public static void dumpProfilingResultsOnShutdown(boolean isFirstIsolate) { - if (isFirstIsolate) { + public static RuntimeSupport.Hook getShutdownHook() { + return isFirstIsolate -> { dumpProfilingResults(); - } + }; } public static void dumpProfilingResults() { @@ -222,7 +222,7 @@ public List> getRequiredFeatures() { @Override public void afterRegistration(AfterRegistrationAccess access) { if (AllocationSite.Options.AllocationProfiling.getValue()) { - RuntimeSupport.getRuntimeSupport().addShutdownHook(AllocationSite::dumpProfilingResultsOnShutdown); + RuntimeSupport.getRuntimeSupport().addShutdownHook(AllocationSite.getShutdownHook()); } } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoAccess.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoAccess.java index d3ba608dc1d2..78179c1ff3d4 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoAccess.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoAccess.java @@ -287,6 +287,7 @@ public static NonmovableArray getStackReferenceMapEncoding(CodeInfo info) return cast(info).getStackReferenceMapEncoding(); } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static long lookupStackReferenceMapIndex(CodeInfo info, long ip) { return CodeInfoDecoder.lookupStackReferenceMapIndex(info, ip); } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoDecoder.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoDecoder.java index 0c443c7f3068..8f283a6783c9 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoDecoder.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/code/CodeInfoDecoder.java @@ -184,6 +184,7 @@ static long lookupDeoptimizationEntrypoint(CodeInfo info, long method, long enco return -1; } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) static long lookupStackReferenceMapIndex(CodeInfo info, long ip) { long entryIP = lookupEntryIP(ip); long entryOffset = loadEntryOffset(info, ip); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/nodes/NewStoredContinuationNode.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/nodes/NewStoredContinuationNode.java new file mode 100644 index 000000000000..d0480f3fea0d --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/nodes/NewStoredContinuationNode.java @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2022, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
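On the AllocationSite change above: the shutdown dump is now provided as a factory returning a RuntimeSupport.Hook instead of a method that takes the isFirstIsolate flag directly. A minimal standalone analogue of that shape (the Hook interface and classes below are simplified stand-ins, not the SVM API):

@FunctionalInterface
interface Hook {
    void execute(boolean isFirstIsolate);
}

final class ShutdownHookDemo {
    static Hook getShutdownHook() {
        // The returned lambda captures the dump logic; the runtime decides when to run it.
        return isFirstIsolate -> dumpProfilingResults();
    }

    static void dumpProfilingResults() {
        System.out.println("dumping allocation profile");
    }

    public static void main(String[] args) {
        Hook hook = getShutdownHook();
        hook.execute(true);   // stands in for the runtime invoking registered hooks at shutdown
    }
}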
Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.graal.nodes; + +import org.graalvm.compiler.core.common.type.StampFactory; +import org.graalvm.compiler.core.common.type.TypeReference; +import org.graalvm.compiler.graph.NodeClass; +import org.graalvm.compiler.nodeinfo.NodeInfo; +import org.graalvm.compiler.nodes.ValueNode; +import org.graalvm.compiler.nodes.java.AbstractNewArrayNode; + +import jdk.vm.ci.meta.ResolvedJavaType; + +@NodeInfo +public final class NewStoredContinuationNode extends AbstractNewArrayNode { + public static final NodeClass TYPE = NodeClass.create(NewStoredContinuationNode.class); + + private final ResolvedJavaType instanceClass; + private final ResolvedJavaType elementType; + + public NewStoredContinuationNode(ResolvedJavaType instanceType, ResolvedJavaType elementType, ValueNode arrayLength) { + super(TYPE, StampFactory.objectNonNull(TypeReference.createExactTrusted(instanceType)), arrayLength, true, null); + this.instanceClass = instanceType; + this.elementType = elementType; + } + + /** + * Gets the instance class being allocated by this node. + * + * @return the instance class allocated + */ + public ResolvedJavaType instanceClass() { + return instanceClass; + } + + /** + * Gets the element type of the inlined array. + * + * @return the element type of the inlined array + */ + public ResolvedJavaType elementType() { + return elementType; + } + + @NodeIntrinsic + public static native Object allocate(@ConstantNodeParameter Class instanceType, @ConstantNodeParameter Class elementType, int arrayLength); +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/nodes/WriteStackPointerNode.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/nodes/WriteStackPointerNode.java new file mode 100644 index 000000000000..b06da7dfa890 --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/nodes/WriteStackPointerNode.java @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2022, 2022, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ +package com.oracle.svm.core.graal.nodes; + +import org.graalvm.compiler.core.common.type.StampFactory; +import org.graalvm.compiler.graph.NodeClass; +import org.graalvm.compiler.lir.gen.LIRGeneratorTool; +import org.graalvm.compiler.nodeinfo.NodeCycles; +import org.graalvm.compiler.nodeinfo.NodeInfo; +import org.graalvm.compiler.nodeinfo.NodeSize; +import org.graalvm.compiler.nodes.FixedWithNextNode; +import org.graalvm.compiler.nodes.ValueNode; +import org.graalvm.compiler.nodes.spi.LIRLowerable; +import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool; +import org.graalvm.word.WordBase; + +import com.oracle.svm.core.FrameAccess; +import com.oracle.svm.core.ReservedRegisters; + +@NodeInfo(cycles = NodeCycles.CYCLES_1, size = NodeSize.SIZE_1) +public class WriteStackPointerNode extends FixedWithNextNode implements LIRLowerable { + public static final NodeClass TYPE = NodeClass.create(WriteStackPointerNode.class); + + @Input protected ValueNode value; + + protected WriteStackPointerNode(ValueNode value) { + super(TYPE, StampFactory.forVoid()); + this.value = value; + } + + @Override + public void generate(NodeLIRBuilderTool gen) { + LIRGeneratorTool tool = gen.getLIRGeneratorTool(); + gen.getLIRGeneratorTool().emitWriteRegister(ReservedRegisters.singleton().getFrameRegister(), gen.operand(value), tool.getLIRKind(FrameAccess.getWordStamp())); + } + + @NodeIntrinsic + public static native void write(WordBase value); +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/GCAllocationSupport.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/GCAllocationSupport.java index aa4697614b19..8ee4bbf14d83 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/GCAllocationSupport.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/GCAllocationSupport.java @@ -33,11 +33,13 @@ * look like in detail. 
*/ public interface GCAllocationSupport { - ForeignCallDescriptor getSlowNewInstanceStub(); + ForeignCallDescriptor getNewInstanceStub(); - ForeignCallDescriptor getSlowNewArrayStub(); + ForeignCallDescriptor getNewArrayStub(); - ForeignCallDescriptor getSlowNewPodInstanceStub(); + ForeignCallDescriptor getNewStoredContinuationStub(); + + ForeignCallDescriptor getNewPodInstanceStub(); boolean useTLAB(); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SubstrateAllocationSnippets.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SubstrateAllocationSnippets.java index 73e2748b86d8..8d0a33ed0a5e 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SubstrateAllocationSnippets.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/graal/snippets/SubstrateAllocationSnippets.java @@ -86,6 +86,7 @@ import com.oracle.svm.core.graal.meta.SubstrateForeignCallsProvider; import com.oracle.svm.core.graal.nodes.ForeignCallWithExceptionNode; import com.oracle.svm.core.graal.nodes.NewPodInstanceNode; +import com.oracle.svm.core.graal.nodes.NewStoredContinuationNode; import com.oracle.svm.core.graal.nodes.SubstrateNewHybridInstanceNode; import com.oracle.svm.core.heap.Heap; import com.oracle.svm.core.heap.Pod; @@ -97,6 +98,8 @@ import com.oracle.svm.core.snippets.SnippetRuntime; import com.oracle.svm.core.snippets.SnippetRuntime.SubstrateForeignCallDescriptor; import com.oracle.svm.core.snippets.SubstrateForeignCallTarget; +import com.oracle.svm.core.thread.Continuation; +import com.oracle.svm.core.thread.ContinuationSupport; import com.oracle.svm.core.util.VMError; import jdk.vm.ci.meta.JavaKind; @@ -144,8 +147,77 @@ public Object allocateArray(@NonNullParameter DynamicHub hub, } @Snippet - public Object allocateInstanceDynamic(@NonNullParameter DynamicHub hub, @ConstantParameter FillContent fillContents, @ConstantParameter boolean emitMemoryBarrier, - @ConstantParameter boolean supportsBulkZeroing, @ConstantParameter boolean supportsOptimizedFilling, @ConstantParameter AllocationProfilingData profilingData) { + public Object allocateStoredContinuation(@NonNullParameter DynamicHub hub, + int length, + @ConstantParameter int arrayBaseOffset, + @ConstantParameter int log2ElementSize, + @ConstantParameter long ipOffset, + @ConstantParameter boolean emitMemoryBarrier, + @ConstantParameter AllocationProfilingData profilingData) { + Word thread = getTLABInfo(); + Word top = readTlabTop(thread); + Word end = readTlabEnd(thread); + ReplacementsUtil.dynamicAssert(end.subtract(top).belowOrEqual(Integer.MAX_VALUE), "TLAB is too large"); + + // A negative array length will result in an array size larger than the largest possible + // TLAB. Therefore, this case will always end up in the stub call. 
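The comment above explains why the snippet needs no explicit negative-length check: a negative length, reinterpreted as an unsigned size, can never pass the TLAB bounds test, so such a request always reaches the stub. A standalone sketch of that bump-pointer fast path with a stub fallback (plain long fields instead of thread-local words; allocateViaStub is a hypothetical placeholder):

final class TlabSketch {
    long top;   // current allocation pointer within the TLAB
    long end;   // end of the TLAB

    TlabSketch(long top, long end) {
        this.top = top;
        this.end = end;
    }

    long allocate(long size) {
        // Fast path: the request fits into the remaining TLAB space
        // (equivalent to newTop.belowOrEqual(end) when sizes cannot overflow).
        if (Long.compareUnsigned(size, end - top) <= 0) {
            long result = top;
            top += size;
            return result;
        }
        // Slow path: oversized request or exhausted TLAB; delegate to the (hypothetical) stub.
        return allocateViaStub(size);
    }

    long allocateViaStub(long size) {
        throw new UnsupportedOperationException("stub call placeholder");
    }
}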
+ UnsignedWord allocationSize = arrayAllocationSize(length, arrayBaseOffset, log2ElementSize); + Word newTop = top.add(allocationSize); + + Object result; + if (useTLAB() && probability(FAST_PATH_PROBABILITY, shouldAllocateInTLAB(allocationSize, true)) && probability(FAST_PATH_PROBABILITY, newTop.belowOrEqual(end))) { + writeTlabTop(thread, newTop); + emitPrefetchAllocate(newTop, true); + result = formatStoredContinuation(encodeAsTLABObjectHeader(hub), allocationSize, length, top, emitMemoryBarrier, ipOffset, profilingData.snippetCounters); + } else { + profilingData.snippetCounters.stub.inc(); + result = callSlowNewStoredContinuation(gcAllocationSupport().getNewStoredContinuationStub(), encodeAsTLABObjectHeader(hub), length); + } + profileAllocation(profilingData, allocationSize); + return piArrayCastToSnippetReplaceeStamp(verifyOop(result), length); + } + + @Snippet + public Object allocatePod(@NonNullParameter DynamicHub hub, + int arrayLength, + byte[] referenceMap, + @ConstantParameter boolean emitMemoryBarrier, + @ConstantParameter boolean maybeUnroll, + @ConstantParameter boolean supportsBulkZeroing, + @ConstantParameter boolean supportsOptimizedFilling, + @ConstantParameter AllocationSnippets.AllocationProfilingData profilingData) { + Word thread = getTLABInfo(); + Word top = readTlabTop(thread); + Word end = readTlabEnd(thread); + ReplacementsUtil.dynamicAssert(end.subtract(top).belowOrEqual(Integer.MAX_VALUE), "TLAB is too large"); + + // A negative array length will result in an array size larger than the largest possible + // TLAB. Therefore, this case will always end up in the stub call. + int arrayBaseOffset = LayoutEncoding.getArrayBaseOffsetAsInt(hub.getLayoutEncoding()); + UnsignedWord allocationSize = arrayAllocationSize(arrayLength, arrayBaseOffset, 0); + Word newTop = top.add(allocationSize); + + Object result; + if (useTLAB() && probability(FAST_PATH_PROBABILITY, shouldAllocateInTLAB(allocationSize, true)) && probability(FAST_PATH_PROBABILITY, newTop.belowOrEqual(end))) { + writeTlabTop(thread, newTop); + emitPrefetchAllocate(newTop, true); + result = formatPod(encodeAsTLABObjectHeader(hub), hub, allocationSize, arrayLength, referenceMap, top, AllocationSnippets.FillContent.WITH_ZEROES, + emitMemoryBarrier, maybeUnroll, supportsBulkZeroing, supportsOptimizedFilling, profilingData.snippetCounters); + } else { + profilingData.snippetCounters.stub.inc(); + result = callSlowNewPodInstance(gcAllocationSupport().getNewPodInstanceStub(), encodeAsTLABObjectHeader(hub), arrayLength, referenceMap); + } + profileAllocation(profilingData, allocationSize); + return piArrayCastToSnippetReplaceeStamp(verifyOop(result), arrayLength); + } + + @Snippet + public Object allocateInstanceDynamic(@NonNullParameter DynamicHub hub, + @ConstantParameter FillContent fillContents, + @ConstantParameter boolean emitMemoryBarrier, + @ConstantParameter boolean supportsBulkZeroing, + @ConstantParameter boolean supportsOptimizedFilling, + @ConstantParameter AllocationProfilingData profilingData) { return allocateInstanceDynamicImpl(hub, fillContents, emitMemoryBarrier, supportsBulkZeroing, supportsOptimizedFilling, profilingData); } @@ -158,8 +230,13 @@ protected Object allocateInstanceDynamicImpl(DynamicHub hub, FillContent fillCon } @Snippet - public Object allocateArrayDynamic(DynamicHub elementType, int length, @ConstantParameter FillContent fillContents, @ConstantParameter boolean emitMemoryBarrier, - @ConstantParameter boolean supportsBulkZeroing, @ConstantParameter boolean 
supportsOptimizedFilling, @ConstantParameter AllocationProfilingData profilingData) { + public Object allocateArrayDynamic(DynamicHub elementType, + int length, + @ConstantParameter FillContent fillContents, + @ConstantParameter boolean emitMemoryBarrier, + @ConstantParameter boolean supportsBulkZeroing, + @ConstantParameter boolean supportsOptimizedFilling, + @ConstantParameter AllocationProfilingData profilingData) { DynamicHub checkedArrayHub = getCheckedArrayHub(elementType); int layoutEncoding = checkedArrayHub.getLayoutEncoding(); @@ -269,53 +346,36 @@ private static void arrayHubErrorStub(DynamicHub elementType) { } } - @Snippet - public Object allocatePod(@NonNullParameter DynamicHub hub, int arrayLength, byte[] referenceMap, @ConstantParameter boolean emitMemoryBarrier, @ConstantParameter boolean maybeUnroll, - @ConstantParameter boolean supportsBulkZeroing, @ConstantParameter boolean supportsOptimizedFilling, @ConstantParameter AllocationSnippets.AllocationProfilingData profilingData) { - - Word thread = getTLABInfo(); - Word top = readTlabTop(thread); - Word end = readTlabEnd(thread); - ReplacementsUtil.dynamicAssert(end.subtract(top).belowOrEqual(Integer.MAX_VALUE), "TLAB is too large"); - - int arrayBaseOffset = LayoutEncoding.getArrayBaseOffsetAsInt(hub.getLayoutEncoding()); - UnsignedWord allocationSize = arrayAllocationSize(arrayLength, arrayBaseOffset, 0); - Word newTop = top.add(allocationSize); - - Object instance; - if (useTLAB() && probability(FAST_PATH_PROBABILITY, shouldAllocateInTLAB(allocationSize, true)) && probability(FAST_PATH_PROBABILITY, newTop.belowOrEqual(end))) { - writeTlabTop(thread, newTop); - emitPrefetchAllocate(newTop, true); - instance = formatPod(encodeAsTLABObjectHeader(hub), hub, arrayLength, referenceMap, top, AllocationSnippets.FillContent.WITH_ZEROES, - afterArrayLengthOffset(), emitMemoryBarrier, maybeUnroll, supportsBulkZeroing, supportsOptimizedFilling, profilingData.snippetCounters); - } else { - profilingData.snippetCounters.stub.inc(); - instance = callSlowNewPodInstance(gcAllocationSupport().getSlowNewPodInstanceStub(), encodeAsTLABObjectHeader(hub), arrayLength, afterArrayLengthOffset(), referenceMap); - } - profileAllocation(profilingData, allocationSize); - return piArrayCastToSnippetReplaceeStamp(verifyOop(instance), arrayLength); - } - @NodeIntrinsic(value = ForeignCallNode.class) - private static native Object callSlowNewPodInstance(@ConstantNodeParameter ForeignCallDescriptor descriptor, Word hub, int length, int fillStartOffset, byte[] referenceMap); + private static native Object callSlowNewPodInstance(@ConstantNodeParameter ForeignCallDescriptor descriptor, Word hub, int length, byte[] referenceMap); - public Object formatPod(Word objectHeader, DynamicHub hub, int arrayLength, byte[] referenceMap, Word memory, FillContent fillContents, int fillStartOffset, - boolean emitMemoryBarrier, boolean maybeUnroll, boolean supportsBulkZeroing, boolean supportsOptimizedFilling, AllocationSnippetCounters snippetCounters) { + public Object formatArray(Word hub, UnsignedWord allocationSize, int length, Word memory, FillContent fillContents, boolean emitMemoryBarrier, boolean maybeUnroll, boolean supportsBulkZeroing, + boolean supportsOptimizedFilling, AllocationSnippetCounters snippetCounters) { + return formatArray(hub, allocationSize, length, memory, fillContents, emitMemoryBarrier, SubstrateAllocationSnippets.afterArrayLengthOffset(), maybeUnroll, supportsBulkZeroing, + supportsOptimizedFilling, snippetCounters); + } - int layoutEncoding 
= hub.getLayoutEncoding(); - UnsignedWord allocationSize = LayoutEncoding.getArraySize(layoutEncoding, arrayLength); + public Object formatStoredContinuation(Word objectHeader, UnsignedWord allocationSize, int arrayLength, Word memory, boolean emitMemoryBarrier, long ipOffset, + AllocationSnippetCounters snippetCounters) { + Object result = formatArray(objectHeader, allocationSize, arrayLength, memory, FillContent.DO_NOT_FILL, false, false, false, false, snippetCounters); + memory.writeWord(WordFactory.unsigned(ipOffset), WordFactory.nullPointer(), LocationIdentity.init()); + emitMemoryBarrierIf(emitMemoryBarrier); + return result; + } - Object instance = formatArray(objectHeader, allocationSize, arrayLength, memory, fillContents, fillStartOffset, - false, maybeUnroll, supportsBulkZeroing, supportsOptimizedFilling, snippetCounters); + public Object formatPod(Word objectHeader, DynamicHub hub, UnsignedWord allocationSize, int arrayLength, byte[] referenceMap, Word memory, FillContent fillContents, boolean emitMemoryBarrier, + boolean maybeUnroll, boolean supportsBulkZeroing, boolean supportsOptimizedFilling, AllocationSnippetCounters snippetCounters) { + Object result = formatArray(objectHeader, allocationSize, arrayLength, memory, fillContents, false, maybeUnroll, supportsBulkZeroing, supportsOptimizedFilling, + snippetCounters); int fromOffset = ConfigurationValues.getObjectLayout().getArrayBaseOffset(JavaKind.Byte); - int toOffset = LayoutEncoding.getArrayBaseOffsetAsInt(layoutEncoding) + arrayLength - referenceMap.length; + int toOffset = LayoutEncoding.getArrayBaseOffsetAsInt(hub.getLayoutEncoding()) + arrayLength - referenceMap.length; for (int i = 0; i < referenceMap.length; i++) { byte b = ObjectAccess.readByte(referenceMap, fromOffset + i, byteArrayIdentity()); - ObjectAccess.writeByte(instance, toOffset + i, b, LocationIdentity.INIT_LOCATION); + ObjectAccess.writeByte(result, toOffset + i, b, LocationIdentity.INIT_LOCATION); } emitMemoryBarrierIf(emitMemoryBarrier); - return instance; + return result; } @Fold @@ -353,7 +413,7 @@ protected final int instanceHeaderSize() { } @Fold - protected static int afterArrayLengthOffset() { + public static int afterArrayLengthOffset() { return ConfigurationValues.getObjectLayout().getArrayLengthOffset() + ConfigurationValues.getObjectLayout().sizeInBytes(JavaKind.Int); } @@ -398,12 +458,12 @@ public static Word encodeAsTLABObjectHeader(DynamicHub hub) { @Override protected final Object callNewInstanceStub(Word objectHeader) { - return callSlowNewInstance(gcAllocationSupport().getSlowNewInstanceStub(), objectHeader); + return callSlowNewInstance(gcAllocationSupport().getNewInstanceStub(), objectHeader); } @Override - protected final Object callNewArrayStub(Word objectHeader, int length, int fillStartOffset) { - return callSlowNewArray(gcAllocationSupport().getSlowNewArrayStub(), objectHeader, length, fillStartOffset); + protected final Object callNewArrayStub(Word objectHeader, int length) { + return callSlowNewArray(gcAllocationSupport().getNewArrayStub(), objectHeader, length); } @Override @@ -415,7 +475,10 @@ protected final Object callNewMultiArrayStub(Word objectHeader, int rank, Word d private static native Object callSlowNewInstance(@ConstantNodeParameter ForeignCallDescriptor descriptor, Word hub); @NodeIntrinsic(value = ForeignCallNode.class) - private static native Object callSlowNewArray(@ConstantNodeParameter ForeignCallDescriptor descriptor, Word hub, int length, int fillStartOffset); + private static native Object 
callSlowNewArray(@ConstantNodeParameter ForeignCallDescriptor descriptor, Word hub, int length); + + @NodeIntrinsic(value = ForeignCallNode.class) + private static native Object callSlowNewStoredContinuation(@ConstantNodeParameter ForeignCallDescriptor descriptor, Word hub, int length); @NodeIntrinsic(value = ForeignCallNode.class) private static native Object callNewMultiArray(@ConstantNodeParameter ForeignCallDescriptor descriptor, Word hub, int rank, Word dimensions); @@ -482,6 +545,7 @@ public static class Templates extends SubstrateTemplates { private final SnippetInfo validateNewInstanceClass; + private final SnippetInfo allocateStoredContinuation; private final SnippetInfo allocatePod; public Templates(OptionValues options, Providers providers, SubstrateAllocationSnippets receiver) { @@ -496,6 +560,12 @@ public Templates(OptionValues options, Providers providers, SubstrateAllocationS newmultiarray = snippet(SubstrateAllocationSnippets.class, "newmultiarray", null, receiver, ALLOCATION_LOCATIONS); validateNewInstanceClass = snippet(SubstrateAllocationSnippets.class, "validateNewInstanceClass", null, receiver, ALLOCATION_LOCATIONS); + SnippetInfo allocateStoredContinuationSnippet = null; + if (Continuation.isSupported()) { + allocateStoredContinuationSnippet = snippet(SubstrateAllocationSnippets.class, "allocateStoredContinuation", null, receiver, ALLOCATION_LOCATIONS); + } + allocateStoredContinuation = allocateStoredContinuationSnippet; + SnippetInfo allocatePodSnippet = null; if (Pod.RuntimeSupport.isPresent()) { Object[] podLocations = SubstrateAllocationSnippets.ALLOCATION_LOCATIONS; @@ -515,6 +585,9 @@ public void registerLowering(Map, NodeLoweringProvider> lowerings.put(NewMultiArrayNode.class, new NewMultiArrayLowering()); lowerings.put(ValidateNewInstanceClassNode.class, new ValidateNewInstanceClassLowering()); + if (Continuation.isSupported()) { + lowerings.put(NewStoredContinuationNode.class, new NewStoredContinuationLowering()); + } if (Pod.RuntimeSupport.isPresent()) { lowerings.put(NewPodInstanceNode.class, new NewPodInstanceLowering()); } @@ -589,7 +662,7 @@ public void lower(SubstrateNewHybridInstanceNode node, LoweringTool tool) { ValueNode length = node.length(); DynamicHub hub = instanceClass.getHub(); int layoutEncoding = hub.getLayoutEncoding(); - int arrayBaseOffset = (int) LayoutEncoding.getArrayBaseOffset(layoutEncoding).rawValue(); + int arrayBaseOffset = LayoutEncoding.getArrayBaseOffsetAsInt(layoutEncoding); int log2ElementSize = LayoutEncoding.getArrayIndexShift(layoutEncoding); boolean fillContents = node.fillContents(); assert fillContents : "fillContents must be true for hybrid allocations"; @@ -612,6 +685,36 @@ public void lower(SubstrateNewHybridInstanceNode node, LoweringTool tool) { } } + private class NewStoredContinuationLowering implements NodeLoweringProvider { + @Override + public void lower(NewStoredContinuationNode node, LoweringTool tool) { + StructuredGraph graph = node.graph(); + if (graph.getGuardsStage() != GraphState.GuardsStage.AFTER_FSA) { + return; + } + + SharedType instanceClass = (SharedType) node.instanceClass(); + ValueNode length = node.length(); + DynamicHub hub = instanceClass.getHub(); + int layoutEncoding = hub.getLayoutEncoding(); + int arrayBaseOffset = LayoutEncoding.getArrayBaseOffsetAsInt(layoutEncoding); + int log2ElementSize = LayoutEncoding.getArrayIndexShift(layoutEncoding); + + ConstantNode hubConstant = ConstantNode.forConstant(SubstrateObjectConstant.forObject(hub), providers.getMetaAccess(), graph); + + 
Arguments args = new Arguments(allocateStoredContinuation, graph.getGuardsStage(), tool.getLoweringStage()); + args.add("hub", hubConstant); + args.add("length", length.isAlive() ? length : graph.addOrUniqueWithInputs(length)); + args.addConst("arrayBaseOffset", arrayBaseOffset); + args.addConst("log2ElementSize", log2ElementSize); + args.addConst("ipOffset", ContinuationSupport.singleton().getIPOffset()); + args.addConst("emitMemoryBarrier", node.emitMemoryBarrier()); + args.addConst("profilingData", getProfilingData(node, instanceClass)); + + template(node, args).instantiate(providers.getMetaAccess(), node, SnippetTemplate.DEFAULT_REPLACER, args); + } + } + private class NewArrayLowering implements NodeLoweringProvider { @Override public void lower(NewArrayNode node, LoweringTool tool) { diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/HeapFeature.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/AllocationFeature.java similarity index 97% rename from substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/HeapFeature.java rename to substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/AllocationFeature.java index 424e0b4edcb6..f5d30671091c 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/HeapFeature.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/AllocationFeature.java @@ -32,7 +32,7 @@ import com.oracle.svm.core.graal.snippets.SubstrateAllocationSnippets; @AutomaticFeature -public class HeapFeature implements InternalFeature { +public class AllocationFeature implements InternalFeature { @Override public void duringSetup(DuringSetupAccess access) { if (!ImageSingletons.contains(SubstrateAllocationSnippets.class)) { diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/BarrierSetProvider.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/BarrierSetProvider.java new file mode 100644 index 000000000000..ebdf35f3cc0c --- /dev/null +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/BarrierSetProvider.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
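Referring back to formatPod above: the reference map bytes are copied to the very end of the pod's array part, which is why toOffset is computed as arrayBaseOffset plus arrayLength minus referenceMap.length. A plain-array sketch of that placement (ordinary byte[] instead of object memory; names are illustrative):

public class PodRefMapDemo {
    public static void main(String[] args) {
        byte[] referenceMap = {1, 0, 2};
        int arrayBaseOffset = 0;            // plain byte[] analogue: the base offset is 0
        int arrayLength = 16;
        byte[] podArrayPart = new byte[arrayLength];

        // The map occupies the trailing bytes; everything before it is pod payload.
        int toOffset = arrayBaseOffset + arrayLength - referenceMap.length;
        System.arraycopy(referenceMap, 0, podArrayPart, toOffset, referenceMap.length);

        assert podArrayPart[toOffset] == 1 && podArrayPart[arrayLength - 1] == 2;
    }
}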
+ */ +package com.oracle.svm.core.heap; + +import org.graalvm.compiler.nodes.gc.BarrierSet; + +import jdk.vm.ci.meta.MetaAccessProvider; + +public interface BarrierSetProvider { + /** + * Returns a suitable {@link org.graalvm.compiler.nodes.gc.BarrierSet} for the used garbage + * collector. + */ + BarrierSet createBarrierSet(MetaAccessProvider metaAccess); +} diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Heap.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Heap.java index 95a2c34bc449..aa6bdbc111be 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Heap.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Heap.java @@ -30,7 +30,6 @@ import java.util.List; import org.graalvm.compiler.api.replacements.Fold; -import org.graalvm.compiler.nodes.gc.BarrierSet; import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.IsolateThread; import org.graalvm.nativeimage.Platform; @@ -46,8 +45,6 @@ import com.oracle.svm.core.os.CommittedMemoryProvider; import com.oracle.svm.core.os.ImageHeapProvider; -import jdk.vm.ci.meta.MetaAccessProvider; - public abstract class Heap { @Fold public static Heap getHeap() { @@ -146,11 +143,6 @@ public List> getLoadedClasses() { /** Reset the heap to the normal execution state. */ public abstract void endSafepoint(); - /** - * Returns a suitable {@link BarrierSet} for the garbage collector that is used for this heap. - */ - public abstract BarrierSet createBarrierSet(MetaAccessProvider metaAccess); - /** * Returns a multiple to which the heap address space should be aligned to at runtime. * diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/InstanceReferenceMapDecoder.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/InstanceReferenceMapDecoder.java index e739238e6ccb..c203d92c0fe2 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/InstanceReferenceMapDecoder.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/InstanceReferenceMapDecoder.java @@ -32,7 +32,6 @@ import com.oracle.svm.core.util.DuplicatedInNativeCode; import com.oracle.svm.core.c.NonmovableArray; import com.oracle.svm.core.config.ConfigurationValues; -import com.oracle.svm.core.thread.Continuation; import com.oracle.svm.core.util.NonmovableByteArrayReader; import com.oracle.svm.core.util.TypedMemoryReader; @@ -43,10 +42,6 @@ public static boolean walkOffsetsFromPointer(Pointer baseAddress, NonmovableArra assert ReferenceMapIndex.denotesValidReferenceMap(referenceMapIndex); assert referenceMapEncoding.isNonNull(); - if (Continuation.isSupported() && referenceMapIndex == ReferenceMapIndex.STORED_CONTINUATION) { - return StoredContinuationAccess.walkReferences(baseAddress, visitor, holderObject); - } - Pointer position = NonmovableByteArrayReader.pointerTo(referenceMapEncoding, referenceMapIndex); int entryCount = TypedMemoryReader.getS4(position); position = position.add(4); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Pod.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Pod.java index a92cda205ad8..820fa07a64d0 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Pod.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/Pod.java @@ -126,7 +126,7 @@ public static Builder createExtending(Class superClass, Class facto private Builder(Class superClass, Class factoryInterface, Pod 
superPod) { assert superPod == null || (superClass == null && factoryInterface == null); if (!RuntimeSupport.isPresent()) { - throw new UnsupportedOperationException("Pods are not available in this native image. Only SerialGC currently supports pods."); + throw new UnsupportedOperationException("Pods are not available in this native image."); } if (superPod != null) { this.podInfo = superPod.podInfo; diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/PodReferenceMapDecoder.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/PodReferenceMapDecoder.java index f3134388c8e1..80f7a473161c 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/PodReferenceMapDecoder.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/PodReferenceMapDecoder.java @@ -24,6 +24,7 @@ */ package com.oracle.svm.core.heap; +import com.oracle.svm.core.annotate.DuplicatedInNativeCode; import org.graalvm.compiler.api.directives.GraalDirectives; import org.graalvm.compiler.nodes.java.ArrayLengthNode; import org.graalvm.compiler.word.BarrieredAccess; @@ -41,6 +42,7 @@ import com.oracle.svm.core.util.UnsignedUtils; public final class PodReferenceMapDecoder { + @DuplicatedInNativeCode @AlwaysInline("de-virtualize calls to ObjectReferenceVisitor") public static boolean walkOffsetsFromPointer(Pointer baseAddress, int layoutEncoding, ObjectReferenceVisitor visitor, Object obj) { int referenceSize = ConfigurationValues.getObjectLayout().getReferenceSize(); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ReferenceMapIndex.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ReferenceMapIndex.java index 86949ba1fd08..4989c633cbbe 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ReferenceMapIndex.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/ReferenceMapIndex.java @@ -40,12 +40,6 @@ public class ReferenceMapIndex { */ public static final int NO_REFERENCE_MAP = -1; - /** - * Reference map index value for {@link StoredContinuation} to indicate this instance needs - * special treatment during allocation and GC. - */ - public static final int STORED_CONTINUATION = -2; - public static boolean denotesEmptyReferenceMap(long referenceMapIndex) { return referenceMapIndex == EMPTY_REFERENCE_MAP || referenceMapIndex == NO_REFERENCE_MAP; } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/StoredContinuation.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/StoredContinuation.java index 5e4e1b6c3c44..c804805e5d86 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/StoredContinuation.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/StoredContinuation.java @@ -25,10 +25,14 @@ package com.oracle.svm.core.heap; import com.oracle.svm.core.hub.Hybrid; +import org.graalvm.compiler.word.Word; +import org.graalvm.nativeimage.c.function.CodePointer; /** Execution state of a continuation, use via {@link StoredContinuationAccess}. */ -@Hybrid(componentType = long.class) +@Hybrid(componentType = Word.class) public final class StoredContinuation { + CodePointer ip; + /** Must be allocated via {@link StoredContinuationAccess}. 
*/ private StoredContinuation() { } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/StoredContinuationAccess.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/StoredContinuationAccess.java index 04db7edf6d0d..fa9b6ef89bb9 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/StoredContinuationAccess.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/StoredContinuationAccess.java @@ -26,13 +26,16 @@ import org.graalvm.compiler.api.directives.GraalDirectives; import org.graalvm.compiler.graph.Node.NodeIntrinsic; +import org.graalvm.compiler.nodes.extended.MembarNode; import org.graalvm.compiler.nodes.java.ArrayLengthNode; import org.graalvm.compiler.word.Word; import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.IsolateThread; import org.graalvm.nativeimage.StackValue; import org.graalvm.nativeimage.c.function.CodePointer; +import org.graalvm.nativeimage.c.struct.RawStructure; import org.graalvm.word.Pointer; +import org.graalvm.word.PointerBase; import org.graalvm.word.UnsignedWord; import org.graalvm.word.WordFactory; @@ -46,9 +49,10 @@ import com.oracle.svm.core.code.FrameInfoQueryResult; import com.oracle.svm.core.code.SimpleCodeInfoQueryResult; import com.oracle.svm.core.code.UntetheredCodeInfo; +import com.oracle.svm.core.config.ConfigurationValues; import com.oracle.svm.core.deopt.DeoptimizedFrame; import com.oracle.svm.core.deopt.Deoptimizer; -import com.oracle.svm.core.graal.nodes.SubstrateNewHybridInstanceNode; +import com.oracle.svm.core.graal.nodes.NewStoredContinuationNode; import com.oracle.svm.core.hub.LayoutEncoding; import com.oracle.svm.core.snippets.KnownIntrinsics; import com.oracle.svm.core.stack.JavaStackWalk; @@ -62,16 +66,18 @@ /** Helper for allocating and accessing {@link StoredContinuation} instances. */ public final class StoredContinuationAccess { - private static final int IP_OFFSET = 0; // instruction pointer of top frame - private static final int FRAMES_OFFSET = IP_OFFSET + Long.BYTES; - private StoredContinuationAccess() { } private static StoredContinuation allocate(int framesSize) { - // Using long[] to ensure that words are properly aligned. - int nlongs = Integer.divideUnsigned(FRAMES_OFFSET + framesSize, Long.BYTES); - StoredContinuation s = (StoredContinuation) SubstrateNewHybridInstanceNode.allocate(StoredContinuation.class, long.class, nlongs); + // Using Word[] to ensure that words are properly aligned. + int nwords = Integer.divideUnsigned(framesSize, ConfigurationValues.getTarget().wordSize); + assert nwords * ConfigurationValues.getTarget().wordSize == framesSize; + /* + * There is no need to zero the array part (i.e., the stack data) of the StoredContinuation, + * because the GC won't visit it if StoredContinuation.ip is null. 
+ */ + StoredContinuation s = (StoredContinuation) NewStoredContinuationNode.allocate(StoredContinuation.class, Word.class, nwords); assert getFramesSizeInBytes(s) == framesSize; return s; } @@ -81,16 +87,16 @@ private static StoredContinuation allocate(int framesSize) { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) private static int getSizeInBytes(StoredContinuation s) { - return arrayLength(s) * Long.BYTES; + return arrayLength(s) * ConfigurationValues.getTarget().wordSize; } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static int getFramesSizeInBytes(StoredContinuation s) { - return getSizeInBytes(s) - FRAMES_OFFSET; + return getSizeInBytes(s); } @Uninterruptible(reason = "Prevent GC during accesses via object address.", callerMustBe = true) - private static Pointer arrayAddress(StoredContinuation s) { + public static Pointer getFramesStart(StoredContinuation s) { int layout = KnownIntrinsics.readHub(s).getLayoutEncoding(); UnsignedWord baseOffset = LayoutEncoding.getArrayBaseOffset(layout); return Word.objectToUntrackedPointer(s).add(baseOffset); @@ -98,12 +104,7 @@ private static Pointer arrayAddress(StoredContinuation s) { @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static CodePointer getIP(StoredContinuation s) { - return arrayAddress(s).readWord(IP_OFFSET); - } - - @Uninterruptible(reason = "Prevent GC during accesses via object address.", callerMustBe = true) - public static Pointer getFramesStart(StoredContinuation s) { - return arrayAddress(s).add(FRAMES_OFFSET); + return s.ip; } public static int allocateToYield(Continuation c, Pointer baseSp, Pointer sp, CodePointer ip) { @@ -143,8 +144,8 @@ private static int allocateFromStack(Continuation cont, Pointer baseSp, Pointer @Uninterruptible(reason = "Prevent modifications to the stack while initializing instance and copying frames.") private static void fillUninterruptibly(StoredContinuation stored, CodePointer ip, Pointer sp, int size) { - arrayAddress(stored).writeWord(IP_OFFSET, ip); - UnmanagedMemoryUtil.copy(sp, getFramesStart(stored), WordFactory.unsigned(size)); + UnmanagedMemoryUtil.copyWordsForward(sp, getFramesStart(stored), WordFactory.unsigned(size)); + setIP(stored, ip); afterFill(stored); } @@ -154,9 +155,6 @@ private static void afterFill(StoredContinuation stored) { * Since its allocation, our StoredContinuation could have already been promoted to the old * generation and some references we just copied might point to the young generation and * need to be added to the remembered set. - * - * To support precise marking and pre-write barriers, we need to check first if the object - * needs barriers, then, on a slow path, individually copy references from stack frames. 
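With ip now a regular field of StoredContinuation and the hybrid array part switched to word-sized elements, the size accounting above no longer subtracts a FRAMES_OFFSET. A plain-object analogue of the new shape (all names below are illustrative, not the SVM types):

final class StoredContinuationSketch {
    long ip;              // stands in for the CodePointer field read by getIP
    final long[] frames;  // stands in for the word-sized hybrid array part

    StoredContinuationSketch(int framesSize, int wordSize) {
        assert framesSize % wordSize == 0 : "frames are always a whole number of words";
        this.frames = new long[Integer.divideUnsigned(framesSize, wordSize)];
    }

    int getFramesSizeInBytes(int wordSize) {
        return frames.length * wordSize;   // equals the full array size; no header offset to subtract
    }
}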
*/ // Drop type info to not trigger compiler assertions about StoredContinuation in barriers Object opaque = GraalDirectives.opaque(stored); @@ -165,28 +163,36 @@ private static void afterFill(StoredContinuation stored) { public static StoredContinuation clone(StoredContinuation cont) { StoredContinuation clone = allocate(getFramesSizeInBytes(cont)); - return fillCloneUninterruptibly(cont, clone); + Object preparedData = ImageSingletons.lookup(ContinuationSupport.class).prepareCopy(cont); + return fillCloneUninterruptibly(cont, clone, preparedData); } @Uninterruptible(reason = "Prevent garbage collection while initializing instance and copying frames.") - private static StoredContinuation fillCloneUninterruptibly(StoredContinuation cont, StoredContinuation clone) { - CodePointer ip = ImageSingletons.lookup(ContinuationSupport.class).copyFrames(cont, clone); - // copyFrames() above may do something interruptible before uninterruptibly copying frames, - // so set IP only afterwards so that the object is considered uninitialized until then. - arrayAddress(clone).writeWord(IP_OFFSET, ip); + private static StoredContinuation fillCloneUninterruptibly(StoredContinuation cont, StoredContinuation clone, Object preparedData) { + CodePointer ip = ImageSingletons.lookup(ContinuationSupport.class).copyFrames(cont, clone, preparedData); + setIP(clone, ip); afterFill(clone); return clone; } - /** Derived from {@link InstanceReferenceMapDecoder#walkOffsetsFromPointer}. */ + @Uninterruptible(reason = "Prevent that the GC sees a partially initialized StoredContinuation.", callerMustBe = true) + private static void setIP(StoredContinuation cont, CodePointer ip) { + /* + * Once the ip is initialized, the GC may visit the object at any time (i.e., even while we + * are still executing uninterruptible code). Therefore, we must ensure that the store to + * the ip is only visible after all the stores that fill in the stack data. To guarantee + * that, we issue a STORE_STORE memory barrier before setting the ip. 
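The ordering requirement spelled out in the comment above is the classic safe-publication pattern: every store that fills the frame data must become visible before the store that publishes the ip. A standalone JDK-level illustration using VarHandle.storeStoreFence() in place of MembarNode.memoryBarrier (fields and names are illustrative only; the reader side would need matching ordering, which is not shown):

import java.lang.invoke.VarHandle;

final class PublicationSketch {
    long[] frames = new long[16];   // payload, analogous to the copied stack frames
    long ip;                        // plain field, analogous to StoredContinuation.ip (0 == unpublished)

    void fillAndPublish(long newIp) {
        for (int i = 0; i < frames.length; i++) {
            frames[i] = i;                  // plain stores that fill in the payload
        }
        VarHandle.storeStoreFence();        // payload stores cannot be reordered after the next store
        ip = newIp;                         // publishing store: a non-zero ip marks the object initialized
    }
}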
+ */ + MembarNode.memoryBarrier(MembarNode.FenceKind.ALLOCATION_INIT); + cont.ip = ip; + } + @AlwaysInline("De-virtualize calls to ObjectReferenceVisitor") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static boolean walkReferences(Pointer baseAddress, ObjectReferenceVisitor visitor, Object holderObject) { - assert !Heap.getHeap().isInImageHeap(baseAddress); - - StoredContinuation s = (StoredContinuation) holderObject; - assert baseAddress.equal(Word.objectToUntrackedPointer(holderObject)); + public static boolean walkReferences(Object obj, ObjectReferenceVisitor visitor) { + assert !Heap.getHeap().isInImageHeap(obj) : "StoredContinuations in the image heap are read-only and don't need to be visited"; + StoredContinuation s = (StoredContinuation) obj; JavaStackWalk walk = StackValue.get(JavaStackWalk.class); if (!initWalk(s, walk)) { return true; // uninitialized, ignore @@ -198,7 +204,7 @@ public static boolean walkReferences(Pointer baseAddress, ObjectReferenceVisitor Object tether = CodeInfoAccess.acquireTether(untetheredCodeInfo); try { CodeInfo codeInfo = CodeInfoAccess.convert(untetheredCodeInfo, tether); - walkFrameReferences(walk, codeInfo, queryResult, visitor, holderObject); + walkFrameReferences(walk, codeInfo, queryResult, visitor, s); } finally { CodeInfoAccess.releaseTether(untetheredCodeInfo, tether); } @@ -207,30 +213,42 @@ public static boolean walkReferences(Pointer baseAddress, ObjectReferenceVisitor return true; } + @AlwaysInline("De-virtualize calls to visitor.") @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static boolean initWalk(StoredContinuation s, JavaStackWalk walk) { - CodePointer startIp = getIP(s); - if (startIp.isNull()) { - return false; // uninitialized + public static boolean walkFrames(StoredContinuation s, ContinuationStackFrameVisitor visitor, ContinuationStackFrameVisitorData data) { + assert !Heap.getHeap().isInImageHeap(s) : "StoredContinuations in the image heap are read-only and don't need to be visited"; + + JavaStackWalk walk = StackValue.get(JavaStackWalk.class); + if (!initWalk(s, walk)) { + return true; // uninitialized, ignore } - initWalk(s, walk, startIp); - return true; - } + SimpleCodeInfoQueryResult queryResult = StackValue.get(SimpleCodeInfoQueryResult.class); + do { + UntetheredCodeInfo untetheredCodeInfo = walk.getIPCodeInfo(); + Object tether = CodeInfoAccess.acquireTether(untetheredCodeInfo); + try { + CodeInfo codeInfo = CodeInfoAccess.convert(untetheredCodeInfo); + queryFrameCodeInfo(walk, codeInfo, queryResult); - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public static void initWalk(StoredContinuation s, JavaStackWalk walk, CodePointer startIp) { - Pointer startSp = getFramesStart(s); - Pointer endSp = arrayAddress(s).add(getSizeInBytes(s)); + NonmovableArray referenceMapEncoding = CodeInfoAccess.getStackReferenceMapEncoding(codeInfo); + long referenceMapIndex = queryResult.getReferenceMapIndex(); + if (referenceMapIndex != ReferenceMapIndex.NO_REFERENCE_MAP) { + visitor.visitFrame(data, walk.getSP(), referenceMapEncoding, referenceMapIndex, visitor); + } + } finally { + CodeInfoAccess.releaseTether(untetheredCodeInfo, tether); + } + } while (JavaStackWalker.continueWalk(walk, queryResult, null)); - JavaStackWalker.initWalk(walk, startSp, endSp, startIp); - walk.setAnchor(WordFactory.nullPointer()); // never use an anchor of this platform thread + return true; } @Uninterruptible(reason = 
"Called from uninterruptible code.", mayBeInlined = true) - public static void walkFrameReferences(JavaStackWalk walk, CodeInfo codeInfo, SimpleCodeInfoQueryResult queryResult, ObjectReferenceVisitor visitor, Object holderObject) { + private static void queryFrameCodeInfo(JavaStackWalk walk, CodeInfo codeInfo, SimpleCodeInfoQueryResult queryResult) { Pointer sp = walk.getSP(); CodePointer ip = walk.getPossiblyStaleIP(); + if (codeInfo.isNull()) { throw JavaStackWalker.reportUnknownFrameEncountered(sp, ip, null); } @@ -238,14 +256,44 @@ public static void walkFrameReferences(JavaStackWalk walk, CodeInfo codeInfo, Si VMError.guarantee(Deoptimizer.checkDeoptimized(sp) == null); CodeInfoAccess.lookupCodeInfo(codeInfo, CodeInfoAccess.relativeIP(codeInfo, ip), queryResult); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static boolean initWalk(StoredContinuation s, JavaStackWalk walk) { + CodePointer startIp = getIP(s); + if (startIp.isNull()) { + return false; // uninitialized + } + initWalk(s, walk, startIp); + return true; + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static void initWalk(StoredContinuation s, JavaStackWalk walk, CodePointer startIp) { + Pointer startSp = getFramesStart(s); + Pointer endSp = getFramesStart(s).add(getSizeInBytes(s)); + JavaStackWalker.initWalkStoredContinuation(walk, startSp, endSp, startIp); + } + + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) + public static void walkFrameReferences(JavaStackWalk walk, CodeInfo codeInfo, SimpleCodeInfoQueryResult queryResult, ObjectReferenceVisitor visitor, Object holderObject) { + queryFrameCodeInfo(walk, codeInfo, queryResult); NonmovableArray referenceMapEncoding = CodeInfoAccess.getStackReferenceMapEncoding(codeInfo); long referenceMapIndex = queryResult.getReferenceMapIndex(); if (referenceMapIndex != ReferenceMapIndex.NO_REFERENCE_MAP) { - CodeReferenceMapDecoder.walkOffsetsFromPointer(sp, referenceMapEncoding, referenceMapIndex, visitor, holderObject); + CodeReferenceMapDecoder.walkOffsetsFromPointer(walk.getSP(), referenceMapEncoding, referenceMapIndex, visitor, holderObject); } } + public abstract static class ContinuationStackFrameVisitor { + public abstract void visitFrame(ContinuationStackFrameVisitorData data, Pointer sp, NonmovableArray referenceMapEncoding, long referenceMapIndex, ContinuationStackFrameVisitor visitor); + } + + @RawStructure + public interface ContinuationStackFrameVisitorData extends PointerBase { + } + private static final class PreemptVisitor extends StackFrameVisitor { private final Pointer endSP; private boolean startFromNextFrame = false; diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/SubstrateReferenceMap.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/SubstrateReferenceMap.java index 86dc6d61a980..f70f05a7974f 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/SubstrateReferenceMap.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/heap/SubstrateReferenceMap.java @@ -45,12 +45,6 @@ import org.graalvm.nativeimage.ImageInfo; public class SubstrateReferenceMap extends ReferenceMap implements ReferenceMapEncoder.Input { - - /** - * Special reference map for {@link StoredContinuation}. - */ - public static final SubstrateReferenceMap STORED_CONTINUATION_REFERENCE_MAP = new SubstrateReferenceMap(); - /** * Stores the reference map data. 
3 bits are currently required per entry: the first bit at * "offset" marks the offset in the reference map. The following bit at offset + 1 stores the diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/DynamicHub.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/DynamicHub.java index e332dca640c1..222bf3e6e3a9 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/DynamicHub.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/DynamicHub.java @@ -334,12 +334,12 @@ public void setModule(Module module) { @UnknownObjectField(types = ReflectionMetadata.class, canBeNull = true) private ReflectionMetadata reflectionMetadata; @Platforms(Platform.HOSTED_ONLY.class) - public DynamicHub(Class hostedJavaClass, String name, HubType hubType, ReferenceType referenceType, DynamicHub superType, DynamicHub componentHub, + public DynamicHub(Class hostedJavaClass, String name, int hubType, ReferenceType referenceType, DynamicHub superType, DynamicHub componentHub, String sourceFileName, int modifiers, ClassLoader classLoader, boolean isHidden, boolean isRecord, Class nestHost, boolean assertionStatus, boolean hasDefaultMethods, boolean declaresDefaultMethods, boolean isSealed, String simpleBinaryName, Object declaringClass) { this.hostedJavaClass = hostedJavaClass; this.name = name; - this.hubType = hubType.getValue(); + this.hubType = hubType; this.referenceType = referenceType.getValue(); this.superHub = superType; this.componentType = componentHub; @@ -602,6 +602,10 @@ public String getName() { return name; } + public int getHubType() { + return hubType; + } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public boolean isInstanceClass() { return HubType.isInstance(hubType); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/HubType.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/HubType.java index 83ccd91c9413..cae858082390 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/HubType.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/HubType.java @@ -28,47 +28,37 @@ import com.oracle.svm.core.Uninterruptible; @DuplicatedInNativeCode -public enum HubType { - // instance hubs - Instance(0), - InstanceReference(1), +public class HubType { + // Instance hubs. + public static final int INSTANCE = 0; + public static final int REFERENCE_INSTANCE = 1; + public static final int POD_INSTANCE = 2; + public static final int STORED_CONTINUATION_INSTANCE = 3; - // special hubs - PodInstance(2), - Other(3), + // Other hubs (heap objects never reference those hubs). + public static final int OTHER = 4; - // array hubs - TypeArray(4), - ObjectArray(5); - - private final int value; - - HubType(int value) { - this.value = value; - } - - @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) - public int getValue() { - return value; - } + // Array hubs. 
+ public static final int PRIMITIVE_ARRAY = 5; + public static final int OBJECT_ARRAY = 6; @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static boolean isInstance(int hubType) { - return hubType <= PodInstance.getValue(); + return hubType <= STORED_CONTINUATION_INSTANCE; } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static boolean isReferenceInstance(int hubType) { - return hubType == InstanceReference.getValue(); + return hubType == REFERENCE_INSTANCE; } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static boolean isPodInstance(int hubType) { - return hubType == PodInstance.getValue(); + return hubType == POD_INSTANCE; } @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static boolean isArray(int hubType) { - return hubType >= TypeArray.getValue(); + return hubType >= PRIMITIVE_ARRAY; } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/Hybrid.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/Hybrid.java index c421de700443..5ba011451d5e 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/Hybrid.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/Hybrid.java @@ -55,9 +55,9 @@ * * *

- * Hybrid objects have {@link HubType#Instance} but a {@link LayoutEncoding} like an array. This is - * important to keep in mind because methods such as {@link Class#isInstance} will return - * {@code true} and {@link Class#isArray()} will return {@code false}, while + * Hybrid objects have one of the instance {@link HubType}s but a {@link LayoutEncoding} like an + * array. This is important to keep in mind because methods such as {@link Class#isInstance} will + * return {@code true} and {@link Class#isArray()} will return {@code false}, while * {@link LayoutEncoding#isPureInstance} will return {@code false} and * {@link LayoutEncoding#isArrayLike} will return {@code true} for hybrid objects. */ diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/InteriorObjRefWalker.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/InteriorObjRefWalker.java index b551c57bf21b..a377bca5c2ce 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/InteriorObjRefWalker.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/hub/InteriorObjRefWalker.java @@ -39,6 +39,9 @@ import com.oracle.svm.core.heap.Pod; import com.oracle.svm.core.heap.PodReferenceMapDecoder; import com.oracle.svm.core.heap.ReferenceAccess; +import com.oracle.svm.core.heap.StoredContinuationAccess; +import com.oracle.svm.core.thread.Continuation; +import com.oracle.svm.core.util.VMError; /** * The vanilla walkObject and walkOffsetsFromPointer methods are not inlined, but there are @@ -63,34 +66,72 @@ public static boolean walkObject(final Object obj, final ObjectReferenceVisitor @AlwaysInline("Performance critical version") public static boolean walkObjectInline(final Object obj, final ObjectReferenceVisitor visitor) { final DynamicHub objHub = ObjectHeader.readDynamicHubFromObject(obj); - final int layoutEncoding = objHub.getLayoutEncoding(); final Pointer objPointer = Word.objectToUntrackedPointer(obj); - // Visit each Object reference in the array part of the Object. 
- if (LayoutEncoding.isArrayLikeWithObjectElements(layoutEncoding)) { - int length = ArrayLengthNode.arrayLength(obj); - int referenceSize = ConfigurationValues.getObjectLayout().getReferenceSize(); - boolean isCompressed = ReferenceAccess.singleton().haveCompressedReferences(); - - Pointer pos = objPointer.add(LayoutEncoding.getArrayBaseOffset(layoutEncoding)); - Pointer end = pos.add(WordFactory.unsigned(referenceSize).multiply(length)); - while (pos.belowThan(end)) { - final boolean visitResult = visitor.visitObjectReferenceInline(pos, 0, isCompressed, obj); - if (!visitResult) { - return false; - } - pos = pos.add(referenceSize); - } - } else if (Pod.RuntimeSupport.isPresent() && objHub.isPodInstanceClass()) { - if (!PodReferenceMapDecoder.walkOffsetsFromPointer(objPointer, layoutEncoding, visitor, obj)) { - return false; - } + switch (objHub.getHubType()) { + case HubType.INSTANCE: + case HubType.REFERENCE_INSTANCE: + return walkInstance(obj, visitor, objHub, objPointer); + case HubType.POD_INSTANCE: + return walkPod(obj, visitor, objHub, objPointer); + case HubType.STORED_CONTINUATION_INSTANCE: + return walkStoredContinuation(obj, visitor); + case HubType.OTHER: + return walkOther(); + case HubType.PRIMITIVE_ARRAY: + return true; + case HubType.OBJECT_ARRAY: + return walkObjectArray(obj, visitor, objHub, objPointer); } + throw VMError.shouldNotReachHere("Object with invalid hub type."); + } + + @AlwaysInline("Performance critical version") + private static boolean walkInstance(Object obj, ObjectReferenceVisitor visitor, DynamicHub objHub, Pointer objPointer) { NonmovableArray referenceMapEncoding = DynamicHubSupport.getReferenceMapEncoding(); long referenceMapIndex = objHub.getReferenceMapIndex(); // Visit Object reference in the fields of the Object. 
return InstanceReferenceMapDecoder.walkOffsetsFromPointer(objPointer, referenceMapEncoding, referenceMapIndex, visitor, obj); } + + @AlwaysInline("Performance critical version") + private static boolean walkPod(Object obj, ObjectReferenceVisitor visitor, DynamicHub objHub, Pointer objPointer) { + if (!Pod.RuntimeSupport.isPresent()) { + throw VMError.shouldNotReachHere("Pod objects cannot be in the heap if the pod support is disabled."); + } + return PodReferenceMapDecoder.walkOffsetsFromPointer(objPointer, objHub.getLayoutEncoding(), visitor, obj); + } + + @AlwaysInline("Performance critical version") + private static boolean walkStoredContinuation(Object obj, ObjectReferenceVisitor visitor) { + if (!Continuation.isSupported()) { + throw VMError.shouldNotReachHere("Stored continuation objects cannot be in the heap if the continuation support is disabled."); + } + return StoredContinuationAccess.walkReferences(obj, visitor); + } + + @AlwaysInline("Performance critical version") + private static boolean walkOther() { + throw VMError.shouldNotReachHere("Unexpected object with hub type 'other' in the heap."); + } + + @AlwaysInline("Performance critical version") + private static boolean walkObjectArray(Object obj, ObjectReferenceVisitor visitor, DynamicHub objHub, Pointer objPointer) { + int length = ArrayLengthNode.arrayLength(obj); + int referenceSize = ConfigurationValues.getObjectLayout().getReferenceSize(); + boolean isCompressed = ReferenceAccess.singleton().haveCompressedReferences(); + + Pointer pos = objPointer.add(LayoutEncoding.getArrayBaseOffset(objHub.getLayoutEncoding())); + Pointer end = pos.add(WordFactory.unsigned(referenceSize).multiply(length)); + while (pos.belowThan(end)) { + final boolean visitResult = visitor.visitObjectReferenceInline(pos, 0, isCompressed, obj); + if (!visitResult) { + return false; + } + pos = pos.add(referenceSize); + } + return true; + } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/stack/JavaStackWalker.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/stack/JavaStackWalker.java index f3789a6defbf..4fe4f4ad43bc 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/stack/JavaStackWalker.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/stack/JavaStackWalker.java @@ -77,16 +77,16 @@ private JavaStackWalker() { */ @Uninterruptible(reason = "Prevent deoptimization of stack frames while in this method.", callerMustBe = true) public static void initWalk(JavaStackWalk walk, Pointer startSP, CodePointer startIP) { - initWalk(walk, startSP, WordFactory.nullPointer(), startIP); + initWalk(walk, startSP, WordFactory.nullPointer(), startIP, JavaFrameAnchors.getFrameAnchor()); } @Uninterruptible(reason = "Prevent deoptimization of stack frames while in this method.", callerMustBe = true) - public static void initWalk(JavaStackWalk walk, Pointer startSP, Pointer endSP, CodePointer startIP) { + private static void initWalk(JavaStackWalk walk, Pointer startSP, Pointer endSP, CodePointer startIP, JavaFrameAnchor anchor) { walk.setSP(startSP); walk.setPossiblyStaleIP(startIP); walk.setStartSP(startSP); walk.setStartIP(startIP); - walk.setAnchor(JavaFrameAnchors.getFrameAnchor()); + walk.setAnchor(anchor); walk.setEndSP(endSP); if (startIP.isNonNull()) { // Storing the untethered object in a data structures requires that the caller and all @@ -97,13 +97,22 @@ public static void initWalk(JavaStackWalk walk, Pointer startSP, Pointer endSP, } } + @Uninterruptible(reason = "Called from 
uninterruptible code.", mayBeInlined = true) + public static void initWalkStoredContinuation(JavaStackWalk walk, Pointer startSP, Pointer endSP, CodePointer startIP) { + /* + * StoredContinuations don't need a Java frame anchor because we pin the thread (i.e., + * yielding is not possible) if any native code is called. + */ + initWalk(walk, startSP, endSP, startIP, WordFactory.nullPointer()); + } + /** * See {@link #initWalk(JavaStackWalk, Pointer, CodePointer)}, except that the instruction * pointer will be read from the stack later on. */ @Uninterruptible(reason = "Called from uninterruptible code.") public static void initWalk(JavaStackWalk walk, Pointer startSP) { - initWalk(walk, startSP, WordFactory.nullPointer(), WordFactory.nullPointer()); + initWalk(walk, startSP, (CodePointer) WordFactory.nullPointer()); assert walk.getIPCodeInfo().isNull() : "otherwise, the caller would have to be uninterruptible as well"; } @@ -245,6 +254,7 @@ public static RuntimeException reportUnknownFrameEncountered(Pointer sp, CodePoi } + @Uninterruptible(reason = "Prevent deoptimization of stack frames while in this method.") public static boolean walkCurrentThread(Pointer startSP, StackFrameVisitor visitor) { return walkCurrentThread(startSP, visitor, null); } @@ -267,14 +277,16 @@ public static boolean walkCurrentThread(Pointer startSP, CodePointer startIP @Uninterruptible(reason = "Prevent deoptimization of stack frames while in this method.") public static boolean walkCurrentThread(Pointer startSP, Pointer endSP, CodePointer startIP, ParameterizedStackFrameVisitor visitor, T data) { JavaStackWalk walk = StackValue.get(JavaStackWalk.class); - initWalk(walk, startSP, endSP, startIP); + initWalk(walk, startSP, endSP, startIP, JavaFrameAnchors.getFrameAnchor()); return doWalk(walk, visitor, data); } + @Uninterruptible(reason = "Prevent deoptimization of stack frames while in this method.") public static boolean walkThread(IsolateThread thread, StackFrameVisitor visitor) { return walkThread(thread, visitor, null); } + @Uninterruptible(reason = "Prevent deoptimization of stack frames while in this method.") public static boolean walkThread(IsolateThread thread, ParameterizedStackFrameVisitor visitor, T data) { return walkThread(thread, WordFactory.nullPointer(), visitor, data); } @@ -294,7 +306,7 @@ public static boolean walkThread(IsolateThread thread, Pointer endSP, Parame public static void walkThreadAtSafepoint(Pointer startSP, Pointer endSP, CodePointer startIP, StackFrameVisitor visitor) { assert VMOperation.isInProgressAtSafepoint(); JavaStackWalk walk = StackValue.get(JavaStackWalk.class); - initWalk(walk, startSP, endSP, startIP); + initWalk(walk, startSP, endSP, startIP, JavaFrameAnchors.getFrameAnchor()); doWalk(walk, visitor, null); } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/Continuation.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/Continuation.java index 6333a2350986..50b40947d592 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/Continuation.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/Continuation.java @@ -31,9 +31,8 @@ import org.graalvm.word.Pointer; import org.graalvm.word.WordFactory; -import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.NeverInline; -import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.heap.StoredContinuation; import 
com.oracle.svm.core.heap.StoredContinuationAccess; import com.oracle.svm.core.heap.VMOperationInfos; @@ -123,7 +122,6 @@ private void enter0(boolean isContinue) { * which passes an object result. */ @NeverInline("Accesses caller stack pointer and return address.") - @Uninterruptible(reason = "Prevent safepoint checks between copying frames and farReturn.") private Object enter1(boolean isContinue) { Pointer callerSP = KnownIntrinsics.readCallerStackPointer(); CodePointer callerIP = KnownIntrinsics.readReturnAddress(); @@ -131,29 +129,22 @@ private Object enter1(boolean isContinue) { assert sp.isNull() && ip.isNull() && baseSP.isNull(); if (isContinue) { - assert stored != null; + StoredContinuation cont = this.stored; + assert cont != null; + this.ip = callerIP; + this.sp = callerSP; + this.baseSP = currentSP; + this.stored = null; - int totalSize = StoredContinuationAccess.getFramesSizeInBytes(stored); - Pointer topSP = currentSP.subtract(totalSize); + int framesSize = StoredContinuationAccess.getFramesSizeInBytes(cont); + Pointer topSP = currentSP.subtract(framesSize); if (!StackOverflowCheck.singleton().isWithinBounds(topSP)) { throw ImplicitExceptions.CACHED_STACK_OVERFLOW_ERROR; } - // copyFrames() may do something interruptible before uninterruptibly copying frames. - // Code must not rely on remaining uninterruptible until after frames were copied. - CodePointer enterIP = ImageSingletons.lookup(ContinuationSupport.class).copyFrames(stored, topSP); - - /* - * NO CALLS BEYOND THIS POINT! They would overwrite the frames we just copied. - */ - - this.ip = callerIP; - this.sp = callerSP; - this.baseSP = currentSP; - this.stored = null; - KnownIntrinsics.farReturn(FREEZE_OK, topSP, enterIP, false); + Object preparedData = ImageSingletons.lookup(ContinuationSupport.class).prepareCopy(cont); + ContinuationSupport.enter(cont, topSP, preparedData); throw VMError.shouldNotReachHere(); - } else { assert stored == null; this.ip = callerIP; @@ -166,7 +157,6 @@ private Object enter1(boolean isContinue) { } @NeverInline("Needs a separate frame which is part of the continuation stack that we can eventually return to.") - @Uninterruptible(reason = "Not actually, but because caller is uninterruptible.", calleeMustBe = false) private void enter2() { try { target.run(); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/ContinuationSupport.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/ContinuationSupport.java index 079863d5aa4c..268b69be5877 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/ContinuationSupport.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/ContinuationSupport.java @@ -24,48 +24,90 @@ */ package com.oracle.svm.core.thread; +import org.graalvm.compiler.api.replacements.Fold; +import org.graalvm.nativeimage.ImageSingletons; +import org.graalvm.nativeimage.Platform; +import org.graalvm.nativeimage.Platforms; import org.graalvm.nativeimage.c.function.CodePointer; import org.graalvm.word.Pointer; +import org.graalvm.word.WordFactory; -import com.oracle.svm.core.AlwaysInline; +import com.oracle.svm.core.NeverInline; import com.oracle.svm.core.Uninterruptible; +import com.oracle.svm.core.UnmanagedMemoryUtil; +import com.oracle.svm.core.config.ConfigurationValues; +import com.oracle.svm.core.graal.nodes.WriteStackPointerNode; import com.oracle.svm.core.heap.StoredContinuation; import com.oracle.svm.core.heap.StoredContinuationAccess; +import 
com.oracle.svm.core.snippets.KnownIntrinsics; public class ContinuationSupport { - @AlwaysInline("If not inlined, this method could overwrite its own frame.") + private long ipOffset; + + @Platforms(Platform.HOSTED_ONLY.class) + protected ContinuationSupport() { + } + + @Fold + public static ContinuationSupport singleton() { + return ImageSingletons.lookup(ContinuationSupport.class); + } + + @Platforms(Platform.HOSTED_ONLY.class) + public void setIPOffset(long value) { + assert ipOffset == 0; + ipOffset = value; + } + + public long getIPOffset() { + assert ipOffset != 0; + return ipOffset; + } + + public Object prepareCopy(@SuppressWarnings("unused") StoredContinuation storedCont) { + return null; + } + + /** + * This method reserves the extra stack space for the continuation. Be careful when modifying + * the code or the arguments of this method because we need the guarantee the following + * invariants: + *

+     * <ul>
+     * <li>The method must not contain any stack accesses as they would be relative to the
+     * manipulated (and therefore incorrect) stack pointer.</li>
+     * <li>The method must never return because the stack pointer would be incorrect
+     * afterwards.</li>
+     * <li>Only uninterruptible code may be executed once this method is called.</li>
+     * </ul>
+ */ + @NeverInline("Modifies the stack pointer manually, which breaks stack accesses.") + @Uninterruptible(reason = "Manipulates the stack pointer.") + public static void enter(StoredContinuation storedCont, Pointer topSP, Object preparedData) { + WriteStackPointerNode.write(topSP); + enter0(storedCont, topSP, preparedData); + } + + @NeverInline("The caller modified the stack pointer manually, so we need a new stack frame.") @Uninterruptible(reason = "Copies stack frames containing references.") - public CodePointer copyFrames(StoredContinuation storedCont, Pointer to) { - int totalSize = StoredContinuationAccess.getFramesSizeInBytes(storedCont); - CodePointer storedIP = StoredContinuationAccess.getIP(storedCont); - Pointer frameData = StoredContinuationAccess.getFramesStart(storedCont); + private static void enter0(StoredContinuation storedCont, Pointer topSP, Object preparedData) { + // copyFrames() may do something interruptible before uninterruptibly copying frames. + // Code must not rely on remaining uninterruptible until after frames were copied. + CodePointer enterIP = singleton().copyFrames(storedCont, topSP, preparedData); + KnownIntrinsics.farReturn(Continuation.FREEZE_OK, topSP, enterIP, false); + } - /* - * NO CALLS BEYOND THIS POINT! They would overwrite the frames we are copying. - */ + @Uninterruptible(reason = "Copies stack frames containing references.") + protected CodePointer copyFrames(StoredContinuation storedCont, Pointer topSP, @SuppressWarnings("unused") Object preparedData) { + int totalSize = StoredContinuationAccess.getFramesSizeInBytes(storedCont); + assert totalSize % ConfigurationValues.getTarget().wordSize == 0; - int offset = 0; - for (int next = offset + 32; next < totalSize; next += 32) { - Pointer src = frameData.add(offset); - Pointer dst = to.add(offset); - long l0 = src.readLong(0); - long l8 = src.readLong(8); - long l16 = src.readLong(16); - long l24 = src.readLong(24); - dst.writeLong(0, l0); - dst.writeLong(8, l8); - dst.writeLong(16, l16); - dst.writeLong(24, l24); - offset = next; - } - for (; offset < totalSize; offset++) { - to.writeByte(offset, frameData.readByte(offset)); - } - return storedIP; + Pointer frameData = StoredContinuationAccess.getFramesStart(storedCont); + UnmanagedMemoryUtil.copyWordsForward(frameData, topSP, WordFactory.unsigned(totalSize)); + return StoredContinuationAccess.getIP(storedCont); } @Uninterruptible(reason = "Copies stack frames containing references.") - public CodePointer copyFrames(StoredContinuation fromCont, StoredContinuation toCont) { - return copyFrames(fromCont, StoredContinuationAccess.getFramesStart(toCont)); + public CodePointer copyFrames(StoredContinuation fromCont, StoredContinuation toCont, Object preparedData) { + return copyFrames(fromCont, StoredContinuationAccess.getFramesStart(toCont), preparedData); } } diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/ContinuationsFeature.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/ContinuationsFeature.java index b0f2fc6ae0a9..ac318bf57f4b 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/ContinuationsFeature.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/ContinuationsFeature.java @@ -24,6 +24,8 @@ */ package com.oracle.svm.core.thread; +import java.lang.reflect.Field; + import org.graalvm.compiler.serviceprovider.JavaVersionUtil; import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.hosted.Feature; @@ -73,6 
+75,9 @@ public void beforeAnalysis(BeforeAnalysisAccess access) { ImageSingletons.add(ContinuationSupport.class, new ContinuationSupport()); } + Field ipField = ReflectionUtil.lookupField(StoredContinuation.class, "ip"); + access.registerAsAccessed(ipField); + access.registerReachabilityHandler(a -> access.registerAsInHeap(StoredContinuation.class), ReflectionUtil.lookupMethod(StoredContinuationAccess.class, "allocate", int.class)); } else { @@ -80,6 +85,15 @@ public void beforeAnalysis(BeforeAnalysisAccess access) { } } + @Override + public void beforeCompilation(BeforeCompilationAccess access) { + if (Continuation.isSupported()) { + Field ipField = ReflectionUtil.lookupField(StoredContinuation.class, "ip"); + long offset = access.objectFieldOffset(ipField); + ContinuationSupport.singleton().setIPOffset(offset); + } + } + static void abortIfUnsupported() { if (!Continuation.isSupported()) { throw UserError.abort("Continuation support is used, but not available. Use JDK 17 with option %s, or JDK 19 with preview features enabled.", diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/LoomSupport.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/LoomSupport.java index bfbfa1bb4f9f..33b8888649f1 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/LoomSupport.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/LoomSupport.java @@ -24,9 +24,6 @@ */ package com.oracle.svm.core.thread; -import static com.oracle.svm.core.SubstrateOptions.UseEpsilonGC; -import static com.oracle.svm.core.SubstrateOptions.UseSerialGC; - import org.graalvm.compiler.api.replacements.Fold; import org.graalvm.compiler.serviceprovider.JavaVersionUtil; @@ -34,7 +31,7 @@ public final class LoomSupport { private static final boolean isEnabled; static { boolean enabled = false; - if (JavaVersionUtil.JAVA_SPEC == 19 && (UseSerialGC.getValue() || UseEpsilonGC.getValue())) { + if (JavaVersionUtil.JAVA_SPEC == 19) { try { enabled = (Boolean) Class.forName("jdk.internal.misc.PreviewFeatures") .getDeclaredMethod("isEnabled").invoke(null); diff --git a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/VMOperation.java b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/VMOperation.java index 2fa2280cf860..762ad5332b07 100644 --- a/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/VMOperation.java +++ b/substratevm/src/com.oracle.svm.core/src/com/oracle/svm/core/thread/VMOperation.java @@ -156,6 +156,7 @@ public static void guaranteeNotInProgress(String message) { } } + @Uninterruptible(reason = "Called from uninterruptible code.", mayBeInlined = true) public static void guaranteeInProgressAtSafepoint(String message) { if (!isInProgressAtSafepoint()) { throw VMError.shouldNotReachHere(message); diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/ConcurrentReachabilityHandler.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/ConcurrentReachabilityHandler.java index 9e418d537786..1183d832c09a 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/ConcurrentReachabilityHandler.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/ConcurrentReachabilityHandler.java @@ -35,7 +35,7 @@ import org.graalvm.nativeimage.hosted.Feature; import com.oracle.graal.pointsto.meta.AnalysisElement; -import com.oracle.graal.pointsto.meta.AnalysisElement.ElementReachableNotification; +import 
com.oracle.graal.pointsto.meta.AnalysisElement.ElementNotification; import com.oracle.graal.pointsto.meta.AnalysisElement.MethodOverrideReachableNotification; import com.oracle.graal.pointsto.meta.AnalysisElement.SubtypeReachableNotification; import com.oracle.graal.pointsto.meta.AnalysisMetaAccess; @@ -49,7 +49,7 @@ @AutomaticFeature public class ConcurrentReachabilityHandler implements ReachabilityHandler, Feature { - private final Map, ElementReachableNotification> reachabilityNotifications = new ConcurrentHashMap<>(); + private final Map, ElementNotification> reachabilityNotifications = new ConcurrentHashMap<>(); public static ConcurrentReachabilityHandler singleton() { return ImageSingletons.lookup(ConcurrentReachabilityHandler.class); @@ -113,7 +113,7 @@ private void registerConcurrentReachabilityHandler(BeforeAnalysisAccessImpl acce * by each AnalysisElement, i.e., each trigger, and are removed as soon as they are * notified. */ - ElementReachableNotification notification = reachabilityNotifications.computeIfAbsent(callback, ElementReachableNotification::new); + ElementNotification notification = reachabilityNotifications.computeIfAbsent(callback, ElementNotification::new); if (notification.isNotified()) { /* Already notified from an earlier registration, nothing to do. */ diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/NativeImageGenerator.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/NativeImageGenerator.java index 15218446c42f..e8e2dc2d7ef4 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/NativeImageGenerator.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/NativeImageGenerator.java @@ -215,7 +215,7 @@ import com.oracle.svm.core.graal.snippets.TypeSnippets; import com.oracle.svm.core.graal.word.SubstrateWordOperationPlugins; import com.oracle.svm.core.graal.word.SubstrateWordTypes; -import com.oracle.svm.core.heap.Heap; +import com.oracle.svm.core.heap.BarrierSetProvider; import com.oracle.svm.core.heap.RestrictHeapAccessCallees; import com.oracle.svm.core.hub.DynamicHub; import com.oracle.svm.core.hub.LayoutEncoding; @@ -1084,7 +1084,7 @@ public static Inflation createBigBang(OptionValues options, TargetDescription ta * added to the universe. 
*/ aMetaAccess.lookupJavaType(Reference.class).registerAsReachable(); - BarrierSet barrierSet = ImageSingletons.lookup(Heap.class).createBarrierSet(aMetaAccess); + BarrierSet barrierSet = ImageSingletons.lookup(BarrierSetProvider.class).createBarrierSet(aMetaAccess); SubstratePlatformConfigurationProvider platformConfig = new SubstratePlatformConfigurationProvider(barrierSet); MetaAccessExtensionProvider aMetaAccessExtensionProvider = HostedConfiguration.instance().createAnalysisMetaAccessExtensionProvider(); LoweringProvider aLoweringProvider = SubstrateLoweringProvider.createForHosted(aMetaAccess, null, platformConfig, aMetaAccessExtensionProvider); diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SVMHost.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SVMHost.java index 9d68e9952480..eda4fd0653dc 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SVMHost.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/SVMHost.java @@ -96,6 +96,7 @@ import com.oracle.svm.core.graal.meta.SubstrateForeignCallsProvider; import com.oracle.svm.core.graal.stackvalue.StackValueNode; import com.oracle.svm.core.graal.thread.VMThreadLocalAccess; +import com.oracle.svm.core.heap.StoredContinuation; import com.oracle.svm.core.heap.Target_java_lang_ref_Reference; import com.oracle.svm.core.hub.DynamicHub; import com.oracle.svm.core.hub.HubType; @@ -105,6 +106,7 @@ import com.oracle.svm.core.jdk.SealedClassSupport; import com.oracle.svm.core.option.HostedOptionKey; import com.oracle.svm.core.option.SubstrateOptionsParser; +import com.oracle.svm.core.thread.Continuation; import com.oracle.svm.core.util.HostedStringDeduplication; import com.oracle.svm.core.util.VMError; import com.oracle.svm.hosted.classinitialization.ClassInitializationSupport; @@ -454,24 +456,25 @@ public UnsafeAutomaticSubstitutionProcessor getAutomaticSubstitutionProcessor() return automaticSubstitutions; } - private static HubType computeHubType(AnalysisType type) { + private static int computeHubType(AnalysisType type) { if (type.isArray()) { if (type.getComponentType().isPrimitive() || type.getComponentType().isWordType()) { - return HubType.TypeArray; + return HubType.PRIMITIVE_ARRAY; } else { - return HubType.ObjectArray; + return HubType.OBJECT_ARRAY; } } else if (type.isInstanceClass()) { if (Reference.class.isAssignableFrom(type.getJavaClass())) { - return HubType.InstanceReference; + return HubType.REFERENCE_INSTANCE; } else if (PodSupport.isPresent() && PodSupport.singleton().isPodClass(type.getJavaClass())) { - return HubType.PodInstance; + return HubType.POD_INSTANCE; + } else if (Continuation.isSupported() && type.getJavaClass() == StoredContinuation.class) { + return HubType.STORED_CONTINUATION_INSTANCE; } assert !Target_java_lang_ref_Reference.class.isAssignableFrom(type.getJavaClass()) : "should not see substitution type here"; - return HubType.Instance; - } else { - return HubType.Other; + return HubType.INSTANCE; } + return HubType.OTHER; } private static ReferenceType computeReferenceType(AnalysisType type) { diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/analysis/flow/SVMMethodTypeFlowBuilder.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/analysis/flow/SVMMethodTypeFlowBuilder.java index b5add5735d12..c1b39ef20d48 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/analysis/flow/SVMMethodTypeFlowBuilder.java +++ 
b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/analysis/flow/SVMMethodTypeFlowBuilder.java @@ -38,19 +38,15 @@ import com.oracle.graal.pointsto.PointsToAnalysis; import com.oracle.graal.pointsto.flow.MethodTypeFlowBuilder; -import com.oracle.graal.pointsto.flow.SourceTypeFlow; import com.oracle.graal.pointsto.flow.TypeFlow; import com.oracle.graal.pointsto.flow.builder.TypeFlowBuilder; import com.oracle.graal.pointsto.meta.AnalysisField; import com.oracle.graal.pointsto.meta.AnalysisType; import com.oracle.graal.pointsto.meta.PointsToAnalysisMethod; -import com.oracle.graal.pointsto.typestate.TypeState; import com.oracle.svm.core.graal.thread.CompareAndSetVMThreadLocalNode; -import com.oracle.svm.core.graal.thread.LoadVMThreadLocalNode; import com.oracle.svm.core.graal.thread.StoreVMThreadLocalNode; import com.oracle.svm.core.meta.SubstrateObjectConstant; import com.oracle.svm.core.util.UserError.UserException; -import com.oracle.svm.core.util.VMError; import com.oracle.svm.hosted.NativeImageOptions; import com.oracle.svm.hosted.SVMHost; import com.oracle.svm.hosted.substitute.ComputedValueField; @@ -174,38 +170,13 @@ protected void checkUnsafeOffset(ValueNode base, ValueNode offsetNode) { @Override protected boolean delegateNodeProcessing(FixedNode n, TypeFlowsOfNodes state) { - if (n instanceof LoadVMThreadLocalNode) { - LoadVMThreadLocalNode node = (LoadVMThreadLocalNode) n; - Stamp stamp = node.stamp(NodeView.DEFAULT); - if (stamp instanceof ObjectStamp) { - ObjectStamp objStamp = (ObjectStamp) stamp; - VMError.guarantee(!objStamp.isEmpty()); - - TypeFlowBuilder result; - if (objStamp.isExactType()) { - /* - * The node has an exact type. Create a source type flow. This works with - * allocation site sensitivity because the StoreVMThreadLocal is modeled by - * writing the objects to the all-instantiated. - */ - result = TypeFlowBuilder.create(bb, node, SourceTypeFlow.class, () -> { - SourceTypeFlow src = new SourceTypeFlow(sourcePosition(node), TypeState.forExactType(bb, (AnalysisType) objStamp.type(), !objStamp.nonNull())); - flowsGraph.addMiscEntryFlow(src); - return src; - }); - } else { - /* Use a type state which consists of the entire node's type hierarchy. */ - AnalysisType type = (AnalysisType) (objStamp.type() == null ? bb.getObjectType() : objStamp.type()); - result = TypeFlowBuilder.create(bb, node, TypeFlow.class, () -> { - TypeFlow proxy = bb.analysisPolicy().proxy(sourcePosition(node), type.getTypeFlow(bb, true)); - flowsGraph.addMiscEntryFlow(proxy); - return proxy; - }); - } - state.add(node, result); - return true; - } - } else if (n instanceof StoreVMThreadLocalNode) { + /* + * LoadVMThreadLocalNode is handled by the default node processing in + * MethodTypeFlowBuilder.TypeFlowsOfNodes.lookup(), i.e., it creates a source type flow when + * the node has an exact type. This works with allocation site sensitivity because the + * StoreVMThreadLocal is modeled by writing the objects to the all-instantiated. 
+ */ + if (n instanceof StoreVMThreadLocalNode) { StoreVMThreadLocalNode node = (StoreVMThreadLocalNode) n; storeVMThreadLocal(state, node, node.getValue()); return true; diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/code/SharedRuntimeConfigurationBuilder.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/code/SharedRuntimeConfigurationBuilder.java index 95857a78b719..db67f15a80fe 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/code/SharedRuntimeConfigurationBuilder.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/code/SharedRuntimeConfigurationBuilder.java @@ -27,7 +27,6 @@ import java.util.EnumMap; import java.util.function.Function; -import com.oracle.svm.hosted.HostedConfiguration; import org.graalvm.compiler.api.replacements.SnippetReflectionProvider; import org.graalvm.compiler.core.common.spi.ConstantFieldProvider; import org.graalvm.compiler.core.common.spi.ForeignCallsProvider; @@ -57,7 +56,8 @@ import com.oracle.svm.core.graal.meta.SubstrateSnippetReflectionProvider; import com.oracle.svm.core.graal.meta.SubstrateStampProvider; import com.oracle.svm.core.graal.word.SubstrateWordTypes; -import com.oracle.svm.core.heap.Heap; +import com.oracle.svm.core.heap.BarrierSetProvider; +import com.oracle.svm.hosted.HostedConfiguration; import com.oracle.svm.hosted.SVMHost; import com.oracle.svm.hosted.c.NativeLibraries; import com.oracle.svm.hosted.classinitialization.ClassInitializationSupport; @@ -111,7 +111,7 @@ public SharedRuntimeConfigurationBuilder build() { SnippetReflectionProvider snippetReflection = createSnippetReflectionProvider(); ForeignCallsProvider foreignCalls = createForeignCallsProvider(registerConfigs.get(ConfigKind.NORMAL)); p = createProviders(null, constantReflection, constantFieldProvider, foreignCalls, null, null, stampProvider, snippetReflection, null, null, null); - BarrierSet barrierSet = ImageSingletons.lookup(Heap.class).createBarrierSet(metaAccess); + BarrierSet barrierSet = ImageSingletons.lookup(BarrierSetProvider.class).createBarrierSet(metaAccess); PlatformConfigurationProvider platformConfig = new SubstratePlatformConfigurationProvider(barrierSet); MetaAccessExtensionProvider metaAccessExtensionProvider = HostedConfiguration.instance().createCompilationMetaAccessExtensionProvider(metaAccess); p = createProviders(null, constantReflection, constantFieldProvider, foreignCalls, null, null, stampProvider, snippetReflection, platformConfig, metaAccessExtensionProvider, null); diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/config/HybridLayout.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/config/HybridLayout.java index d9077ce9d1a1..8228b75aacf0 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/config/HybridLayout.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/config/HybridLayout.java @@ -61,7 +61,7 @@ public static boolean canInstantiateAsInstance(HostedType clazz) { } private final ObjectLayout layout; - private final HostedType arrayType; + private final HostedType arrayComponentType; private final HostedField arrayField; private final HostedField typeIDSlotsField; private final int arrayBaseOffset; @@ -73,18 +73,18 @@ public HybridLayout(Class hybridClass, ObjectLayout layout, HostedMetaAccess public HybridLayout(HostedInstanceClass hybridClass, ObjectLayout layout, MetaAccessProvider metaAccess) { this.layout = layout; 
HybridLayoutSupport.HybridInfo hybridInfo = HybridLayoutSupport.singleton().inspectHybrid(hybridClass, metaAccess); - this.arrayType = hybridInfo.arrayType; + this.arrayComponentType = hybridInfo.arrayComponentType; this.arrayField = hybridInfo.arrayField; this.typeIDSlotsField = hybridInfo.typeIDSlotsField; this.arrayBaseOffset = NumUtil.roundUp(hybridClass.getAfterFieldsOffset(), layout.sizeInBytes(getArrayElementStorageKind())); } - public HostedType getArrayType() { - return arrayType; + public HostedType getArrayComponentType() { + return arrayComponentType; } public JavaKind getArrayElementStorageKind() { - return arrayType.getComponentType().getStorageKind(); + return arrayComponentType.getStorageKind(); } public int getArrayBaseOffset() { @@ -92,7 +92,7 @@ public int getArrayBaseOffset() { } public long getArrayElementOffset(int index) { - return getArrayBaseOffset() + index * layout.sizeInBytes(getArrayElementStorageKind()); + return getArrayBaseOffset() + ((long) index) * layout.sizeInBytes(getArrayElementStorageKind()); } public long getTotalSize(int length) { diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/config/HybridLayoutSupport.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/config/HybridLayoutSupport.java index 6e5cb64e702b..b855e56bf34c 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/config/HybridLayoutSupport.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/config/HybridLayoutSupport.java @@ -30,7 +30,6 @@ import org.graalvm.nativeimage.ImageSingletons; import com.oracle.svm.core.hub.Hybrid; -import com.oracle.svm.hosted.meta.HostedArrayClass; import com.oracle.svm.hosted.meta.HostedField; import com.oracle.svm.hosted.meta.HostedInstanceClass; import com.oracle.svm.hosted.meta.HostedType; @@ -82,28 +81,27 @@ protected HybridInfo inspectHybrid(HostedInstanceClass hybridClass, MetaAccessPr } } - HostedType arrayType; + HostedType arrayComponentType; boolean arrayTypeIsSet = (annotation.componentType() != void.class); if (foundArrayField != null) { - arrayType = foundArrayField.getType(); + arrayComponentType = foundArrayField.getType().getComponentType(); - assert !arrayTypeIsSet || arrayType.equals(metaAccess.lookupJavaType(annotation.componentType()).getArrayClass()) : // + assert !arrayTypeIsSet || arrayComponentType.equals(metaAccess.lookupJavaType(annotation.componentType())) : // "@Hybrid.componentType must match the type of a @Hybrid.Array field when both are present"; } else { assert arrayTypeIsSet : "@Hybrid.componentType must be set when no @Hybrid.Array field is present (if present, ensure it is reachable)"; - arrayType = (HostedArrayClass) metaAccess.lookupJavaType(annotation.componentType()).getArrayClass(); + arrayComponentType = (HostedType) metaAccess.lookupJavaType(annotation.componentType()); } - assert arrayType.isArray(); - return new HybridInfo(arrayType, foundArrayField, foundTypeIDSlotsField); + return new HybridInfo(arrayComponentType, foundArrayField, foundTypeIDSlotsField); } public static class HybridInfo { - public final HostedType arrayType; + public final HostedType arrayComponentType; public final HostedField arrayField; public final HostedField typeIDSlotsField; - public HybridInfo(HostedType arrayType, HostedField arrayField, HostedField typeIDSlotsField) { - this.arrayType = arrayType; + public HybridInfo(HostedType arrayComponentType, HostedField arrayField, HostedField typeIDSlotsField) { + this.arrayComponentType = 
arrayComponentType; this.arrayField = arrayField; this.typeIDSlotsField = typeIDSlotsField; } diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/heap/PodSupport.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/heap/PodSupport.java index 5cce3ccc774a..3348b35d0122 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/heap/PodSupport.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/heap/PodSupport.java @@ -48,7 +48,6 @@ import org.graalvm.nativeimage.ImageSingletons; import org.graalvm.nativeimage.hosted.Feature; -import com.oracle.svm.core.SubstrateOptions; import com.oracle.svm.core.annotate.AutomaticFeature; import com.oracle.svm.core.deopt.DeoptTest; import com.oracle.svm.core.heap.Pod; @@ -75,7 +74,7 @@ static boolean isPresent() { static PodSupport singleton() { if (!ImageSingletons.contains(PodSupport.class)) { - throw UserError.abort("Pods are not available in this native image build. Only SerialGC currently supports pods."); + throw UserError.abort("Pods are not available in this native image build."); } return ImageSingletons.lookup(PodSupport.class); } @@ -116,11 +115,6 @@ final class PodFeature implements PodSupport, Feature { private volatile boolean instantiated = false; private boolean sealed = false; - @Override - public boolean isInConfiguration(IsInConfigurationAccess access) { - return SubstrateOptions.UseSerialGC.getValue() || SubstrateOptions.UseEpsilonGC.getValue(); - } - @Override public void afterRegistration(AfterRegistrationAccess access) { ImageSingletons.add(PodSupport.class, this); diff --git a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/meta/UniverseBuilder.java b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/meta/UniverseBuilder.java index 42327a1974c7..3dbb405883fa 100644 --- a/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/meta/UniverseBuilder.java +++ b/substratevm/src/com.oracle.svm.hosted/src/com/oracle/svm/hosted/meta/UniverseBuilder.java @@ -40,6 +40,7 @@ import java.util.Set; import java.util.concurrent.ForkJoinTask; +import com.oracle.svm.core.heap.StoredContinuation; import org.graalvm.collections.Pair; import org.graalvm.compiler.core.common.NumUtil; import org.graalvm.compiler.debug.DebugContext; @@ -76,8 +77,6 @@ import com.oracle.svm.core.heap.FillerObject; import com.oracle.svm.core.heap.InstanceReferenceMapEncoder; import com.oracle.svm.core.heap.ReferenceMapEncoder; -import com.oracle.svm.core.heap.ReferenceMapIndex; -import com.oracle.svm.core.heap.StoredContinuation; import com.oracle.svm.core.heap.SubstrateReferenceMap; import com.oracle.svm.core.hub.DynamicHub; import com.oracle.svm.core.hub.DynamicHubSupport; @@ -363,7 +362,9 @@ private void buildProfilingInformation() { CEntryPointLiteral.class, BoxedRelocatedPointer.class, FunctionPointerHolder.class, - SubstrateMethodAccessor.class, SubstrateConstructorAccessor.class, + StoredContinuation.class, + SubstrateMethodAccessor.class, + SubstrateConstructorAccessor.class, FillerObject.class)); private void collectMonitorFieldInfo(BigBang bb) { @@ -939,12 +940,7 @@ private void buildHubs() { ReferenceMapEncoder.Input referenceMap = referenceMaps.get(type); assert referenceMap != null; assert ((SubstrateReferenceMap) referenceMap).hasNoDerivedOffsets(); - long referenceMapIndex; - if (referenceMap == SubstrateReferenceMap.STORED_CONTINUATION_REFERENCE_MAP) { - referenceMapIndex = ReferenceMapIndex.STORED_CONTINUATION; - } else { - 
referenceMapIndex = referenceMapEncoder.lookupEncoding(referenceMap); - } + long referenceMapIndex = referenceMapEncoder.lookupEncoding(referenceMap); DynamicHub hub = type.getHub(); hub.setData(layoutHelper, type.getTypeID(), monitorOffset, type.getTypeCheckStart(), type.getTypeCheckRange(), type.getTypeCheckSlot(), @@ -953,10 +949,6 @@ private void buildHubs() { } private static ReferenceMapEncoder.Input createReferenceMap(HostedType type) { - if (type.getJavaClass().equals(StoredContinuation.class)) { - return SubstrateReferenceMap.STORED_CONTINUATION_REFERENCE_MAP; - } - HostedField[] fields = type.getInstanceFields(true); SubstrateReferenceMap referenceMap = new SubstrateReferenceMap(); diff --git a/sulong/CHANGELOG.md b/sulong/CHANGELOG.md index c3e85f143b1b..b354fa8fab44 100644 --- a/sulong/CHANGELOG.md +++ b/sulong/CHANGELOG.md @@ -1,3 +1,9 @@ +# Version 22.3.0 + +Changes: + +* Updated LLVM toolchain to version 14.0.6. + # Version 22.2.0 New Features: diff --git a/sulong/mx.sulong/mx_sulong_llvm_config.py b/sulong/mx.sulong/mx_sulong_llvm_config.py index 6b271b66781e..e2d9592bb7d6 100644 --- a/sulong/mx.sulong/mx_sulong_llvm_config.py +++ b/sulong/mx.sulong/mx_sulong_llvm_config.py @@ -31,10 +31,10 @@ # GENERATED BY 'mx generate-llvm-config'. DO NOT MODIFY. # LLVM version string. -VERSION = "14.0.3" +VERSION = "14.0.6" # Major version of the LLVM API. VERSION_MAJOR = 14 # Minor version of the LLVM API. VERSION_MINOR = 0 # Patch version of the LLVM API. -VERSION_PATCH = 3 +VERSION_PATCH = 6 diff --git a/sulong/projects/com.oracle.truffle.llvm.toolchain.config/src/com/oracle/truffle/llvm/toolchain/config/LLVMConfig.java b/sulong/projects/com.oracle.truffle.llvm.toolchain.config/src/com/oracle/truffle/llvm/toolchain/config/LLVMConfig.java index f2c7d7042859..948ba50119ab 100644 --- a/sulong/projects/com.oracle.truffle.llvm.toolchain.config/src/com/oracle/truffle/llvm/toolchain/config/LLVMConfig.java +++ b/sulong/projects/com.oracle.truffle.llvm.toolchain.config/src/com/oracle/truffle/llvm/toolchain/config/LLVMConfig.java @@ -39,11 +39,11 @@ public abstract class LLVMConfig { private LLVMConfig() {} /** LLVM version string. */ - public static final String VERSION = "14.0.3"; + public static final String VERSION = "14.0.6"; /** Major version of the LLVM API. */ public static final int VERSION_MAJOR = 14; /** Minor version of the LLVM API. */ public static final int VERSION_MINOR = 0; /** Patch version of the LLVM API. 
*/ - public static final int VERSION_PATCH = 3; + public static final int VERSION_PATCH = 6; } diff --git a/truffle/src/com.oracle.truffle.api/src/com/oracle/truffle/api/Truffle.java b/truffle/src/com.oracle.truffle.api/src/com/oracle/truffle/api/Truffle.java index 02891e94fdf8..0058b3b011d1 100644 --- a/truffle/src/com.oracle.truffle.api/src/com/oracle/truffle/api/Truffle.java +++ b/truffle/src/com.oracle.truffle.api/src/com/oracle/truffle/api/Truffle.java @@ -42,7 +42,6 @@ import java.security.AccessController; import java.security.PrivilegedAction; -import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.ServiceConfigurationError; @@ -109,7 +108,10 @@ private static TruffleRuntime initRuntime() { return AccessController.doPrivileged(new PrivilegedAction() { public TruffleRuntime run() { String runtimeClassName = System.getProperty("truffle.TruffleRuntime"); - if (runtimeClassName != null && runtimeClassName.length() > 0) { + if (runtimeClassName != null && !runtimeClassName.isEmpty()) { + if (runtimeClassName.equals(DefaultTruffleRuntime.class.getName())) { + return new DefaultTruffleRuntime(); + } try { ClassLoader cl = Thread.currentThread().getContextClassLoader(); Class runtimeClass = Class.forName(runtimeClassName, false, cl); @@ -120,8 +122,17 @@ public TruffleRuntime run() { } } - List> loaders = Collections.singletonList(ServiceLoader.load(TruffleRuntimeAccess.class)); - TruffleRuntimeAccess access = selectTruffleRuntimeAccess(loaders); + Class lookupClass = Truffle.class; + ModuleLayer moduleLayer = lookupClass.getModule().getLayer(); + TruffleRuntimeAccess access; + if (moduleLayer != null) { + access = selectTruffleRuntimeAccess(List.of(ServiceLoader.load(moduleLayer, TruffleRuntimeAccess.class))); + } else { + access = selectTruffleRuntimeAccess(List.of(ServiceLoader.load(TruffleRuntimeAccess.class, lookupClass.getClassLoader()))); + } + if (access == null) { + access = selectTruffleRuntimeAccess(List.of(ServiceLoader.load(TruffleRuntimeAccess.class))); + } if (access != null) { exportTo(access.getClass()); diff --git a/truffle/src/com.oracle.truffle.api/src/com/oracle/truffle/api/impl/DefaultTruffleRuntime.java b/truffle/src/com.oracle.truffle.api/src/com/oracle/truffle/api/impl/DefaultTruffleRuntime.java index b8c8d519f80b..8a421251b5d0 100644 --- a/truffle/src/com.oracle.truffle.api/src/com/oracle/truffle/api/impl/DefaultTruffleRuntime.java +++ b/truffle/src/com.oracle.truffle.api/src/com/oracle/truffle/api/impl/DefaultTruffleRuntime.java @@ -263,7 +263,17 @@ static Iterable load(Class service) { throw new InternalError(e); } } else { - return ServiceLoader.load(service); + ModuleLayer moduleLayer = truffleModule.getLayer(); + Iterable services; + if (moduleLayer != null) { + services = ServiceLoader.load(moduleLayer, service); + } else { + services = ServiceLoader.load(service, DefaultTruffleRuntime.class.getClassLoader()); + } + if (!services.iterator().hasNext()) { + services = ServiceLoader.load(service); + } + return services; } } } diff --git a/vm/mx.vm/suite.py b/vm/mx.vm/suite.py index 040c3b33de41..91756311e00b 100644 --- a/vm/mx.vm/suite.py +++ b/vm/mx.vm/suite.py @@ -57,7 +57,7 @@ }, { "name": "truffleruby", - "version": "4054469bbd985db7dc5c86299004ff86a2835baf", + "version": "60822c2b38940c0d8ce1f36447782c8160df2c7c", "dynamic": True, "urls": [ {"url": "https://github.com/oracle/truffleruby.git", "kind": "git"},