WebKit Bugzilla
Attachment 368527 Details for Bug 196943: Add a baseline tracelet JIT
[patch] WIP: c-backup.diff (text/plain), 158.81 KB, created by Saam Barati on 2019-04-29 18:48:43 PDT
Description: WIP
Filename: c-backup.diff
MIME Type: text/plain
Creator: Saam Barati
Created: 2019-04-29 18:48:43 PDT
Size: 158.81 KB
Flags: patch, obsolete
Index: JSTests/stress/bit-op-with-object-returning-int32.js
===================================================================
--- JSTests/stress/bit-op-with-object-returning-int32.js (revision 244505)
+++ JSTests/stress/bit-op-with-object-returning-int32.js (working copy)
@@ -14,6 +14,7 @@ var o = { valueOf: () => 0b1101 };
 for (var i = 0; i < 10000; i++)
     assert(bitAnd(0b11, o), 0b1);

+print(numberOfDFGCompiles(bitAnd));
 assert(numberOfDFGCompiles(bitAnd) <= 1, true);

 function bitOr(a, b) {
Index: Source/JavaScriptCore/bytecode/ArithProfile.h
===================================================================
--- Source/JavaScriptCore/bytecode/ArithProfile.h (revision 244505)
+++ Source/JavaScriptCore/bytecode/ArithProfile.h (working copy)
@@ -310,6 +310,11 @@ private:
     friend class JSC::LLIntOffsetsExtractor;
 };

+struct TraceProfile {
+    unsigned start;
+    unsigned end; // Not inclusive of this instruction.
+};
+
 } // namespace JSC

 namespace WTF {
Index: Source/JavaScriptCore/bytecode/BytecodeDumper.cpp
===================================================================
--- Source/JavaScriptCore/bytecode/BytecodeDumper.cpp (revision 244505)
+++ Source/JavaScriptCore/bytecode/BytecodeDumper.cpp (working copy)
@@ -110,6 +110,25 @@ void BytecodeDumper<Block>::dumpIdentifi
     }
 }

+template<>
+void BytecodeDumper<UnlinkedCodeBlock>::dumpPreciseJumpTargets()
+{
+}
+
+template<>
+void BytecodeDumper<CodeBlock>::dumpPreciseJumpTargets()
+{
+    m_out.printf("\nPreciseJumpTargets = { ");
+    Vector<InstructionStream::Offset, 32> jumpTargets;
+    computePreciseJumpTargets(m_block, jumpTargets);
+    for (size_t i = 0; i < jumpTargets.size(); ++i) {
+        m_out.print(jumpTargets[i]);
+        if (i + 1 < jumpTargets.size())
+            m_out.print(", ");
+    }
+    m_out.printf(" }\n");
+}
+
 template<class Block>
 void BytecodeDumper<Block>::dumpConstants()
 {
@@ -225,6 +244,7 @@ void BytecodeDumper<Block>::dumpBlock(Bl
     dumper.dumpExceptionHandlers();
     dumper.dumpSwitchJumpTables();
     dumper.dumpStringSwitchJumpTables();
+    dumper.dumpPreciseJumpTargets();

     out.printf("\n");
 }
Index: Source/JavaScriptCore/bytecode/BytecodeDumper.h
===================================================================
--- Source/JavaScriptCore/bytecode/BytecodeDumper.h (revision 244505)
+++ Source/JavaScriptCore/bytecode/BytecodeDumper.h (working copy)
@@ -82,6 +82,7 @@ private:
     void dumpExceptionHandlers();
     void dumpSwitchJumpTables();
     void dumpStringSwitchJumpTables();
+    void dumpPreciseJumpTargets();

     void dumpBytecode(const InstructionStream::Ref& it, const ICStatusMap&);

Index: Source/JavaScriptCore/bytecode/BytecodeList.rb
===================================================================
--- Source/JavaScriptCore/bytecode/BytecodeList.rb (revision 244505)
+++ Source/JavaScriptCore/bytecode/BytecodeList.rb (working copy)
@@ -60,6 +60,7 @@ types [
     :ArrayProfile,
     :ArrayAllocationProfile,
     :ObjectAllocationProfile,
+    :TraceProfile,
 ]

 namespace :Special do
@@ -660,6 +661,14 @@ op_group :BinaryJmp,

 op :loop_hint

+op :trace_hint,
+    metadata: {
+        entrypoint: uintptr_t,
+        traceProfile: TraceProfile,
+        count: int,
+        shouldCompile: bool,
+    }
+
 op_group :SwitchValue,
     [
         :switch_imm,
Index: Source/JavaScriptCore/bytecode/BytecodeUseDef.h
===================================================================
--- Source/JavaScriptCore/bytecode/BytecodeUseDef.h (revision 244505)
+++ Source/JavaScriptCore/bytecode/BytecodeUseDef.h (working copy)
@@ -76,6 +76,7 @@ void computeUsesForBytecodeOffset(Block*
     case op_debug:
     case op_jneq_ptr:
     case op_loop_hint:
+    case op_trace_hint:
     case op_jmp:
     case op_new_object:
     case op_enter:
@@ -321,6 +322,7 @@ void computeDefsForBytecodeOffset(Block*
     case op_jbelow:
     case op_jbeloweq:
     case op_loop_hint:
+    case op_trace_hint:
     case op_switch_imm:
     case op_switch_char:
     case op_switch_string:
Index: Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
===================================================================
--- Source/JavaScriptCore/bytecode/CallLinkStatus.cpp (revision 244505)
+++ Source/JavaScriptCore/bytecode/CallLinkStatus.cpp (working copy)
@@ -342,7 +342,7 @@ CallLinkStatus CallLinkStatus::computeFo
     // fast-path-slow-path control-flow-diamond style of IC inlining. It's either all fast
     // path or it's a full IC. So, for them, if there is an IC status then it means case (1).

-    bool checkStatusFirst = context->optimizedCodeBlock->jitType() == JITCode::FTLJIT;
+    bool checkStatusFirst = context->optimizedCodeBlock->jitType() == JITType::FTLJIT;

     auto bless = [&] (CallLinkStatus& result) {
         if (!context->isInlined(codeOrigin))
Index: Source/JavaScriptCore/bytecode/CodeBlock.cpp
===================================================================
--- Source/JavaScriptCore/bytecode/CodeBlock.cpp (revision 244505)
+++ Source/JavaScriptCore/bytecode/CodeBlock.cpp (working copy)
@@ -180,7 +180,7 @@ CString CodeBlock::hashAsStringIfPossibl
     return "<no-hash>";
 }

-void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
+void CodeBlock::dumpAssumingJITType(PrintStream& out, JITType jitType) const
 {
     out.print(inferredName(), "#", hashAsStringIfPossible());
     out.print(":[", RawPointer(this), "->");
@@ -191,7 +191,7 @@ void CodeBlock::dumpAssumingJITType(Prin
     if (codeType() == FunctionCode)
         out.print(specializationKind());
     out.print(", ", instructionCount());
-    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
+    if (this->jitType() == JITType::BaselineJIT && m_shouldAlwaysBeInlined)
         out.print(" (ShouldAlwaysBeInlined)");
     if (ownerExecutable()->neverInline())
         out.print(" (NeverInline)");
@@ -205,9 +205,9 @@ void CodeBlock::dumpAssumingJITType(Prin
         out.print(" (StrictMode)");
     if (m_didFailJITCompilation)
         out.print(" (JITFail)");
-    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
+    if (this->jitType() == JITType::BaselineJIT && m_didFailFTLCompilation)
         out.print(" (FTLFail)");
-    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
+    if (this->jitType() == JITType::BaselineJIT && m_hasBeenCompiledWithFTL)
         out.print(" (HadFTLReplacement)");
     out.print("]");
 }
@@ -521,10 +521,13 @@ bool CodeBlock::finishCreation(VM& vm, S
         break; \
     }

+    TraceProfile* lastTraceProfile = nullptr;
+
     const InstructionStream& instructionStream = instructions();
     for (const auto& instruction : instructionStream) {
         OpcodeID opcodeID = instruction->opcodeID();
-        m_instructionCount += opcodeLengths[opcodeID];
+        //m_instructionCount += opcodeLengths[opcodeID];
+        //m_instructionCount += instruction->size();
         switch (opcodeID) {
         LINK(OpHasIndexedProperty, arrayProfile)

@@ -770,12 +773,31 @@ bool CodeBlock::finishCreation(VM& vm, S
             m_numberOfArgumentsToSkip = numberOfArgumentsToSkip;
             break;
         }
+
+        case op_trace_hint: {
+            INITIALIZE_METADATA(OpTraceHint)
+            unsigned offset = instruction.offset();
+            if (lastTraceProfile)
+                lastTraceProfile->end = offset;
+            metadata.m_traceProfile.start = offset;
+            lastTraceProfile = &metadata.m_traceProfile;
+
+            metadata.m_count = -30;
+
+            metadata.m_entrypoint = 0;
+            break;
+        }

         default:
             break;
         }
     }

+    m_instructionCount += instructions().sizeInBytes();
+
+    if (lastTraceProfile)
+        lastTraceProfile->end = m_instructionCount;
+
 #undef CASE
 #undef INITIALIZE_METADATA
 #undef LINK_FIELD
@@ -929,7 +951,7 @@ void CodeBlock::setNumParameters(int new
 CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
 {
 #if ENABLE(FTL_JIT)
-    if (jitType() != JITCode::DFGJIT)
+    if (jitType() != JITType::DFGJIT)
         return 0;
     DFG::JITCode* jitCode = m_jitCode->dfg();
     return jitCode->osrEntryBlock();
@@ -1002,17 +1024,17 @@ bool CodeBlock::shouldJettisonDueToWeakR
     return !vm.heap.isMarked(this);
 }

-static Seconds timeToLive(JITCode::JITType jitType)
+static Seconds timeToLive(JITType jitType)
 {
     if (UNLIKELY(Options::useEagerCodeBlockJettisonTiming())) {
         switch (jitType) {
-        case JITCode::InterpreterThunk:
+        case JITType::InterpreterThunk:
             return 10_ms;
-        case JITCode::BaselineJIT:
+        case JITType::BaselineJIT:
             return 30_ms;
-        case JITCode::DFGJIT:
+        case JITType::DFGJIT:
             return 40_ms;
-        case JITCode::FTLJIT:
+        case JITType::FTLJIT:
             return 120_ms;
         default:
             return Seconds::infinity();
@@ -1020,15 +1042,15 @@ static Seconds timeToLive(JITCode::JITTy
     }

     switch (jitType) {
-    case JITCode::InterpreterThunk:
+    case JITType::InterpreterThunk:
         return 5_s;
-    case JITCode::BaselineJIT:
+    case JITType::BaselineJIT:
         // Effectively 10 additional seconds, since BaselineJIT and
         // InterpreterThunk share a CodeBlock.
         return 15_s;
-    case JITCode::DFGJIT:
+    case JITType::DFGJIT:
         return 20_s;
-    case JITCode::FTLJIT:
+    case JITType::FTLJIT:
         return 60_s;
     default:
         return Seconds::infinity();
@@ -1068,7 +1090,8 @@ void CodeBlock::propagateTransitions(con

     VM& vm = *m_vm;

-    if (jitType() == JITCode::InterpreterThunk) {
+    //if (jitType() == JITType::InterpreterThunk) {
+    if (JITCode::couldBeInterpreted(jitType())) {
         const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
         const InstructionStream& instructionStream = instructions();
         for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
@@ -1525,6 +1548,9 @@ void CodeBlock::setCalleeSaveRegisters(s
     ensureJITData(locker).m_calleeSaveRegisters = WTFMove(registerAtOffsetList);
 }

+/*
+ OOPS: Make this temporary per compile until we actually
+ get executable code!
 void CodeBlock::resetJITData()
 {
     RELEASE_ASSERT(!JITCode::isJIT(jitType()));
@@ -1543,6 +1569,7 @@ void CodeBlock::resetJITData()
         jitData->m_rareCaseProfiles.clear();
     }
 }
+*/
 #endif

 void CodeBlock::visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor& visitor)
@@ -1630,7 +1657,7 @@ CodeBlock* CodeBlock::baselineAlternativ
     while (result->alternative())
         result = result->alternative();
     RELEASE_ASSERT(result);
-    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
+    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITType::None);
     return result;
 #else
     return this;
@@ -1640,7 +1667,7 @@ CodeBlock* CodeBlock::baselineAlternativ
 CodeBlock* CodeBlock::baselineVersion()
 {
 #if ENABLE(JIT)
-    JITCode::JITType selfJITType = jitType();
+    JITType selfJITType = jitType();
     if (JITCode::isBaselineCode(selfJITType))
         return this;
     CodeBlock* result = replacement();
@@ -1654,7 +1681,7 @@ CodeBlock* CodeBlock::baselineVersion()
     } else {
         // This can happen if we're creating the original CodeBlock for an executable.
         // Assume that we're the baseline CodeBlock.
-        RELEASE_ASSERT(selfJITType == JITCode::None);
+        RELEASE_ASSERT(selfJITType == JITType::None);
         return this;
     }
 }
@@ -1667,7 +1694,7 @@ CodeBlock* CodeBlock::baselineVersion()
 }

 #if ENABLE(JIT)
-bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
+bool CodeBlock::hasOptimizedReplacement(JITType typeToReplace)
 {
     CodeBlock* replacement = this->replacement();
     return replacement && JITCode::isHigherTier(replacement->jitType(), typeToReplace);
@@ -2147,7 +2174,7 @@ void CodeBlock::noticeIncomingCall(ExecS
         return;
     }

-    if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
+    if (callerCodeBlock->jitType() == JITType::InterpreterThunk) {
         // If the caller is still in the interpreter, then we can't expect inlining to
         // happen anytime soon. Assume it's profitable to optimize it separately. This
         // ensures that a function is SABI only if it is called no more frequently than
@@ -2489,10 +2516,10 @@ void CodeBlock::forceOptimizationSlowPat
 #if ENABLE(DFG_JIT)
 void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
 {
-    JITCode::JITType type = jitType();
-    if (type != JITCode::BaselineJIT) {
+    JITType type = jitType();
+    if (type != JITType::BaselineJIT) {
         dataLog(*this, ": expected to have baseline code but have ", type, "\n");
-        CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), type);
+        CRASH_WITH_INFO(bitwise_cast<uintptr_t>(jitCode().get()), static_cast<uint8_t>(type));
     }

     CodeBlock* replacement = this->replacement();
@@ -2723,12 +2750,12 @@ bool CodeBlock::shouldOptimizeNow()
 void CodeBlock::tallyFrequentExitSites()
 {
     ASSERT(JITCode::isOptimizingJIT(jitType()));
-    ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
+    ASSERT(alternative()->jitType() == JITType::BaselineJIT);

     CodeBlock* profiledBlock = alternative();

     switch (jitType()) {
-    case JITCode::DFGJIT: {
+    case JITType::DFGJIT: {
         DFG::JITCode* jitCode = m_jitCode->dfg();
         for (auto& exit : jitCode->osrExit)
             exit.considerAddingAsFrequentExitSite(profiledBlock);
@@ -2736,7 +2763,7 @@ void CodeBlock::tallyFrequentExitSites()
     }

 #if ENABLE(FTL_JIT)
-    case JITCode::FTLJIT: {
+    case JITType::FTLJIT: {
         // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
         // vector contains a totally different type, that just so happens to behave like
         // DFG::JITCode::osrExit.
@@ -2824,17 +2851,17 @@ void CodeBlock::dumpValueProfiles()
 unsigned CodeBlock::frameRegisterCount()
 {
     switch (jitType()) {
-    case JITCode::InterpreterThunk:
+    case JITType::InterpreterThunk:
         return LLInt::frameRegisterCountFor(this);

 #if ENABLE(JIT)
-    case JITCode::BaselineJIT:
+    case JITType::BaselineJIT:
         return JIT::frameRegisterCountFor(this);
 #endif // ENABLE(JIT)

 #if ENABLE(DFG_JIT)
-    case JITCode::DFGJIT:
-    case JITCode::FTLJIT:
+    case JITType::DFGJIT:
+    case JITType::FTLJIT:
         return jitCode()->dfgCommon()->frameRegisterCount;
 #endif // ENABLE(DFG_JIT)

@@ -3175,15 +3202,15 @@ Optional<CodeOrigin> CodeBlock::findPC(v
 Optional<unsigned> CodeBlock::bytecodeOffsetFromCallSiteIndex(CallSiteIndex callSiteIndex)
 {
     Optional<unsigned> bytecodeOffset;
-    JITCode::JITType jitType = this->jitType();
-    if (jitType == JITCode::InterpreterThunk || jitType == JITCode::BaselineJIT) {
+    JITType jitType = this->jitType();
+    if (jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT) {
 #if USE(JSVALUE64)
         bytecodeOffset = callSiteIndex.bits();
 #else
         Instruction* instruction = bitwise_cast<Instruction*>(callSiteIndex.bits());
         bytecodeOffset = this->bytecodeOffset(instruction);
 #endif
-    } else if (jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT) {
+    } else if (jitType == JITType::DFGJIT || jitType == JITType::FTLJIT) {
 #if ENABLE(DFG_JIT)
         RELEASE_ASSERT(canGetCodeOrigin(callSiteIndex));
         CodeOrigin origin = codeOrigin(callSiteIndex);
Index: Source/JavaScriptCore/bytecode/CodeBlock.h
===================================================================
--- Source/JavaScriptCore/bytecode/CodeBlock.h (revision 244505)
+++ Source/JavaScriptCore/bytecode/CodeBlock.h (working copy)
@@ -142,7 +142,7 @@ public:
     CString hashAsStringIfPossible() const;
     CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
     CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
-    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
+    void dumpAssumingJITType(PrintStream&, JITType) const;
     JS_EXPORT_PRIVATE void dump(PrintStream&) const;

     int numParameters() const { return m_numParameters; }
@@ -309,16 +309,16 @@ public:
     // looking for a CallLinkInfoMap to amortize the cost of calling this.
     CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);

-    void setJITCodeMap(JITCodeMap&& jitCodeMap)
-    {
-        ConcurrentJSLocker locker(m_lock);
-        ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap);
-    }
-    const JITCodeMap& jitCodeMap()
-    {
-        ConcurrentJSLocker locker(m_lock);
-        return ensureJITData(locker).m_jitCodeMap;
-    }
+    //void setJITCodeMap(JITCodeMap&& jitCodeMap)
+    //{
+    //    ConcurrentJSLocker locker(m_lock);
+    //    ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap);
+    //}
+    //const JITCodeMap& jitCodeMap()
+    //{
+    //    ConcurrentJSLocker locker(m_lock);
+    //    return ensureJITData(locker).m_jitCodeMap;
+    //}

     void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
     Optional<CodeOrigin> findPC(void* pc);
@@ -348,12 +348,6 @@ public:
         return value >= Options::couldTakeSlowCaseMinimumCount();
     }

-    // We call this when we want to reattempt compiling something with the baseline JIT. Ideally
-    // the baseline JIT would not add data to CodeBlock, but instead it would put its data into
-    // a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we
-    // would be able to get rid of this silly function.
-    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=159061
-    void resetJITData();
 #endif // ENABLE(JIT)

     void unlinkIncomingCalls();
@@ -402,18 +396,18 @@ public:

     RefPtr<JITCode> jitCode() { return m_jitCode; }
     static ptrdiff_t jitCodeOffset() { return OBJECT_OFFSETOF(CodeBlock, m_jitCode); }
-    JITCode::JITType jitType() const
+    JITType jitType() const
     {
         JITCode* jitCode = m_jitCode.get();
         WTF::loadLoadFence();
-        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
+        JITType result = JITCode::jitTypeFor(jitCode);
         WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
         return result;
     }

     bool hasBaselineJITProfiling() const
     {
-        return jitType() == JITCode::BaselineJIT;
+        return jitType() == JITType::BaselineJIT;
     }

 #if ENABLE(JIT)
@@ -423,7 +417,7 @@ public:
     DFG::CapabilityLevel capabilityLevel();
     DFG::CapabilityLevel capabilityLevelState() { return static_cast<DFG::CapabilityLevel>(m_capabilityLevelState); }

-    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
+    bool hasOptimizedReplacement(JITType typeToReplace);
     bool hasOptimizedReplacement(); // the typeToReplace is my JITType
 #endif

@@ -883,6 +877,10 @@ public:
         return m_unlinkedCode->metadataSizeInBytes();
     }

+    MetadataTable* metadataTable() { return m_metadata.get(); }
+
+    const void* instructionsRawPointer() { return m_instructionsRawPointer; }
+
 protected:
     void finalizeLLIntInlineCaches();
 #if ENABLE(JIT)
Index: Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h
===================================================================
--- Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h (revision 244505)
+++ Source/JavaScriptCore/bytecode/CodeBlockWithJITType.h (working copy)
@@ -34,7 +34,7 @@ namespace JSC {

 class CodeBlockWithJITType {
 public:
-    CodeBlockWithJITType(CodeBlock* codeBlock, JITCode::JITType jitType)
+    CodeBlockWithJITType(CodeBlock* codeBlock, JITType jitType)
         : m_codeBlock(codeBlock)
         , m_jitType(jitType)
     {
@@ -46,7 +46,7 @@ public:
     }
 private:
     CodeBlock* m_codeBlock;
-    JITCode::JITType m_jitType;
+    JITType m_jitType;
 };

 } // namespace JSC
Index: Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp
===================================================================
--- Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp (revision 244505)
+++ Source/JavaScriptCore/bytecode/DeferredSourceDump.cpp (working copy)
@@ -34,11 +34,11 @@ namespace JSC {

 DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock)
     : m_codeBlock(*codeBlock->vm(), codeBlock)
-    , m_rootJITType(JITCode::None)
+    , m_rootJITType(JITType::None)
 {
 }

-DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, unsigned callerBytecodeIndex)
+DeferredSourceDump::DeferredSourceDump(CodeBlock* codeBlock, CodeBlock* rootCodeBlock, JITType rootJITType, unsigned callerBytecodeIndex)
     : m_codeBlock(*codeBlock->vm(), codeBlock)
     , m_rootCodeBlock(*codeBlock->vm(), rootCodeBlock)
     , m_rootJITType(rootJITType)
Index: Source/JavaScriptCore/bytecode/DeferredSourceDump.h
===================================================================
--- Source/JavaScriptCore/bytecode/DeferredSourceDump.h (revision 244505)
+++ Source/JavaScriptCore/bytecode/DeferredSourceDump.h (working copy)
@@ -35,14 +35,14 @@ class CodeBlock;
 class DeferredSourceDump {
 public:
     DeferredSourceDump(CodeBlock*);
-    DeferredSourceDump(CodeBlock*, CodeBlock* rootCodeBlock, JITCode::JITType rootJITType, unsigned callerBytecodeIndex);
+    DeferredSourceDump(CodeBlock*, CodeBlock* rootCodeBlock, JITType rootJITType, unsigned callerBytecodeIndex);

     void dump();

 private:
     Strong<CodeBlock> m_codeBlock;
     Strong<CodeBlock> m_rootCodeBlock;
-    JITCode::JITType m_rootJITType;
+    JITType m_rootJITType;
     unsigned m_callerBytecodeIndex { UINT_MAX };
 };

Index: Source/JavaScriptCore/bytecode/ExitingJITType.h
===================================================================
--- Source/JavaScriptCore/bytecode/ExitingJITType.h (revision 244505)
+++ Source/JavaScriptCore/bytecode/ExitingJITType.h (working copy)
@@ -35,12 +35,12 @@ enum ExitingJITType : uint8_t {
     ExitFromFTL
 };

-inline ExitingJITType exitingJITTypeFor(JITCode::JITType type)
+inline ExitingJITType exitingJITTypeFor(JITType type)
 {
     switch (type) {
-    case JITCode::DFGJIT:
+    case JITType::DFGJIT:
         return ExitFromDFG;
-    case JITCode::FTLJIT:
+    case JITType::FTLJIT:
         return ExitFromFTL;
     default:
         RELEASE_ASSERT_NOT_REACHED();
Index: Source/JavaScriptCore/bytecode/InlineCallFrame.h
===================================================================
--- Source/JavaScriptCore/bytecode/InlineCallFrame.h (revision 244505)
+++ Source/JavaScriptCore/bytecode/InlineCallFrame.h (working copy)
@@ -240,7 +240,7 @@ inline CodeBlock* baselineCodeBlockForIn

 inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
 {
-    ASSERT(baselineCodeBlock->jitType() == JITCode::BaselineJIT);
+    ASSERT(baselineCodeBlock->jitType() == JITType::BaselineJIT);
     auto* inlineCallFrame = codeOrigin.inlineCallFrame();
     if (inlineCallFrame)
         return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
Index: Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
===================================================================
--- Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp (revision 244505)
+++ Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp (working copy)
@@ -1369,6 +1369,9 @@ void BytecodeGenerator::emitLabel(Label&

     m_codeBlock->addJumpTarget(newLabelIndex);

+    if (m_lastInstruction->opcodeID() != op_trace_hint)
+        OpTraceHint::emit(this);
+
     // This disables peephole optimizations when an instruction is a jump target
     m_lastOpcodeID = op_end;
 }
@@ -1384,6 +1387,8 @@ void BytecodeGenerator::emitEnter()
         // This disables peephole optimizations when an instruction is a jump target
         m_lastOpcodeID = op_end;
     }
+
+    OpTraceHint::emit(this);
 }

 void BytecodeGenerator::emitLoopHint()
Index: Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp (working copy)
@@ -6554,6 +6554,11 @@ void ByteCodeParser::parseBlock(unsigned
             NEXT_OPCODE(op_put_to_scope);
         }

+        case op_trace_hint: {
+            addToGraph(Check); // We add a nop here so that basic block linking doesn't break.
+            NEXT_OPCODE(op_trace_hint);
+        }
+
         case op_loop_hint: {
             // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
             // OSR can only happen at basic block boundaries. Assert that these two statements
@@ -7094,7 +7099,7 @@ void ByteCodeParser::parseCodeBlock()
     if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
         Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback()->ensureDeferredSourceDump();
         if (inlineCallFrame()) {
-            DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->directCaller.bytecodeIndex());
+            DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITType::DFGJIT, inlineCallFrame()->directCaller.bytecodeIndex());
             deferredSourceDump.append(dump);
         } else
             deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
@@ -7104,7 +7109,7 @@ void ByteCodeParser::parseCodeBlock()
         dataLog("Parsing ", *codeBlock);
         if (inlineCallFrame()) {
             dataLog(
-                " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
+                " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITType::DFGJIT),
                 " ", inlineCallFrame()->directCaller);
         }
         dataLog(
Index: Source/JavaScriptCore/dfg/DFGCapabilities.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGCapabilities.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGCapabilities.cpp (working copy)
@@ -202,6 +202,7 @@ CapabilityLevel capabilityLevel(OpcodeID
     case op_jbelow:
     case op_jbeloweq:
     case op_loop_hint:
+    case op_trace_hint:
     case op_check_traps:
     case op_nop:
     case op_ret:
Index: Source/JavaScriptCore/dfg/DFGDisassembler.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGDisassembler.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGDisassembler.cpp (working copy)
@@ -74,7 +74,7 @@ void Disassembler::reportToProfiler(Prof

 void Disassembler::dumpHeader(PrintStream& out, LinkBuffer& linkBuffer)
 {
-    out.print("Generated DFG JIT code for ", CodeBlockWithJITType(m_graph.m_codeBlock, JITCode::DFGJIT), ", instruction count = ", m_graph.m_codeBlock->instructionCount(), ":\n");
+    out.print("Generated DFG JIT code for ", CodeBlockWithJITType(m_graph.m_codeBlock, JITType::DFGJIT), ", instruction count = ", m_graph.m_codeBlock->instructionCount(), ":\n");
     out.print("    Optimized with execution counter = ", m_graph.m_profiledBlock->jitExecuteCounter(), "\n");
     out.print("    Code at [", RawPointer(linkBuffer.debugAddress()), ", ", RawPointer(static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.size()), "):\n");
 }
Index: Source/JavaScriptCore/dfg/DFGDriver.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGDriver.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGDriver.cpp (working copy)
@@ -81,8 +81,8 @@ static CompilationResult compileImpl(

     ASSERT(codeBlock);
     ASSERT(codeBlock->alternative());
-    ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
-    ASSERT(!profiledDFGCodeBlock || profiledDFGCodeBlock->jitType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT);
+    ASSERT(!profiledDFGCodeBlock || profiledDFGCodeBlock->jitType() == JITType::DFGJIT);

     if (logCompilationChanges(mode))
         dataLog("DFG(Driver) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");
Index: Source/JavaScriptCore/dfg/DFGGraph.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGGraph.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGGraph.cpp (working copy)
@@ -525,7 +525,7 @@ void Graph::dump(PrintStream& out, DumpC
         context = &myContext;

     out.print("\n");
-    out.print("DFG for ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT), ":\n");
+    out.print("DFG for ", CodeBlockWithJITType(m_codeBlock, JITType::DFGJIT), ":\n");
     out.print("  Fixpoint state: ", m_fixpointState, "; Form: ", m_form, "; Unification state: ", m_unificationState, "; Ref count state: ", m_refCountState, "\n");
     if (m_form == SSA) {
         for (unsigned entrypointIndex = 0; entrypointIndex < m_argumentFormats.size(); ++entrypointIndex)
Index: Source/JavaScriptCore/dfg/DFGJITCode.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGJITCode.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGJITCode.cpp (working copy)
@@ -36,7 +36,7 @@
 namespace JSC { namespace DFG {

 JITCode::JITCode()
-    : DirectJITCode(DFGJIT)
+    : DirectJITCode(JITType::DFGJIT)
 #if ENABLE(FTL_JIT)
     , osrEntryRetry(0)
     , abandonOSREntry(false)
@@ -123,13 +123,13 @@ RegisterSet JITCode::liveRegistersToPres
 #if ENABLE(FTL_JIT)
 bool JITCode::checkIfOptimizationThresholdReached(CodeBlock* codeBlock)
 {
-    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
     return tierUpCounter.checkIfThresholdCrossedAndSet(codeBlock);
 }

 void JITCode::optimizeNextInvocation(CodeBlock* codeBlock)
 {
-    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
     if (Options::verboseOSR())
         dataLog(*codeBlock, ": FTL-optimizing next invocation.\n");
     tierUpCounter.setNewThreshold(0, codeBlock);
@@ -137,7 +137,7 @@ void JITCode::optimizeNextInvocation(Cod

 void JITCode::dontOptimizeAnytimeSoon(CodeBlock* codeBlock)
 {
-    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
     if (Options::verboseOSR())
         dataLog(*codeBlock, ": Not FTL-optimizing anytime soon.\n");
     tierUpCounter.deferIndefinitely();
@@ -145,7 +145,7 @@ void JITCode::dontOptimizeAnytimeSoon(Co

 void JITCode::optimizeAfterWarmUp(CodeBlock* codeBlock)
 {
-    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
     if (Options::verboseOSR())
         dataLog(*codeBlock, ": FTL-optimizing after warm-up.\n");
     CodeBlock* baseline = codeBlock->baselineVersion();
@@ -156,7 +156,7 @@ void JITCode::optimizeAfterWarmUp(CodeBl

 void JITCode::optimizeSoon(CodeBlock* codeBlock)
 {
-    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
     if (Options::verboseOSR())
         dataLog(*codeBlock, ": FTL-optimizing soon.\n");
     CodeBlock* baseline = codeBlock->baselineVersion();
@@ -167,7 +167,7 @@ void JITCode::optimizeSoon(CodeBlock* co

 void JITCode::forceOptimizationSlowPathConcurrently(CodeBlock* codeBlock)
 {
-    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
     if (Options::verboseOSR())
         dataLog(*codeBlock, ": Forcing slow path concurrently for FTL entry.\n");
     tierUpCounter.forceSlowPathConcurrently();
@@ -176,7 +176,7 @@ void JITCode::forceOptimizationSlowPathC
 void JITCode::setOptimizationThresholdBasedOnCompilationResult(
     CodeBlock* codeBlock, CompilationResult result)
 {
-    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
     switch (result) {
     case CompilationSuccessful:
         optimizeNextInvocation(codeBlock);
Index: Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGJITFinalizer.cpp (working copy)
@@ -56,7 +56,7 @@ size_t JITFinalizer::codeSize()

 bool JITFinalizer::finalize()
 {
-    MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::DFGJIT)).data());
+    MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITType::DFGJIT)).data());
     m_jitCode->initializeCodeRefForDFG(codeRef, codeRef.code());

     m_plan.codeBlock()->setJITCode(m_jitCode.copyRef());
@@ -70,7 +70,7 @@ bool JITFinalizer::finalizeFunction()
 {
     RELEASE_ASSERT(!m_withArityCheck.isEmptyValue());
     m_jitCode->initializeCodeRefForDFG(
-        FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::DFGJIT)).data()),
+        FINALIZE_DFG_CODE(*m_linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITType::DFGJIT)).data()),
         m_withArityCheck);
     m_plan.codeBlock()->setJITCode(m_jitCode.copyRef());

Index: Source/JavaScriptCore/dfg/DFGOSREntry.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGOSREntry.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGOSREntry.cpp (working copy)
@@ -96,8 +96,7 @@ void* prepareOSREntry(ExecState* exec, C
 {
     ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
     ASSERT(codeBlock->alternative());
-    ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
-    ASSERT(!codeBlock->jitCodeMap());
+    ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT);

     if (!Options::useOSREntryToDFG())
         return nullptr;
@@ -115,8 +114,8 @@ void* prepareOSREntry(ExecState* exec, C
     if (bytecodeIndex)
         codeBlock->ownerExecutable()->setDidTryToEnterInLoop(true);

-    if (codeBlock->jitType() != JITCode::DFGJIT) {
-        RELEASE_ASSERT(codeBlock->jitType() == JITCode::FTLJIT);
+    if (codeBlock->jitType() != JITType::DFGJIT) {
+        RELEASE_ASSERT(codeBlock->jitType() == JITType::FTLJIT);

         // When will this happen? We could have:
         //
@@ -341,11 +340,11 @@ void* prepareOSREntry(ExecState* exec, C

 MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
 {
-    ASSERT(codeBlock->jitType() == JITCode::DFGJIT || codeBlock->jitType() == JITCode::FTLJIT);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT || codeBlock->jitType() == JITType::FTLJIT);

-    if (!Options::useOSREntryToDFG() && codeBlock->jitCode()->jitType() == JITCode::DFGJIT)
+    if (!Options::useOSREntryToDFG() && codeBlock->jitCode()->jitType() == JITType::DFGJIT)
         return nullptr;
-    if (!Options::useOSREntryToFTL() && codeBlock->jitCode()->jitType() == JITCode::FTLJIT)
+    if (!Options::useOSREntryToFTL() && codeBlock->jitCode()->jitType() == JITType::FTLJIT)
         return nullptr;

     VM& vm = exec->vm();
Index: Source/JavaScriptCore/dfg/DFGOSRExit.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGOSRExit.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGOSRExit.cpp (working copy)
@@ -351,7 +351,7 @@ void OSRExit::executeOSRExit(Context& co

     CodeBlock* codeBlock = exec->codeBlock();
     ASSERT(codeBlock);
-    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT);

     // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't
     // really be profitable.
@@ -371,11 +371,11 @@ void OSRExit::executeOSRExit(Context& co
         // results will be cached in the OSRExitState record for use of the rest of the
         // exit ramp code.

-        // Ensure we have baseline codeBlocks to OSR exit to.
-        prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
-
         CodeBlock* baselineCodeBlock = codeBlock->baselineAlternative();
-        ASSERT(baselineCodeBlock->jitType() == JITCode::BaselineJIT);
+        ASSERT(baselineCodeBlock->jitType() == JITType::BaselineJIT);
+
+        // Ensure we have baseline codeBlocks to OSR exit to.
+        prepareCodeOriginForOSRExit(exec, baselineCodeBlock, exit.m_codeOrigin);

         SpeculationRecovery* recovery = nullptr;
         if (exit.m_recoveryIndex != UINT_MAX) {
@@ -405,11 +405,10 @@ void OSRExit::executeOSRExit(Context& co
         adjustedThreshold = BaselineExecutionCounter::clippedThreshold(codeBlock->globalObject(), adjustedThreshold);

         CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock);
-        const JITCodeMap& codeMap = codeBlockForExit->jitCodeMap();
-        CodeLocationLabel<JSEntryPtrTag> codeLocation = codeMap.find(exit.m_codeOrigin.bytecodeIndex());
-        ASSERT(codeLocation);
+        MacroAssemblerCodePtr<JITTraceletPtrTag> codePtr = static_cast<TraceletJITCode*>(codeBlockForExit->jitCode().get())->findCodeLocation(exit.m_codeOrigin.bytecodeIndex());
+        ASSERT(!!codePtr);

-        void* jumpTarget = codeLocation.executableAddress();
+        void* jumpTarget = codePtr.executableAddress();

         // Compute the value recoveries.
         Operands<ValueRecovery> operands;
@@ -445,7 +444,7 @@ void OSRExit::executeOSRExit(Context& co

     OSRExitState& exitState = *exit.exitState.get();
     CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
-    ASSERT(baselineCodeBlock->jitType() == JITCode::BaselineJIT);
+    ASSERT(baselineCodeBlock->jitType() == JITType::BaselineJIT);

     Operands<ValueRecovery>& operands = exitState.operands;
     Vector<UndefinedOperandSpan>& undefinedOperandSpans = exitState.undefinedOperandSpans;
@@ -752,7 +751,7 @@ static void reifyInlinedCallFrames(Conte
     // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
     // in presence of inlined tail calls.
     // https://bugs.webkit.org/show_bug.cgi?id=147511
-    ASSERT(outermostBaselineCodeBlock->jitType() == JITCode::BaselineJIT);
+    ASSERT(outermostBaselineCodeBlock->jitType() == JITType::BaselineJIT);
     frame.setOperand<CodeBlock*>(CallFrameSlot::codeBlock, outermostBaselineCodeBlock);

     const CodeOrigin* codeOrigin;
@@ -1035,7 +1034,7 @@ void JIT_OPERATION OSRExit::compileOSREx

     CodeBlock* codeBlock = exec->codeBlock();
     ASSERT(codeBlock);
-    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT);

     // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't
     // really be profitable.
@@ -1047,7 +1046,7 @@ void JIT_OPERATION OSRExit::compileOSREx
     ASSERT(!vm->callFrameForCatch || exit.m_kind == GenericUnwind);
     EXCEPTION_ASSERT_UNUSED(scope, !!scope.exception() || !exit.isExceptionHandler());

-    prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
+    prepareCodeOriginForOSRExit(exec, codeBlock->baselineAlternative(), exit.m_codeOrigin);

     // Compute the value recoveries.
     Operands<ValueRecovery> operands;
Index: Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp (working copy)
@@ -116,10 +116,10 @@ void handleExitCounts(CCallHelpers& jit,
             activeThreshold, jit.baselineCodeBlock());
         int32_t clippedValue;
         switch (jit.codeBlock()->jitType()) {
-        case JITCode::DFGJIT:
+        case JITType::DFGJIT:
             clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
             break;
-        case JITCode::FTLJIT:
+        case JITType::FTLJIT:
             clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
             break;
         default:
@@ -141,7 +141,7 @@ void reifyInlinedCallFrames(CCallHelpers
     // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
     // in presence of inlined tail calls.
     // https://bugs.webkit.org/show_bug.cgi?id=147511
-    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
+    ASSERT(jit.baselineCodeBlock()->jitType() == JITType::BaselineJIT);
     jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)CallFrameSlot::codeBlock));

     const CodeOrigin* codeOrigin;
@@ -310,11 +310,12 @@ void adjustAndJumpToTarget(VM& vm, CCall

     CodeBlock* codeBlockForExit = jit.baselineCodeBlockFor(exit.m_codeOrigin);
     ASSERT(codeBlockForExit == codeBlockForExit->baselineVersion());
-    ASSERT(codeBlockForExit->jitType() == JITCode::BaselineJIT);
-    CodeLocationLabel<JSEntryPtrTag> codeLocation = codeBlockForExit->jitCodeMap().find(exit.m_codeOrigin.bytecodeIndex());
-    ASSERT(codeLocation);
+    ASSERT(codeBlockForExit->jitType() == JITType::BaselineJIT);
+    RELEASE_ASSERT(codeBlockForExit->jitCode()->isTraceletJITCode());
+    MacroAssemblerCodePtr<JITTraceletPtrTag> codePtr = static_cast<TraceletJITCode*>(codeBlockForExit->jitCode().get())->findCodeLocation(exit.m_codeOrigin.bytecodeIndex());
+    ASSERT(!!codePtr);

-    void* jumpTarget = codeLocation.retagged<OSRExitPtrTag>().executableAddress();
+    void* jumpTarget = codePtr.retagged<OSRExitPtrTag>().executableAddress();
     jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
     if (exit.isExceptionHandler()) {
         // Since we're jumping to op_catch, we need to set callFrameForCatch.
Index: Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h
===================================================================
--- Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.h (working copy)
@@ -41,11 +41,11 @@ void reifyInlinedCallFrames(CCallHelpers
 void adjustAndJumpToTarget(VM&, CCallHelpers&, const OSRExitBase&);

 template <typename JITCodeType>
-void adjustFrameAndStackInOSRExitCompilerThunk(MacroAssembler& jit, VM* vm, JITCode::JITType jitType)
+void adjustFrameAndStackInOSRExitCompilerThunk(MacroAssembler& jit, VM* vm, JITType jitType)
 {
-    ASSERT(jitType == JITCode::DFGJIT || jitType == JITCode::FTLJIT);
+    ASSERT(jitType == JITType::DFGJIT || jitType == JITType::FTLJIT);

-    bool isFTLOSRExit = jitType == JITCode::FTLJIT;
+    bool isFTLOSRExit = jitType == JITType::FTLJIT;
     RegisterSet registersToPreserve;
     registersToPreserve.set(GPRInfo::regT0);
     if (isFTLOSRExit) {
Index: Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGOSRExitPreparation.cpp (working copy)
@@ -36,15 +36,19 @@

 namespace JSC { namespace DFG {

-void prepareCodeOriginForOSRExit(ExecState* exec, CodeOrigin codeOrigin)
+void prepareCodeOriginForOSRExit(ExecState* exec, CodeBlock* codeBlock, CodeOrigin codeOrigin)
 {
     VM& vm = exec->vm();
     DeferGC deferGC(vm.heap);
-
+
+    RELEASE_ASSERT(codeBlock->baselineAlternative() == codeBlock);
+
     for (; codeOrigin.inlineCallFrame(); codeOrigin = codeOrigin.inlineCallFrame()->directCaller) {
         CodeBlock* codeBlock = codeOrigin.inlineCallFrame()->baselineCodeBlock.get();
-        JITWorklist::ensureGlobalWorklist().compileNow(codeBlock);
+        JITWorklist::ensureGlobalWorklist().compileNow(codeBlock, codeOrigin.bytecodeIndex());
     }
+
+    JITWorklist::ensureGlobalWorklist().compileNow(codeBlock, codeOrigin.bytecodeIndex());
 }

 } } // namespace JSC::DFG
Index: Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h
===================================================================
--- Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGOSRExitPreparation.h (working copy)
@@ -41,7 +41,7 @@ namespace JSC { namespace DFG {
 // probably it's a good sign that the thing we're exiting into is hot. Even more
 // interestingly, since the code was inlined, it may never otherwise get JIT
 // compiled since the act of inlining it may ensure that it otherwise never runs.
-void prepareCodeOriginForOSRExit(ExecState*, CodeOrigin);
+void prepareCodeOriginForOSRExit(ExecState*, CodeBlock*, CodeOrigin);

 } } // namespace JSC::DFG

Index: Source/JavaScriptCore/dfg/DFGOperations.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGOperations.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGOperations.cpp (working copy)
@@ -3167,7 +3167,7 @@ void JIT_OPERATION triggerTierUpNow(Exec

     sanitizeStackForVM(vm);

-    if (codeBlock->jitType() != JITCode::DFGJIT) {
+    if (codeBlock->jitType() != JITType::DFGJIT) {
         dataLog("Unexpected code block in DFG->FTL tier-up: ", *codeBlock, "\n");
         RELEASE_ASSERT_NOT_REACHED();
     }
@@ -3369,7 +3369,6 @@ static char* tierUpCommon(ExecState* exe

         if (tryTriggerOuterLoopToCompile()) {
             jitCode->setOptimizationThresholdBasedOnCompilationResult(codeBlock, CompilationDeferred);
-            return nullptr;
         }
     }

@@ -3433,7 +3432,7 @@ void JIT_OPERATION triggerTierUpNowInLoo

     sanitizeStackForVM(vm);

-    if (codeBlock->jitType() != JITCode::DFGJIT) {
+    if (codeBlock->jitType() != JITType::DFGJIT) {
         dataLog("Unexpected code block in DFG->FTL trigger tier up now in loop: ", *codeBlock, "\n");
         RELEASE_ASSERT_NOT_REACHED();
     }
@@ -3467,7 +3466,7 @@ char* JIT_OPERATION triggerOSREntryNow(E

     sanitizeStackForVM(vm);

-    if (codeBlock->jitType() != JITCode::DFGJIT) {
+    if (codeBlock->jitType() != JITType::DFGJIT) {
         dataLog("Unexpected code block in DFG->FTL tier-up: ", *codeBlock, "\n");
         RELEASE_ASSERT_NOT_REACHED();
     }
Index: Source/JavaScriptCore/dfg/DFGThunks.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGThunks.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGThunks.cpp (working copy)
@@ -53,7 +53,7 @@ MacroAssemblerCodeRef<JITThunkPtrTag> os
     MacroAssembler jit;

     // This needs to happen before we use the scratch buffer because this function also uses the scratch buffer.
-    adjustFrameAndStackInOSRExitCompilerThunk<DFG::JITCode>(jit, vm, JITCode::DFGJIT);
+    adjustFrameAndStackInOSRExitCompilerThunk<DFG::JITCode>(jit, vm, JITType::DFGJIT);

     size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
     ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(scratchSize);
Index: Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp
===================================================================
--- Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp (revision 244505)
+++ Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp (working copy)
@@ -119,7 +119,7 @@ unsigned VariableEventStream::reconstruc
     CodeBlock* codeBlock, CodeOrigin codeOrigin, MinifiedGraph& graph,
     unsigned index, Operands<ValueRecovery>& valueRecoveries, Vector<UndefinedOperandSpan>* undefinedOperandSpans) const
 {
-    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
+    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
     CodeBlock* baselineCodeBlock = codeBlock->baselineVersion();

     unsigned numVariables;
Index: Source/JavaScriptCore/ftl/FTLCompile.cpp
===================================================================
--- Source/JavaScriptCore/ftl/FTLCompile.cpp (revision 244505)
+++ Source/JavaScriptCore/ftl/FTLCompile.cpp (working copy)
@@ -79,7 +79,7 @@ void compile(State& state, Safepoint::Re
     std::unique_ptr<RegisterAtOffsetList> registerOffsets =
         std::make_unique<RegisterAtOffsetList>(state.proc->calleeSaveRegisterAtOffsetList());
     if (shouldDumpDisassembly())
-        dataLog("Unwind info for ", CodeBlockWithJITType(codeBlock, JITCode::FTLJIT), ": ", *registerOffsets, "\n");
+        dataLog("Unwind info for ", CodeBlockWithJITType(codeBlock, JITType::FTLJIT), ": ", *registerOffsets, "\n");
     codeBlock->setCalleeSaveRegisters(WTFMove(registerOffsets));
     ASSERT(!(state.proc->frameSize() % sizeof(EncodedJSValue)));
     state.jitCode->common.frameRegisterCount = state.proc->frameSize() / sizeof(EncodedJSValue);
@@ -168,7 +168,7 @@ void compile(State& state, Safepoint::Re
     if (B3::Air::Disassembler* disassembler = state.proc->code().disassembler()) {
         PrintStream& out = WTF::dataFile();

-        out.print("Generated ", state.graph.m_plan.mode(), " code for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ", instruction count = ", state.graph.m_codeBlock->instructionCount(), ":\n");
+        out.print("Generated ", state.graph.m_plan.mode(), " code for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITType::FTLJIT), ", instruction count = ", state.graph.m_codeBlock->instructionCount(), ":\n");

         LinkBuffer& linkBuffer = *state.finalizer->b3CodeLinkBuffer;
         B3::Value* currentB3Value = nullptr;
Index: Source/JavaScriptCore/ftl/FTLJITCode.cpp
===================================================================
--- Source/JavaScriptCore/ftl/FTLJITCode.cpp (revision 244505)
+++ Source/JavaScriptCore/ftl/FTLJITCode.cpp (working copy)
@@ -36,7 +36,7 @@ namespace JSC { namespace FTL {
 using namespace B3;

 JITCode::JITCode()
-    : JSC::JITCode(FTLJIT)
+    : JSC::JITCode(JITType::FTLJIT)
 {
 }

Index: Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp
===================================================================
--- Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp (revision 244505)
+++ Source/JavaScriptCore/ftl/FTLJITFinalizer.cpp (working copy)
@@ -75,11 +75,11 @@ bool JITFinalizer::finalizeCommon()

     MacroAssemblerCodeRef<JSEntryPtrTag> b3CodeRef =
         FINALIZE_CODE_IF(dumpDisassembly, *b3CodeLinkBuffer, JSEntryPtrTag,
-            "FTL B3 code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::FTLJIT)).data());
+            "FTL B3 code for %s", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITType::FTLJIT)).data());

     MacroAssemblerCodeRef<JSEntryPtrTag> arityCheckCodeRef = entrypointLinkBuffer
         ? FINALIZE_CODE_IF(dumpDisassembly, *entrypointLinkBuffer, JSEntryPtrTag,
-            "FTL entrypoint thunk for %s with B3 generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITCode::FTLJIT)).data(), function)
+            "FTL entrypoint thunk for %s with B3 generated code at %p", toCString(CodeBlockWithJITType(m_plan.codeBlock(), JITType::FTLJIT)).data(), function)
         : MacroAssemblerCodeRef<JSEntryPtrTag>::createSelfManagedCodeRef(b3CodeRef.code());

     jitCode->initializeB3Code(b3CodeRef);
Index: Source/JavaScriptCore/ftl/FTLLink.cpp
===================================================================
--- Source/JavaScriptCore/ftl/FTLLink.cpp (revision 244505)
+++ Source/JavaScriptCore/ftl/FTLLink.cpp (working copy)
@@ -71,7 +71,7 @@ void link(State& state)
     if (UNLIKELY(compilation)) {
         compilation->addDescription(
             Profiler::OriginStack(),
-            toCString("Generated FTL JIT code for ", CodeBlockWithJITType(codeBlock, JITCode::FTLJIT), ", instruction count = ", graph.m_codeBlock->instructionCount(), ":\n"));
+            toCString("Generated FTL JIT code for ", CodeBlockWithJITType(codeBlock, JITType::FTLJIT), ", instruction count = ", graph.m_codeBlock->instructionCount(), ":\n"));

         graph.ensureSSADominators();
         graph.ensureSSANaturalLoops();
Index: Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp
===================================================================
--- Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp (revision 244505)
+++ Source/JavaScriptCore/ftl/FTLOSRExitCompiler.cpp (working copy)
@@ -506,7 +506,7 @@ extern "C" void* compileFTLOSRExit(ExecS
     CodeBlock* codeBlock = exec->codeBlock();

     ASSERT(codeBlock);
-    ASSERT(codeBlock->jitType() == JITCode::FTLJIT);
+    ASSERT(codeBlock->jitType() == JITType::FTLJIT);

     // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't
     // really be profitable.
@@ -532,7 +532,7 @@ extern "C" void* compileFTLOSRExit(ExecS
         }
     }

-    prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin);
+    prepareCodeOriginForOSRExit(exec, codeBlock->baselineAlternative(), exit.m_codeOrigin);

     compileStub(exitID, jitCode, exit, &vm, codeBlock);

Index: Source/JavaScriptCore/ftl/FTLThunks.cpp
===================================================================
--- Source/JavaScriptCore/ftl/FTLThunks.cpp (revision 244505)
+++ Source/JavaScriptCore/ftl/FTLThunks.cpp (working copy)
@@ -53,7 +53,7 @@ static MacroAssemblerCodeRef<JITThunkPtr

     if (frameAndStackAdjustmentRequirement == FrameAndStackAdjustmentRequirement::Needed) {
         // This needs to happen before we use the scratch buffer because this function also uses the scratch buffer.
-        adjustFrameAndStackInOSRExitCompilerThunk<FTL::JITCode>(jit, vm, JITCode::FTLJIT);
+        adjustFrameAndStackInOSRExitCompilerThunk<FTL::JITCode>(jit, vm, JITType::FTLJIT);
     }

     // Note that the "return address" will be the ID that we pass to the generation function.
Index: Source/JavaScriptCore/interpreter/CallFrame.cpp
===================================================================
--- Source/JavaScriptCore/interpreter/CallFrame.cpp (revision 244505)
+++ Source/JavaScriptCore/interpreter/CallFrame.cpp (working copy)
@@ -52,11 +52,11 @@ bool CallFrame::callSiteBitsAreBytecodeO
 {
     ASSERT(codeBlock());
     switch (codeBlock()->jitType()) {
-    case JITCode::InterpreterThunk:
-    case JITCode::BaselineJIT:
+    case JITType::InterpreterThunk:
+    case JITType::BaselineJIT:
         return true;
-    case JITCode::None:
-    case JITCode::HostCallThunk:
+    case JITType::None:
+    case JITType::HostCallThunk:
         RELEASE_ASSERT_NOT_REACHED();
         return false;
     default:
@@ -71,11 +71,11 @@ bool CallFrame::callSiteBitsAreCodeOrigi
 {
     ASSERT(codeBlock());
     switch (codeBlock()->jitType()) {
-    case JITCode::DFGJIT:
-    case JITCode::FTLJIT:
+    case JITType::DFGJIT:
+    case JITType::FTLJIT:
         return true;
-    case JITCode::None:
-    case JITCode::HostCallThunk:
+    case JITType::None:
+    case JITType::HostCallThunk:
         RELEASE_ASSERT_NOT_REACHED();
         return false;
     default:
Index: Source/JavaScriptCore/interpreter/StackVisitor.cpp
===================================================================
--- Source/JavaScriptCore/interpreter/StackVisitor.cpp (revision 244505)
+++ Source/JavaScriptCore/interpreter/StackVisitor.cpp (working copy)
@@ -492,8 +492,8 @@ void StackVisitor::Frame::dump(PrintStre
         CallSiteIndex callSiteIndex = callFrame->callSiteIndex();
         out.print(indent, "callSiteIndex: ", callSiteIndex.bits(), " of ", codeBlock->codeOrigins().size(), "\n");

-        JITCode::JITType jitType = codeBlock->jitType();
-        if (jitType != JITCode::FTLJIT) {
+        JITType jitType = codeBlock->jitType();
+        if (jitType != JITType::FTLJIT) {
             JITCode* jitCode = codeBlock->jitCode().get();
             out.print(indent, "jitCode: ", RawPointer(jitCode),
                 " start ", RawPointer(jitCode->start()),
Index: Source/JavaScriptCore/jit/AssemblyHelpers.h
===================================================================
--- Source/JavaScriptCore/jit/AssemblyHelpers.h (revision 244505)
+++ Source/JavaScriptCore/jit/AssemblyHelpers.h (working copy)
@@ -52,12 +52,12 @@ class AssemblyHelpers : public MacroAsse
 public:
     AssemblyHelpers(CodeBlock* codeBlock)
         : m_codeBlock(codeBlock)
-        , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0)
+        , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : nullptr)
     {
         if (m_codeBlock) {
             ASSERT(m_baselineCodeBlock);
             ASSERT(!m_baselineCodeBlock->alternative());
-            ASSERT(m_baselineCodeBlock->jitType() == JITCode::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType()));
+            ASSERT(m_baselineCodeBlock->jitType() == JITType::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType()));
         }
     }

Index: Source/JavaScriptCore/jit/ExecutableAllocator.cpp
===================================================================
--- Source/JavaScriptCore/jit/ExecutableAllocator.cpp (revision 244505)
+++ Source/JavaScriptCore/jit/ExecutableAllocator.cpp (working copy)
@@ -159,9 +159,10 @@ public:
             return;

         size_t reservationSize;
-        if (Options::jitMemoryReservationSize())
-            reservationSize = Options::jitMemoryReservationSize();
-        else
+        // OOPS!
+        //if (Options::jitMemoryReservationSize())
+        //    reservationSize = Options::jitMemoryReservationSize();
+        //else
             reservationSize = fixedExecutableMemoryPoolSize;
         reservationSize = std::max(roundUpToMultipleOf(pageSize(), reservationSize), pageSize() * 2);

Index: Source/JavaScriptCore/jit/JIT.cpp
===================================================================
--- Source/JavaScriptCore/jit/JIT.cpp (revision 244505)
+++ Source/JavaScriptCore/jit/JIT.cpp (working copy)
@@ -41,6 +41,7 @@
 #include "JSCInlines.h"
 #include "JSFunction.h"
 #include "LinkBuffer.h"
+#include "LLIntData.h"
 #include "MaxFrameExtentForSlowPathCall.h"
 #include "ModuleProgramCodeBlock.h"
 #include "PCToCodeOriginMap.h"
@@ -56,11 +57,15 @@
 #include <wtf/GraphNodeWorklist.h>
 #include <wtf/SimpleStats.h>

+#include "MacroAssemblerPrinter.h"
+
 namespace JSC {
 namespace JITInternal {
 static constexpr const bool verbose = false;
 }

+static constexpr bool verboseProbes = false;
+
 Seconds totalBaselineCompileTime;
 Seconds totalDFGCompileTime;
 Seconds totalFTLCompileTime;
@@ -77,7 +82,6 @@ void ctiPatchCallByReturnAddress(ReturnA
 JIT::JIT(VM* vm, CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset)
     : JSInterfaceJIT(vm, codeBlock)
     , m_interpreter(vm->interpreter)
-    , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
     , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
     , m_pcToCodeOriginMapBuilder(*vm)
     , m_canBeOptimized(false)
@@ -85,6 +89,9 @@ JIT::JIT(VM* vm, CodeBlock* codeBlock, u
     , m_shouldUseIndexMasking(Options::enableSpectreMitigations())
     , m_loopOSREntryBytecodeOffset(loopOSREntryBytecodeOffset)
 {
+    RefPtr<JITCode> jitCode = codeBlock->jitCode();
+    if (jitCode && jitCode->isTraceletJITCode())
+        m_priorCode = static_cast<TraceletJITCode*>(jitCode.get());
 }

 JIT::~JIT()
@@ -187,13 +194,16 @@ void JIT::privateCompileMainPass()
     jitAssertArgumentCountSane();

     auto& instructions = m_codeBlock->instructions();
-    unsigned instructionCount = m_codeBlock->instructions().size();

     m_callLinkInfoIndex = 0;

-    VM& vm = *m_codeBlock->vm();
+    // OOPS: Don't keep recompiling the same traces!
+    //VM& vm = *m_codeBlock->vm();
     unsigned startBytecodeOffset = 0;
-    if (m_loopOSREntryBytecodeOffset && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) {
+
+    // OOPS: probs not needed anymore.
+    /*
+    if (!m_isTracelet && m_loopOSREntryBytecodeOffset && (m_codeBlock->inherits<ProgramCodeBlock>(vm) || m_codeBlock->inherits<ModuleProgramCodeBlock>(vm))) {
         // We can only do this optimization because we execute ProgramCodeBlock's exactly once.
         // This optimization would be invalid otherwise. When the LLInt determines it wants to
         // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it
@@ -232,230 +242,245 @@ void JIT::privateCompileMainPass()
             }
         }
     }
+    */

-    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
-        if (m_bytecodeOffset == startBytecodeOffset && startBytecodeOffset > 0) {
-            // We've proven all bytecode instructions up until here are unreachable.
-            // Let's ensure that by crashing if it's ever hit.
-            breakpoint();
-        }
+    for (const TraceProfile& trace : m_traces) {
+        if (verboseProbes)
+            dataLogLn("Compiling trace: [", trace.start, ", ", trace.end, ")");
+
+        for (m_bytecodeOffset = trace.start; m_bytecodeOffset < trace.end; ) {
+            if (m_bytecodeOffset == startBytecodeOffset && startBytecodeOffset > 0) {
+                // We've proven all bytecode instructions up until here are unreachable.
+                // Let's ensure that by crashing if it's ever hit.
+                breakpoint();
+            }

-        if (m_disassembler)
-            m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
-        const Instruction* currentInstruction = instructions.at(m_bytecodeOffset).ptr();
-        ASSERT_WITH_MESSAGE(currentInstruction->size(), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
+            if (m_disassembler)
+                m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
+            const Instruction* currentInstruction = instructions.at(m_bytecodeOffset).ptr();
+            ASSERT_WITH_MESSAGE(currentInstruction->size(), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

-        m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
+            m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));

 #if ENABLE(OPCODE_SAMPLING)
-        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
-            sampleInstruction(currentInstruction);
+            if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
+                sampleInstruction(currentInstruction);
 #endif

-        m_labels[m_bytecodeOffset] = label();
+            m_labels.add(m_bytecodeOffset, label());
+            if (m_bytecodeOffset == trace.start && verboseProbes)
+                print("Started running trace in: ", m_codeBlock->inferredName().data(), "#", m_codeBlock->hashAsStringIfPossible().data(), " [", trace.start, ", ", trace.end, ")\n");

-        if (JITInternal::verbose)
-            dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
+            if (JITInternal::verbose)
+                dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());

-        OpcodeID opcodeID = currentInstruction->opcodeID();
+            OpcodeID opcodeID = currentInstruction->opcodeID();

-        if (UNLIKELY(m_compilation)) {
-            add64(
-                TrustedImm32(1),
-                AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
-                    m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
-        }
-
-        if (Options::eagerlyUpdateTopCallFrame())
-            updateTopCallFrame();
+            if (UNLIKELY(m_compilation)) {
+                add64(
+                    TrustedImm32(1),
+                    AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
+                        m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
+            }
+
+            if (Options::eagerlyUpdateTopCallFrame())
+                updateTopCallFrame();

-        unsigned bytecodeOffset = m_bytecodeOffset;
+            unsigned bytecodeOffset = m_bytecodeOffset;
 #if ENABLE(MASM_PROBE)
-        if (UNLIKELY(Options::traceBaselineJITExecution())) {
-            CodeBlock* codeBlock = m_codeBlock;
-            probe([=] (Probe::Context& ctx) {
-                dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
-            });
-        }
+            if (UNLIKELY(Options::traceBaselineJITExecution())) {
+                CodeBlock* codeBlock = m_codeBlock;
+                probe([=] (Probe::Context& ctx) {
+                    dataLogLn("JIT [", bytecodeOffset, "] ", opcodeNames[opcodeID], " cfr ", RawPointer(ctx.fp()), " @ ", codeBlock);
+                });
+            }
 #endif

-        switch (opcodeID) {
-        DEFINE_SLOW_OP(in_by_val)
-        DEFINE_SLOW_OP(less)
-        DEFINE_SLOW_OP(lesseq)
-
DEFINE_SLOW_OP(greater) >- DEFINE_SLOW_OP(greatereq) >- DEFINE_SLOW_OP(is_function) >- DEFINE_SLOW_OP(is_object_or_null) >- DEFINE_SLOW_OP(typeof) >- DEFINE_SLOW_OP(strcat) >- DEFINE_SLOW_OP(push_with_scope) >- DEFINE_SLOW_OP(create_lexical_environment) >- DEFINE_SLOW_OP(get_by_val_with_this) >- DEFINE_SLOW_OP(put_by_id_with_this) >- DEFINE_SLOW_OP(put_by_val_with_this) >- DEFINE_SLOW_OP(resolve_scope_for_hoisting_func_decl_in_eval) >- DEFINE_SLOW_OP(define_data_property) >- DEFINE_SLOW_OP(define_accessor_property) >- DEFINE_SLOW_OP(unreachable) >- DEFINE_SLOW_OP(throw_static_error) >- DEFINE_SLOW_OP(new_array_with_spread) >- DEFINE_SLOW_OP(new_array_buffer) >- DEFINE_SLOW_OP(spread) >- DEFINE_SLOW_OP(get_enumerable_length) >- DEFINE_SLOW_OP(has_generic_property) >- DEFINE_SLOW_OP(get_property_enumerator) >- DEFINE_SLOW_OP(to_index_string) >- DEFINE_SLOW_OP(create_direct_arguments) >- DEFINE_SLOW_OP(create_scoped_arguments) >- DEFINE_SLOW_OP(create_cloned_arguments) >- DEFINE_SLOW_OP(create_rest) >- DEFINE_SLOW_OP(pow) >- >- DEFINE_OP(op_add) >- DEFINE_OP(op_bitnot) >- DEFINE_OP(op_bitand) >- DEFINE_OP(op_bitor) >- DEFINE_OP(op_bitxor) >- DEFINE_OP(op_call) >- DEFINE_OP(op_tail_call) >- DEFINE_OP(op_call_eval) >- DEFINE_OP(op_call_varargs) >- DEFINE_OP(op_tail_call_varargs) >- DEFINE_OP(op_tail_call_forward_arguments) >- DEFINE_OP(op_construct_varargs) >- DEFINE_OP(op_catch) >- DEFINE_OP(op_construct) >- DEFINE_OP(op_create_this) >- DEFINE_OP(op_to_this) >- DEFINE_OP(op_get_argument) >- DEFINE_OP(op_argument_count) >- DEFINE_OP(op_get_rest_length) >- DEFINE_OP(op_check_tdz) >- DEFINE_OP(op_identity_with_profile) >- DEFINE_OP(op_debug) >- DEFINE_OP(op_del_by_id) >- DEFINE_OP(op_del_by_val) >- DEFINE_OP(op_div) >- DEFINE_OP(op_end) >- DEFINE_OP(op_enter) >- DEFINE_OP(op_get_scope) >- DEFINE_OP(op_eq) >- DEFINE_OP(op_eq_null) >- DEFINE_OP(op_below) >- DEFINE_OP(op_beloweq) >- DEFINE_OP(op_try_get_by_id) >- DEFINE_OP(op_in_by_id) >- DEFINE_OP(op_get_by_id) >- DEFINE_OP(op_get_by_id_with_this) >- DEFINE_OP(op_get_by_id_direct) >- DEFINE_OP(op_get_by_val) >- DEFINE_OP(op_overrides_has_instance) >- DEFINE_OP(op_instanceof) >- DEFINE_OP(op_instanceof_custom) >- DEFINE_OP(op_is_empty) >- DEFINE_OP(op_is_undefined) >- DEFINE_OP(op_is_undefined_or_null) >- DEFINE_OP(op_is_boolean) >- DEFINE_OP(op_is_number) >- DEFINE_OP(op_is_object) >- DEFINE_OP(op_is_cell_with_type) >- DEFINE_OP(op_jeq_null) >- DEFINE_OP(op_jfalse) >- DEFINE_OP(op_jmp) >- DEFINE_OP(op_jneq_null) >- DEFINE_OP(op_jneq_ptr) >- DEFINE_OP(op_jless) >- DEFINE_OP(op_jlesseq) >- DEFINE_OP(op_jgreater) >- DEFINE_OP(op_jgreatereq) >- DEFINE_OP(op_jnless) >- DEFINE_OP(op_jnlesseq) >- DEFINE_OP(op_jngreater) >- DEFINE_OP(op_jngreatereq) >- DEFINE_OP(op_jeq) >- DEFINE_OP(op_jneq) >- DEFINE_OP(op_jstricteq) >- DEFINE_OP(op_jnstricteq) >- DEFINE_OP(op_jbelow) >- DEFINE_OP(op_jbeloweq) >- DEFINE_OP(op_jtrue) >- DEFINE_OP(op_loop_hint) >- DEFINE_OP(op_check_traps) >- DEFINE_OP(op_nop) >- DEFINE_OP(op_super_sampler_begin) >- DEFINE_OP(op_super_sampler_end) >- DEFINE_OP(op_lshift) >- DEFINE_OP(op_mod) >- DEFINE_OP(op_mov) >- DEFINE_OP(op_mul) >- DEFINE_OP(op_negate) >- DEFINE_OP(op_neq) >- DEFINE_OP(op_neq_null) >- DEFINE_OP(op_new_array) >- DEFINE_OP(op_new_array_with_size) >- DEFINE_OP(op_new_func) >- DEFINE_OP(op_new_func_exp) >- DEFINE_OP(op_new_generator_func) >- DEFINE_OP(op_new_generator_func_exp) >- DEFINE_OP(op_new_async_func) >- DEFINE_OP(op_new_async_func_exp) >- DEFINE_OP(op_new_async_generator_func) >- 
DEFINE_OP(op_new_async_generator_func_exp) >- DEFINE_OP(op_new_object) >- DEFINE_OP(op_new_regexp) >- DEFINE_OP(op_not) >- DEFINE_OP(op_nstricteq) >- DEFINE_OP(op_dec) >- DEFINE_OP(op_inc) >- DEFINE_OP(op_profile_type) >- DEFINE_OP(op_profile_control_flow) >- DEFINE_OP(op_get_parent_scope) >- DEFINE_OP(op_put_by_id) >- DEFINE_OP(op_put_by_val_direct) >- DEFINE_OP(op_put_by_val) >- DEFINE_OP(op_put_getter_by_id) >- DEFINE_OP(op_put_setter_by_id) >- DEFINE_OP(op_put_getter_setter_by_id) >- DEFINE_OP(op_put_getter_by_val) >- DEFINE_OP(op_put_setter_by_val) >- >- DEFINE_OP(op_ret) >- DEFINE_OP(op_rshift) >- DEFINE_OP(op_unsigned) >- DEFINE_OP(op_urshift) >- DEFINE_OP(op_set_function_name) >- DEFINE_OP(op_stricteq) >- DEFINE_OP(op_sub) >- DEFINE_OP(op_switch_char) >- DEFINE_OP(op_switch_imm) >- DEFINE_OP(op_switch_string) >- DEFINE_OP(op_throw) >- DEFINE_OP(op_to_number) >- DEFINE_OP(op_to_string) >- DEFINE_OP(op_to_object) >- DEFINE_OP(op_to_primitive) >- >- DEFINE_OP(op_resolve_scope) >- DEFINE_OP(op_get_from_scope) >- DEFINE_OP(op_put_to_scope) >- DEFINE_OP(op_get_from_arguments) >- DEFINE_OP(op_put_to_arguments) >- >- DEFINE_OP(op_has_structure_property) >- DEFINE_OP(op_has_indexed_property) >- DEFINE_OP(op_get_direct_pname) >- DEFINE_OP(op_enumerator_structure_pname) >- DEFINE_OP(op_enumerator_generic_pname) >- >- DEFINE_OP(op_log_shadow_chicken_prologue) >- DEFINE_OP(op_log_shadow_chicken_tail) >- default: >- RELEASE_ASSERT_NOT_REACHED(); >+ switch (opcodeID) { >+ DEFINE_SLOW_OP(in_by_val) >+ DEFINE_SLOW_OP(less) >+ DEFINE_SLOW_OP(lesseq) >+ DEFINE_SLOW_OP(greater) >+ DEFINE_SLOW_OP(greatereq) >+ DEFINE_SLOW_OP(is_function) >+ DEFINE_SLOW_OP(is_object_or_null) >+ DEFINE_SLOW_OP(typeof) >+ DEFINE_SLOW_OP(strcat) >+ DEFINE_SLOW_OP(push_with_scope) >+ DEFINE_SLOW_OP(create_lexical_environment) >+ DEFINE_SLOW_OP(get_by_val_with_this) >+ DEFINE_SLOW_OP(put_by_id_with_this) >+ DEFINE_SLOW_OP(put_by_val_with_this) >+ DEFINE_SLOW_OP(resolve_scope_for_hoisting_func_decl_in_eval) >+ DEFINE_SLOW_OP(define_data_property) >+ DEFINE_SLOW_OP(define_accessor_property) >+ DEFINE_SLOW_OP(unreachable) >+ DEFINE_SLOW_OP(throw_static_error) >+ DEFINE_SLOW_OP(new_array_with_spread) >+ DEFINE_SLOW_OP(new_array_buffer) >+ DEFINE_SLOW_OP(spread) >+ DEFINE_SLOW_OP(get_enumerable_length) >+ DEFINE_SLOW_OP(has_generic_property) >+ DEFINE_SLOW_OP(get_property_enumerator) >+ DEFINE_SLOW_OP(to_index_string) >+ DEFINE_SLOW_OP(create_direct_arguments) >+ DEFINE_SLOW_OP(create_scoped_arguments) >+ DEFINE_SLOW_OP(create_cloned_arguments) >+ DEFINE_SLOW_OP(create_rest) >+ DEFINE_SLOW_OP(pow) >+ >+ DEFINE_OP(op_add) >+ DEFINE_OP(op_bitnot) >+ DEFINE_OP(op_bitand) >+ DEFINE_OP(op_bitor) >+ DEFINE_OP(op_bitxor) >+ DEFINE_OP(op_call) >+ DEFINE_OP(op_tail_call) >+ DEFINE_OP(op_call_eval) >+ DEFINE_OP(op_call_varargs) >+ DEFINE_OP(op_tail_call_varargs) >+ DEFINE_OP(op_tail_call_forward_arguments) >+ DEFINE_OP(op_construct_varargs) >+ DEFINE_OP(op_catch) >+ DEFINE_OP(op_construct) >+ DEFINE_OP(op_create_this) >+ DEFINE_OP(op_to_this) >+ DEFINE_OP(op_get_argument) >+ DEFINE_OP(op_argument_count) >+ DEFINE_OP(op_get_rest_length) >+ DEFINE_OP(op_check_tdz) >+ DEFINE_OP(op_identity_with_profile) >+ DEFINE_OP(op_debug) >+ DEFINE_OP(op_del_by_id) >+ DEFINE_OP(op_del_by_val) >+ DEFINE_OP(op_div) >+ DEFINE_OP(op_end) >+ DEFINE_OP(op_enter) >+ DEFINE_OP(op_get_scope) >+ DEFINE_OP(op_eq) >+ DEFINE_OP(op_eq_null) >+ DEFINE_OP(op_below) >+ DEFINE_OP(op_beloweq) >+ DEFINE_OP(op_try_get_by_id) >+ DEFINE_OP(op_in_by_id) >+ 
DEFINE_OP(op_get_by_id) >+ DEFINE_OP(op_get_by_id_with_this) >+ DEFINE_OP(op_get_by_id_direct) >+ DEFINE_OP(op_get_by_val) >+ DEFINE_OP(op_overrides_has_instance) >+ DEFINE_OP(op_instanceof) >+ DEFINE_OP(op_instanceof_custom) >+ DEFINE_OP(op_is_empty) >+ DEFINE_OP(op_is_undefined) >+ DEFINE_OP(op_is_undefined_or_null) >+ DEFINE_OP(op_is_boolean) >+ DEFINE_OP(op_is_number) >+ DEFINE_OP(op_is_object) >+ DEFINE_OP(op_is_cell_with_type) >+ DEFINE_OP(op_jeq_null) >+ DEFINE_OP(op_jfalse) >+ DEFINE_OP(op_jmp) >+ DEFINE_OP(op_jneq_null) >+ DEFINE_OP(op_jneq_ptr) >+ DEFINE_OP(op_jless) >+ DEFINE_OP(op_jlesseq) >+ DEFINE_OP(op_jgreater) >+ DEFINE_OP(op_jgreatereq) >+ DEFINE_OP(op_jnless) >+ DEFINE_OP(op_jnlesseq) >+ DEFINE_OP(op_jngreater) >+ DEFINE_OP(op_jngreatereq) >+ DEFINE_OP(op_jeq) >+ DEFINE_OP(op_jneq) >+ DEFINE_OP(op_jstricteq) >+ DEFINE_OP(op_jnstricteq) >+ DEFINE_OP(op_jbelow) >+ DEFINE_OP(op_jbeloweq) >+ DEFINE_OP(op_jtrue) >+ DEFINE_OP(op_loop_hint) >+ DEFINE_OP(op_trace_hint) >+ DEFINE_OP(op_check_traps) >+ DEFINE_OP(op_nop) >+ DEFINE_OP(op_super_sampler_begin) >+ DEFINE_OP(op_super_sampler_end) >+ DEFINE_OP(op_lshift) >+ DEFINE_OP(op_mod) >+ DEFINE_OP(op_mov) >+ DEFINE_OP(op_mul) >+ DEFINE_OP(op_negate) >+ DEFINE_OP(op_neq) >+ DEFINE_OP(op_neq_null) >+ DEFINE_OP(op_new_array) >+ DEFINE_OP(op_new_array_with_size) >+ DEFINE_OP(op_new_func) >+ DEFINE_OP(op_new_func_exp) >+ DEFINE_OP(op_new_generator_func) >+ DEFINE_OP(op_new_generator_func_exp) >+ DEFINE_OP(op_new_async_func) >+ DEFINE_OP(op_new_async_func_exp) >+ DEFINE_OP(op_new_async_generator_func) >+ DEFINE_OP(op_new_async_generator_func_exp) >+ DEFINE_OP(op_new_object) >+ DEFINE_OP(op_new_regexp) >+ DEFINE_OP(op_not) >+ DEFINE_OP(op_nstricteq) >+ DEFINE_OP(op_dec) >+ DEFINE_OP(op_inc) >+ DEFINE_OP(op_profile_type) >+ DEFINE_OP(op_profile_control_flow) >+ DEFINE_OP(op_get_parent_scope) >+ DEFINE_OP(op_put_by_id) >+ DEFINE_OP(op_put_by_val_direct) >+ DEFINE_OP(op_put_by_val) >+ DEFINE_OP(op_put_getter_by_id) >+ DEFINE_OP(op_put_setter_by_id) >+ DEFINE_OP(op_put_getter_setter_by_id) >+ DEFINE_OP(op_put_getter_by_val) >+ DEFINE_OP(op_put_setter_by_val) >+ >+ DEFINE_OP(op_ret) >+ DEFINE_OP(op_rshift) >+ DEFINE_OP(op_unsigned) >+ DEFINE_OP(op_urshift) >+ DEFINE_OP(op_set_function_name) >+ DEFINE_OP(op_stricteq) >+ DEFINE_OP(op_sub) >+ DEFINE_OP(op_switch_char) >+ DEFINE_OP(op_switch_imm) >+ DEFINE_OP(op_switch_string) >+ DEFINE_OP(op_throw) >+ DEFINE_OP(op_to_number) >+ DEFINE_OP(op_to_string) >+ DEFINE_OP(op_to_object) >+ DEFINE_OP(op_to_primitive) >+ >+ DEFINE_OP(op_resolve_scope) >+ DEFINE_OP(op_get_from_scope) >+ DEFINE_OP(op_put_to_scope) >+ DEFINE_OP(op_get_from_arguments) >+ DEFINE_OP(op_put_to_arguments) >+ >+ DEFINE_OP(op_has_structure_property) >+ DEFINE_OP(op_has_indexed_property) >+ DEFINE_OP(op_get_direct_pname) >+ DEFINE_OP(op_enumerator_structure_pname) >+ DEFINE_OP(op_enumerator_generic_pname) >+ >+ DEFINE_OP(op_log_shadow_chicken_prologue) >+ DEFINE_OP(op_log_shadow_chicken_tail) >+ default: >+ RELEASE_ASSERT_NOT_REACHED(); >+ } >+ >+ if (JITInternal::verbose) >+ dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n"); > } > >- if (JITInternal::verbose) >- dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n"); >+ if (m_isTracelet) { >+ if (verboseProbes) >+ dataLogLn("end of trace going to: ", trace.end); >+ m_jmpTable.append(JumpTable(jump(), trace.end)); >+ } > } > > RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size()); >@@ -468,9 +493,158 @@ void JIT::privateCompileMainPass() > > void 
JIT::privateCompileLinkPass()
> {
>-    unsigned jmpTableCount = m_jmpTable.size();
>-    for (unsigned i = 0; i < jmpTableCount; ++i)
>-        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
>+    // OOPS: concurrency issue here when we look it up in JIT code!
>+    // OOPS: Link to things in JITCode!
>+
>+    auto emitJumpToLLInt = [&] (unsigned bytecodeOffset) {
>+        if (verboseProbes)
>+            dataLogLn("compiling jump to LLInt bc#", bytecodeOffset);
>+        const Instruction& currentInstruction = *m_codeBlock->instructions().at(bytecodeOffset).ptr();
>+        MacroAssemblerCodePtr<JSEntryPtrTag> destination;
>+        if (currentInstruction.isWide())
>+            destination = LLInt::getCodePtr<JSEntryPtrTag>(op_wide); // OOPS! skip over this, but we need to increment the PC, etc.
>+        else
>+            destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction.opcodeID());
>+
>+        auto start = label();
>+        // OOPS: Abstract LLInt registers since this is x86 specific!
>+        move(TrustedImmPtr(m_codeBlock->metadataTable()), GPRInfo::regCS1);
>+        move(TrustedImmPtr(m_codeBlock->instructionsRawPointer()), GPRInfo::regCS2);
>+        move(TrustedImm32(bytecodeOffset), GPRInfo::regT4);
>+        if (verboseProbes)
>+            print("Exiting trace to LLInt: bc#", bytecodeOffset, " to: ", RawPointer(destination.executableAddress()), " is wide: ", currentInstruction.isWide(), " in codeblock: ", m_codeBlock->inferredName().data(), "#", m_codeBlock->hashAsStringIfPossible().data(), "\n");
>+        move(TrustedImmPtr(destination.executableAddress()), GPRInfo::regT0);
>+        jump(GPRInfo::regT0, JSEntryPtrTag);
>+
>+        m_jumpsToLLInt.add(bytecodeOffset, Vector<std::pair<Label, Label>>()).iterator->value.append({ start, label() });
>+    };
>+
>+    auto getLocalBytecode = [&] (unsigned bytecodeOffset) -> Optional<Label> {
>+        auto iter = m_labels.find(bytecodeOffset);
>+        if (iter != m_labels.end())
>+            return iter->value;
>+        return WTF::nullopt;
>+    };
>+
>+    auto getPriorBytecode = [&] (unsigned bytecodeOffset) -> Optional<CodeLocationLabel<JITTraceletPtrTag>> {
>+        if (!m_priorCode)
>+            return WTF::nullopt;
>+
>+        // OOPS: Same concurrency dependency as described above. Should we allow for it?
>+        auto iter = m_priorCode->m_codeLocations.find(bytecodeOffset);
>+        if (iter != m_priorCode->m_codeLocations.end())
>+            return CodeLocationLabel<JITTraceletPtrTag>(iter->value);
>+
>+        return WTF::nullopt;
>+    };
>+
>+    for (const JumpTable& entry : m_jmpTable) {
>+        unsigned bytecodeOffset = entry.toBytecodeOffset;
>+        if (verboseProbes)
>+            dataLogLn("Have jump table entry to: bc#", bytecodeOffset);
>+
>+        if (auto label = getLocalBytecode(bytecodeOffset)) {
>+            entry.from.linkTo(*label, this);
>+            continue;
>+        }
>+
>+        if (auto priorCode = getPriorBytecode(bytecodeOffset)) {
>+            Jump from = entry.from;
>+            addLinkTask([=] (LinkBuffer& linkBuffer) {
>+                linkBuffer.link(from, *priorCode);
>+            });
>+            continue;
>+        }
>+
>+        RELEASE_ASSERT(m_isTracelet);
>+        entry.from.linkTo(label(), this);
>+
>+        if (bytecodeOffset >= m_codeBlock->instructionCount()) {
>+            if (verboseProbes)
>+                dataLogLn("Have jump table entry exceeding instructionCount() bc#", bytecodeOffset);
>+            // This is the ending trace. We should never get here in bytecode, e.g.,
>+            // we should have returned, jumped, or done something to terminate execution
>+            // of this code.
>+            breakpoint();
>+            continue;
>+        }
>+
>+        emitJumpToLLInt(bytecodeOffset);
>+    }
>+
>+    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
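>+    // For clarity, a sketch of the resolution order appendDestination applies
>+    // below; it is the same three-way scheme as the jump-table loop above and
>+    // uses only names already defined in this function:
>+    //
>+    //     if (auto label = getLocalBytecode(target))      // target compiled in this pass
>+    //         jumpDestinations.append(*label);
>+    //     else if (auto prior = getPriorBytecode(target)) // target in prior tracelet code
>+    //         ctiOffset = prior->retagged<JSSwitchPtrTag>(); // (an unset Label is appended)
>+    //     else
>+    //         emitJumpToLLInt(target);                    // fall back to the interpreter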
>+ for (auto& record : m_switches) { >+ Vector<Label> jumpDestinations; >+ auto appendDestination = [&] (CodeLocationLabel<JSSwitchPtrTag>& ctiOffset, unsigned bytecodeOffset) { >+ if (auto label = getLocalBytecode(bytecodeOffset)) { >+ jumpDestinations.append(*label); >+ return; >+ } >+ >+ if (auto priorCode = getPriorBytecode(bytecodeOffset)) { >+ jumpDestinations.append(Label()); >+ ctiOffset = priorCode->retagged<JSSwitchPtrTag>(); >+ return; >+ } >+ >+ jumpDestinations.append(label()); >+ emitJumpToLLInt(bytecodeOffset); >+ }; >+ >+ unsigned bytecodeOffset = record.bytecodeOffset; >+ >+ if (record.type != SwitchRecord::String) { >+ ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character); >+ ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size()); >+ >+ auto* simpleJumpTable = record.jumpTable.simpleJumpTable; >+ appendDestination(simpleJumpTable->ctiDefault, bytecodeOffset + record.defaultOffset); // First is the 'default' case. >+ >+ for (unsigned i = 0; i < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++i) { >+ unsigned offset = simpleJumpTable->branchOffsets[i]; >+ if (offset) >+ appendDestination(simpleJumpTable->ctiOffsets[i], bytecodeOffset + offset); >+ else >+ jumpDestinations.append(jumpDestinations[0]); >+ } >+ >+ addLinkTask([=, jumpDestinations = WTFMove(jumpDestinations)] (LinkBuffer& linkBuffer) { >+ if (jumpDestinations[0].isSet()) >+ simpleJumpTable->ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[0]); >+ >+ for (unsigned i = 0; i < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++i) { >+ if (jumpDestinations[i + 1].isSet()) >+ simpleJumpTable->ctiOffsets[i] = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[i + 1]); >+ } >+ }); >+ } else { >+ ASSERT(record.type == SwitchRecord::String); >+ >+ auto* stringJumpTable = record.jumpTable.stringJumpTable; >+ >+ appendDestination(stringJumpTable->ctiDefault, bytecodeOffset + record.defaultOffset); // First is the 'default' case. 
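>+ // jumpDestinations[0] holds the default target, so entries below whose
>+ // branchOffset is 0 alias it, mirroring the simple jump table case above.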
>+ >+ for (auto& location : stringJumpTable->offsetTable.values()) { >+ unsigned offset = location.branchOffset; >+ if (offset) >+ appendDestination(location.ctiOffset, bytecodeOffset + offset); >+ else >+ jumpDestinations.append(jumpDestinations[0]); >+ } >+ >+ addLinkTask([=, jumpDestinations = WTFMove(jumpDestinations)] (LinkBuffer& linkBuffer) { >+ if (jumpDestinations[0].isSet()) >+ stringJumpTable->ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[0]); >+ unsigned i = 1; >+ for (auto& location : stringJumpTable->offsetTable.values()) { >+ if (jumpDestinations[i].isSet()) >+ location.ctiOffset = linkBuffer.locationOf<JSSwitchPtrTag>(jumpDestinations[i]); >+ ++i; >+ } >+ }); >+ } >+ } >+ > m_jmpTable.clear(); > } > >@@ -545,6 +719,7 @@ void JIT::privateCompileSlowCases() > DEFINE_SLOWCASE_OP(op_jneq) > DEFINE_SLOWCASE_OP(op_jstricteq) > DEFINE_SLOWCASE_OP(op_jnstricteq) >+ DEFINE_SLOWCASE_OP(op_trace_hint) > DEFINE_SLOWCASE_OP(op_loop_hint) > DEFINE_SLOWCASE_OP(op_check_traps) > DEFINE_SLOWCASE_OP(op_mod) >@@ -653,6 +828,7 @@ void JIT::compileWithoutLinking(JITCompi > > if (UNLIKELY(Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler()))) > m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock); >+ > if (UNLIKELY(m_vm->m_perBytecodeProfiler)) { > m_compilation = adoptRef( > new Profiler::Compilation( >@@ -660,7 +836,83 @@ void JIT::compileWithoutLinking(JITCompi > Profiler::Baseline)); > m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock); > } >- >+ >+ unsigned instructionCount = m_codeBlock->instructions().size(); >+ if (m_isTracelet) { >+ //if (m_requiredBytecodeToCompile) >+ // dataLogLn("Required bytecode to compile = bc#", *m_requiredBytecodeToCompile); >+ const Instruction* firstTraceHint = nullptr; >+ for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) { >+ const Instruction* currentInstruction = m_codeBlock->instructions().at(bytecodeOffset).ptr(); >+ if (currentInstruction->opcodeID() == op_trace_hint) { >+ auto bytecode = currentInstruction->as<OpTraceHint>(); >+ auto& metadata = bytecode.metadata(m_codeBlock); >+ >+ //dataLogLn("Looking at trace: [", metadata.m_traceProfile.start, ",", metadata.m_traceProfile.end, ")"); >+ bool shouldCompile = [&] { >+ if (!firstTraceHint && m_requiredBytecodeToCompile && *m_requiredBytecodeToCompile <= metadata.m_traceProfile.start) >+ metadata.m_shouldCompile = true; >+ >+ if (m_requiredBytecodeToCompile && metadata.m_traceProfile.start <= *m_requiredBytecodeToCompile && *m_requiredBytecodeToCompile < metadata.m_traceProfile.end) { >+ //dataLogLn("compiling trace that contains m_requiredBytecodeToCompile bc#", *m_requiredBytecodeToCompile); >+ metadata.m_shouldCompile = true; >+ } >+ >+ // Haven't tripped tier up yet. >+ if (!metadata.m_shouldCompile) { >+ //dataLogLn("\t! should compile"); >+ return false; >+ } >+ >+ // Already compiled. >+ if (metadata.m_entrypoint) { >+ //dataLogLn("\thas entrypoint already"); >+ //dataLogLn("\tin hash table: ", m_priorCode->m_codeLocations.contains(bytecodeOffset)); >+ return false; >+ } >+ >+ // OOPS: This is only safe to do because we: >+ // - Link on the main thread. >+ // - Never compile the same CodeBlock* concurrently. >+ // Is this ok to rely on? >+ if (m_priorCode && m_priorCode->m_codeLocations.contains(bytecodeOffset)) { >+ //dataLogLn("\talready compiled in code locs"); >+ // Already compiled. 
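>+ // (m_codeLocations records every bytecode offset that an earlier
>+ // tracelet compile of this CodeBlock emitted a label for; see JIT::link().)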
>+ return false; >+ } >+ >+ return true; >+ }(); >+ >+ if (!firstTraceHint) { >+ firstTraceHint = currentInstruction; >+ if (shouldCompile) { >+ m_traces.append(TraceProfile { 0, metadata.m_traceProfile.start }); >+ m_isCompilingPrologue = true; >+ } >+ } >+ >+ if (shouldCompile) { >+ //dataLogLn("compiling trace: [", metadata.m_traceProfile.start, ", ", metadata.m_traceProfile.end, ")"); >+ m_traces.append(metadata.m_traceProfile); >+ } >+ } >+ >+ bytecodeOffset += currentInstruction->size(); >+ } >+ } else { >+ TraceProfile wholeTrace; >+ wholeTrace.start = 0; >+ wholeTrace.end = instructionCount; >+ m_traces.append(wholeTrace); >+ m_isCompilingPrologue = true; >+ } >+ >+ if (!m_traces.size()) { >+ //dataLogLn("No traces to compile!"); >+ // OOPS: Make eager options really affect tier up threshold. >+ } >+ > m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr)); > > Label entryLabel(this); >@@ -671,106 +923,120 @@ void JIT::compileWithoutLinking(JITCompi > if (random() & 1) > nop(); > >- emitFunctionPrologue(); >- emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >+ if (m_isCompilingPrologue) { >+ emitFunctionPrologue(); >+ emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >+ } > > Label beginLabel(this); >+ if (verboseProbes) >+ print("executing JIT prologue for: ", m_codeBlock->inferredName().data(), "#", m_codeBlock->hashAsStringIfPossible().data(), "\n"); > >- sampleCodeBlock(m_codeBlock); >+ JumpList stackOverflow; >+ if (m_isCompilingPrologue) { >+ sampleCodeBlock(m_codeBlock); > #if ENABLE(OPCODE_SAMPLING) >- sampleInstruction(m_codeBlock->instructions().begin()); >+ sampleInstruction(m_codeBlock->instructions().begin()); > #endif > >- if (m_codeBlock->codeType() == FunctionCode) { >- ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max()); >- if (shouldEmitProfiling()) { >- for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) { >- // If this is a constructor, then we want to put in a dummy profiling site (to >- // keep things consistent) but we don't actually want to record the dummy value. >- if (m_codeBlock->isConstructor() && !argument) >- continue; >- int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register)); >+ if (m_codeBlock->codeType() == FunctionCode) { >+ ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max()); >+ if (shouldEmitProfiling()) { >+ for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) { >+ // If this is a constructor, then we want to put in a dummy profiling site (to >+ // keep things consistent) but we don't actually want to record the dummy value. 
>+ if (m_codeBlock->isConstructor() && !argument) >+ continue; >+ int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register)); > #if USE(JSVALUE64) >- load64(Address(callFrameRegister, offset), regT0); >+ load64(Address(callFrameRegister, offset), regT0); > #elif USE(JSVALUE32_64) >- load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); >- load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); >+ load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); >+ load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); > #endif >- emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument)); >+ emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument)); >+ } > } > } >- } > >- int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register); >- unsigned maxFrameSize = -frameTopOffset; >- addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1); >- JumpList stackOverflow; >- if (UNLIKELY(maxFrameSize > Options::reservedZoneSize())) >- stackOverflow.append(branchPtr(Above, regT1, callFrameRegister)); >- stackOverflow.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1)); >- >- move(regT1, stackPointerRegister); >- checkStackPointerAlignment(); >- if (Options::zeroStackFrame()) >- clearStackFrame(callFrameRegister, stackPointerRegister, regT0, maxFrameSize); >+ int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register); >+ unsigned maxFrameSize = -frameTopOffset; >+ addPtr(TrustedImm32(frameTopOffset), callFrameRegister, regT1); >+ if (UNLIKELY(maxFrameSize > Options::reservedZoneSize())) >+ stackOverflow.append(branchPtr(Above, regT1, callFrameRegister)); >+ stackOverflow.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1)); >+ >+ move(regT1, stackPointerRegister); >+ checkStackPointerAlignment(); >+ if (Options::zeroStackFrame()) >+ clearStackFrame(callFrameRegister, stackPointerRegister, regT0, maxFrameSize); > >- emitSaveCalleeSaves(); >- emitMaterializeTagCheckRegisters(); >+ emitSaveCalleeSaves(); >+ emitMaterializeTagCheckRegisters(); >+ } > >- RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType())); >+ //RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType())); > > privateCompileMainPass(); >- privateCompileLinkPass(); >+ //privateCompileLinkPass(); >+ if (m_disassembler) >+ m_disassembler->setStartOfSlowPath(label()); > privateCompileSlowCases(); > > if (m_disassembler) > m_disassembler->setEndOfSlowPath(label()); > m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); > >- stackOverflow.link(this); >- m_bytecodeOffset = 0; >- if (maxFrameExtentForSlowPathCall) >- addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister); >- callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock); >- >- // If the number of parameters is 1, we never require arity fixup. 
>- bool requiresArityFixup = m_codeBlock->m_numParameters != 1; >- if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) { >- m_arityCheck = label(); >- store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined); >- emitFunctionPrologue(); >- emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >- >- load32(payloadFor(CallFrameSlot::argumentCount), regT1); >- branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this); >- >+ if (m_isCompilingPrologue) { >+ stackOverflow.link(this); > m_bytecodeOffset = 0; >- > if (maxFrameExtentForSlowPathCall) > addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister); >- callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck); >- if (maxFrameExtentForSlowPathCall) >- addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister); >- branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this); >- move(returnValueGPR, GPRInfo::argumentGPR0); >- emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).retaggedCode<NoPtrTag>()); >+ callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock); >+ } >+ >+ if (m_isCompilingPrologue) { >+ // If the number of parameters is 1, we never require arity fixup. >+ bool requiresArityFixup = m_codeBlock->m_numParameters != 1; >+ if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) { >+ m_arityCheck = label(); >+ if (verboseProbes) >+ print("executing JIT arity check prologue for: ", m_codeBlock->inferredName().data(), "\n"); >+ store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined); >+ emitFunctionPrologue(); >+ emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); >+ >+ load32(payloadFor(CallFrameSlot::argumentCount), regT1); >+ branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this); >+ >+ m_bytecodeOffset = 0; >+ >+ if (maxFrameExtentForSlowPathCall) >+ addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister); >+ callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck); >+ if (maxFrameExtentForSlowPathCall) >+ addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister); >+ branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this); >+ move(returnValueGPR, GPRInfo::argumentGPR0); >+ emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).retaggedCode<NoPtrTag>()); > > #if !ASSERT_DISABLED >- m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs. >+ m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs. > #endif > >- jump(beginLabel); >- } else >- m_arityCheck = entryLabel; // Never require arity fixup. >+ jump(beginLabel); >+ } else >+ m_arityCheck = entryLabel; // Never require arity fixup. >+ } > >- ASSERT(m_jmpTable.isEmpty()); >- >+ privateCompileLinkPass(); > privateCompileExceptionHandlers(); > > if (m_disassembler) > m_disassembler->setEndOfCode(label()); > m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); > >+ // OOPS: Need to clear stale codeblock data on fails! 
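>+ // A condensed view of the compile flow above; unlike the old JIT, linking
>+ // now runs last, once labels from every selected trace and any prologue or
>+ // arity-check code exist:
>+ //
>+ //     privateCompileMainPass();          // one loop iteration per trace
>+ //     privateCompileSlowCases();
>+ //     if (m_isCompilingPrologue) { ... } // stack check, arity fixup
>+ //     privateCompileLinkPass();          // local label -> prior code -> LLInt
>+ //     privateCompileExceptionHandlers();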
> m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*this, m_codeBlock, effort)); > > MonotonicTime after { }; >@@ -789,48 +1055,22 @@ void JIT::compileWithoutLinking(JITCompi > > CompilationResult JIT::link() > { >+ if (m_isTracelet && m_traces.isEmpty()) >+ return CompilationDeferred; >+ > LinkBuffer& patchBuffer = *m_linkBuffer; > > if (patchBuffer.didFailToAllocate()) > return CompilationFailed; > >- // Translate vPC offsets into addresses in JIT generated code, for switch tables. >- for (auto& record : m_switches) { >- unsigned bytecodeOffset = record.bytecodeOffset; >- >- if (record.type != SwitchRecord::String) { >- ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character); >- ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size()); >- >- auto* simpleJumpTable = record.jumpTable.simpleJumpTable; >- simpleJumpTable->ctiDefault = patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]); >- >- for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) { >- unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j]; >- simpleJumpTable->ctiOffsets[j] = offset >- ? patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + offset]) >- : simpleJumpTable->ctiDefault; >- } >- } else { >- ASSERT(record.type == SwitchRecord::String); >- >- auto* stringJumpTable = record.jumpTable.stringJumpTable; >- stringJumpTable->ctiDefault = >- patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + record.defaultOffset]); >- >- for (auto& location : stringJumpTable->offsetTable.values()) { >- unsigned offset = location.branchOffset; >- location.ctiOffset = offset >- ? patchBuffer.locationOf<JSSwitchPtrTag>(m_labels[bytecodeOffset + offset]) >- : stringJumpTable->ctiDefault; >- } >- } >- } >- > for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) { > HandlerInfo& handler = m_codeBlock->exceptionHandler(i); >+ >+ // OOPS: handle this! >+ UNUSED_PARAM(handler); >+ > // FIXME: <rdar://problem/39433318>. 
>- handler.nativeCode = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_labels[handler.target]); >+ //handler.nativeCode = patchBuffer.locationOf<ExceptionHandlerPtrTag>(m_labels[handler.target]); > } > > for (auto& record : m_calls) { >@@ -878,15 +1118,12 @@ CompilationResult JIT::link() > patchBuffer.locationOfNearCall<JSInternalPtrTag>(compilationInfo.hotPathOther)); > } > >- JITCodeMap jitCodeMap; >- for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) { >- if (m_labels[bytecodeOffset].isSet()) >- jitCodeMap.append(bytecodeOffset, patchBuffer.locationOf<JSEntryPtrTag>(m_labels[bytecodeOffset])); >- } >- jitCodeMap.finish(); >- m_codeBlock->setJITCodeMap(WTFMove(jitCodeMap)); >- >- MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck); >+ //JITCodeMap jitCodeMap; >+ //for (auto entry : m_labels) { >+ // jitCodeMap.append(entry.key, patchBuffer.locationOf<JSEntryPtrTag>(entry.value)); >+ //} >+ //jitCodeMap.finish(); >+ //m_codeBlock->setJITCodeMap(WTFMove(jitCodeMap)); > > if (Options::dumpDisassembly()) { > m_disassembler->dump(patchBuffer); >@@ -903,15 +1140,84 @@ CompilationResult JIT::link() > > CodeRef<JSEntryPtrTag> result = FINALIZE_CODE( > patchBuffer, JSEntryPtrTag, >- "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()); >+ "Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITType::BaselineJIT)).data()); > >+ // OOPS: this is now wrong. Fix to be # insns in trace. > m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add( > static_cast<double>(result.size()) / > static_cast<double>(m_codeBlock->instructionCount())); > >- m_codeBlock->shrinkToFit(CodeBlock::LateShrink); >- m_codeBlock->setJITCode( >- adoptRef(*new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT))); >+ TraceletJITCode* traceletJITCode; >+ RefPtr<JITCode> jitCode = m_codeBlock->jitCode(); >+ if (!jitCode) { >+ // OOPS: should shrinkToFit perhaps for all of these? 
This code path is only taken when useLLInt=0 >+ //m_codeBlock->shrinkToFit(CodeBlock::LateShrink); >+ MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck); >+ traceletJITCode = new TraceletJITCode(result, withArityCheck, JITType::BaselineJIT); >+ m_codeBlock->setJITCode( >+ adoptRef(*traceletJITCode)); >+ } else if (jitCode->isTraceletJITCode()) >+ traceletJITCode = static_cast<TraceletJITCode*>(jitCode.get()); >+ else { >+ RELEASE_ASSERT(jitCode->isJITCodeWithCodeRef()); >+ JITCodeWithCodeRef* jitCodeWithCodeRef = static_cast<JITCodeWithCodeRef*>(jitCode.get()); >+ MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = jitCodeWithCodeRef->codeRef(); >+ MacroAssemblerCodePtr<JSEntryPtrTag> arityCheck = jitCodeWithCodeRef->addressForCall(MustCheckArity); >+ traceletJITCode = new TraceletJITCode(codeRef, arityCheck, JITType::BaselineJIT); >+ //dataLogLn("Allocated tracelet JIT code: ", RawPointer(traceletJITCode)); >+ m_codeBlock->setJITCode(adoptRef(*traceletJITCode)); >+ } >+ >+ //traceletJITCode->m_codeRefs.append(WTFMove(result)); >+ traceletJITCode->m_codeRefs.append(result.retagged<JITTraceletPtrTag>()); >+ >+ for (const auto& entry : m_jumpsToLLInt) { >+ for (auto pair : entry.value) { >+ Label start = pair.first; >+ Label end = pair.second; >+ MacroAssemblerCodePtr<JITTraceletPtrTag> startPtr = patchBuffer.locationOf<JITTraceletPtrTag>(start); >+ MacroAssemblerCodePtr<JITTraceletPtrTag> endPtr = patchBuffer.locationOf<JITTraceletPtrTag>(end); >+ >+ traceletJITCode->m_jumpsToLLIntBytecode.add(entry.key, Vector<std::pair<MacroAssemblerCodePtr<JITTraceletPtrTag>, unsigned>>()).iterator->value.append( >+ { startPtr, endPtr.dataLocation<uintptr_t>() - startPtr.dataLocation<uintptr_t>() }); >+ } >+ } >+ >+ if (m_isCompilingPrologue) { >+ if (verboseProbes) >+ dataLogLn("compiling prologue: ", RawPointer(result.code().executableAddress())); >+ traceletJITCode->installPrologue(result, patchBuffer.locationOf<JSEntryPtrTag>(m_arityCheck)); >+ } >+ >+ for (auto entry : m_labels) { >+ unsigned bytecodeOffset = entry.key; >+ auto codeLabel = patchBuffer.locationOf<JSEntryPtrTag>(entry.value); >+ traceletJITCode->m_codeLocations.add(bytecodeOffset, codeLabel.retagged<JITTraceletPtrTag>()); >+ >+ auto iter = traceletJITCode->m_jumpsToLLIntBytecode.find(bytecodeOffset); >+ if (iter != traceletJITCode->m_jumpsToLLIntBytecode.end()) { >+ if (verboseProbes) >+ dataLogLn("repatching prior to LLInt jump to go to new JIT code: bc#", bytecodeOffset); >+ for (auto pair : iter->value) { >+ CCallHelpers jit; >+ auto jump = jit.jump(); >+ >+ while (jit.m_assembler.codeSize() < pair.second) >+ jit.breakpoint(); >+ >+ LinkBuffer linkBuffer(jit, pair.first, pair.second, JITCompilationCanFail); >+ if (linkBuffer.isValid()) { >+ // OOPS: We could probs just assert this! 
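>+ // Each trampoline in m_jumpsToLLIntBytecode was recorded with its
>+ // [start, end) extent, and the replacement jump above is padded with
>+ // breakpoints up to exactly that size, so it can be written over the old
>+ // trampoline in place.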
>+ if (verboseProbes) >+ dataLogLn("\trepatching from: ", RawPointer(pair.first.dataLocation())); >+ linkBuffer.link(jump, codeLabel); >+ FINALIZE_CODE(linkBuffer, NoPtrTag, "TraceletJIT: linking constant jump to away from LLInt to newly allocated JIT code for bc#%d", bytecodeOffset); >+ } >+ } >+ >+ traceletJITCode->m_jumpsToLLIntBytecode.remove(iter); >+ } >+ } > > if (JITInternal::verbose) > dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr()); >@@ -983,6 +1289,8 @@ unsigned JIT::frameRegisterCountFor(Code > > int JIT::stackPointerOffsetFor(CodeBlock* codeBlock) > { >+ //return - codeBlock->numCalleeLocals(); >+ // OOPS: This sort of disagrees with the LLInt? > return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset(); > } > >Index: Source/JavaScriptCore/jit/JIT.h >=================================================================== >--- Source/JavaScriptCore/jit/JIT.h (revision 244505) >+++ Source/JavaScriptCore/jit/JIT.h (working copy) >@@ -200,9 +200,12 @@ namespace JSC { > > void doMainThreadPreparationBeforeCompile(); > >- static CompilationResult compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, unsigned bytecodeOffset = 0) >+ static CompilationResult compileNow(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, unsigned requiredBytecodeOffset, bool isTracelet) > { >- return JIT(vm, codeBlock, bytecodeOffset).privateCompile(effort); >+ JIT jit(vm, codeBlock); >+ jit.m_isTracelet = isTracelet; >+ jit.m_requiredBytecodeToCompile = requiredBytecodeOffset; >+ return jit.privateCompile(effort); > } > > static void compileGetByVal(const ConcurrentJSLocker& locker, VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) >@@ -571,6 +574,7 @@ namespace JSC { > void emit_op_jbeloweq(const Instruction*); > void emit_op_jtrue(const Instruction*); > void emit_op_loop_hint(const Instruction*); >+ void emit_op_trace_hint(const Instruction*); > void emit_op_check_traps(const Instruction*); > void emit_op_nop(const Instruction*); > void emit_op_super_sampler_begin(const Instruction*); >@@ -669,6 +673,7 @@ namespace JSC { > void emitSlow_op_jnstricteq(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_jtrue(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_loop_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&); >+ void emitSlow_op_trace_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&); > void emitSlow_op_mul(const Instruction*, Vector<SlowCaseEntry>::iterator&); >@@ -918,7 +923,8 @@ namespace JSC { > Interpreter* m_interpreter; > > Vector<CallRecord> m_calls; >- Vector<Label> m_labels; >+ //Vector<Label> m_labels; >+ HashMap<unsigned, Label, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_labels; > Vector<JITGetByIdGenerator> m_getByIds; > Vector<JITGetByIdWithThisGenerator> m_getByIdsWithThis; > Vector<JITPutByIdGenerator> m_putByIds; >@@ -963,6 +969,13 @@ namespace JSC { > bool m_shouldEmitProfiling; > bool m_shouldUseIndexMasking; > unsigned m_loopOSREntryBytecodeOffset { 0 }; >+ public: >+ bool m_isTracelet { false }; >+ Optional<unsigned> m_requiredBytecodeToCompile; >+ bool m_isCompilingPrologue { false }; >+ Vector<TraceProfile> m_traces; 
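>+ // JIT code produced by an earlier tracelet compile of this CodeBlock, if
>+ // any; the link pass consults its code locations before falling back to a
>+ // jump into the LLInt.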
>+ RefPtr<TraceletJITCode> m_priorCode; >+ HashMap<unsigned, Vector<std::pair<Label, Label>>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_jumpsToLLInt; > }; > > } // namespace JSC >Index: Source/JavaScriptCore/jit/JITCode.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITCode.cpp (revision 244505) >+++ Source/JavaScriptCore/jit/JITCode.cpp (working copy) >@@ -36,26 +36,34 @@ JITCode::JITCode(JITType jitType, ShareA > : m_jitType(jitType) > , m_shareAttribute(shareAttribute) > { >+ //dataLogLn("Allocated JITCode: ", RawPointer(this)); >+ //WTFReportBacktrace(); >+ //dataLogLn(); >+ //dataLogLn(); > } > > JITCode::~JITCode() > { >+ //dataLogLn("Deallocated JITCode: ", RawPointer(this)); >+ //WTFReportBacktrace(); >+ //dataLogLn(); >+ //dataLogLn(); > } > > const char* JITCode::typeName(JITType jitType) > { > switch (jitType) { >- case None: >+ case JITType::None: > return "None"; >- case HostCallThunk: >+ case JITType::HostCallThunk: > return "Host"; >- case InterpreterThunk: >+ case JITType::InterpreterThunk: > return "LLInt"; >- case BaselineJIT: >+ case JITType::BaselineJIT: > return "Baseline"; >- case DFGJIT: >+ case JITType::DFGJIT: > return "DFG"; >- case FTLJIT: >+ case JITType::FTLJIT: > return "FTL"; > default: > CRASH(); >@@ -237,11 +245,17 @@ RegisterSet JITCode::liveRegistersToPres > } > #endif > >+void TraceletJITCode::installPrologue(CodeRef<JSEntryPtrTag> entry, CodePtr<JSEntryPtrTag> withArityCheck) >+{ >+ m_ref = WTFMove(entry); >+ m_withArityCheck = withArityCheck; >+} >+ > } // namespace JSC > > namespace WTF { > >-void printInternal(PrintStream& out, JSC::JITCode::JITType type) >+void printInternal(PrintStream& out, JSC::JITType type) > { > out.print(JSC::JITCode::typeName(type)); > } >Index: Source/JavaScriptCore/jit/JITCode.h >=================================================================== >--- Source/JavaScriptCore/jit/JITCode.h (revision 244505) >+++ Source/JavaScriptCore/jit/JITCode.h (working copy) >@@ -51,50 +51,51 @@ struct ProtoCallFrame; > class TrackedReferences; > class VM; > >+enum class JITType : uint8_t { >+ None, >+ HostCallThunk, >+ InterpreterThunk, >+ BaselineJIT, >+ DFGJIT, >+ FTLJIT >+}; >+ > class JITCode : public ThreadSafeRefCounted<JITCode> { >+ using Base = ThreadSafeRefCounted<JITCode>; > public: > template<PtrTag tag> using CodePtr = MacroAssemblerCodePtr<tag>; > template<PtrTag tag> using CodeRef = MacroAssemblerCodeRef<tag>; > >- enum JITType : uint8_t { >- None, >- HostCallThunk, >- InterpreterThunk, >- BaselineJIT, >- DFGJIT, >- FTLJIT >- }; >- > static const char* typeName(JITType); > > static JITType bottomTierJIT() > { >- return BaselineJIT; >+ return JITType::BaselineJIT; > } > > static JITType topTierJIT() > { >- return FTLJIT; >+ return JITType::FTLJIT; > } > > static JITType nextTierJIT(JITType jitType) > { > switch (jitType) { >- case BaselineJIT: >- return DFGJIT; >- case DFGJIT: >- return FTLJIT; >+ case JITType::BaselineJIT: >+ return JITType::DFGJIT; >+ case JITType::DFGJIT: >+ return JITType::FTLJIT; > default: > RELEASE_ASSERT_NOT_REACHED(); >- return None; >+ return JITType::None; > } > } > > static bool isExecutableScript(JITType jitType) > { > switch (jitType) { >- case None: >- case HostCallThunk: >+ case JITType::None: >+ case JITType::HostCallThunk: > return false; > default: > return true; >@@ -104,8 +105,8 @@ public: > static bool couldBeInterpreted(JITType jitType) > { > switch (jitType) { >- case InterpreterThunk: >- case 
BaselineJIT: >+ case JITType::InterpreterThunk: >+ case JITType::BaselineJIT: > return true; > default: > return false; >@@ -115,9 +116,9 @@ public: > static bool isJIT(JITType jitType) > { > switch (jitType) { >- case BaselineJIT: >- case DFGJIT: >- case FTLJIT: >+ case JITType::BaselineJIT: >+ case JITType::DFGJIT: >+ case JITType::FTLJIT: > return true; > default: > return false; >@@ -148,12 +149,12 @@ public: > > static bool isOptimizingJIT(JITType jitType) > { >- return jitType == DFGJIT || jitType == FTLJIT; >+ return jitType == JITType::DFGJIT || jitType == JITType::FTLJIT; > } > > static bool isBaselineCode(JITType jitType) > { >- return jitType == InterpreterThunk || jitType == BaselineJIT; >+ return jitType == JITType::InterpreterThunk || jitType == JITType::BaselineJIT; > } > > virtual const DOMJIT::Signature* signature() const { return nullptr; } >@@ -178,7 +179,7 @@ public: > static JITType jitTypeFor(PointerType jitCode) > { > if (!jitCode) >- return None; >+ return JITType::None; > return jitCode->jitType(); > } > >@@ -203,6 +204,9 @@ public: > > virtual bool contains(void*) = 0; > >+ virtual bool isTraceletJITCode() const { return false; } >+ virtual bool isJITCodeWithCodeRef() const { return false; } >+ > #if ENABLE(JIT) > virtual RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex); > virtual Optional<CodeOrigin> findPC(CodeBlock*, void* pc) { UNUSED_PARAM(pc); return WTF::nullopt; } >@@ -233,6 +237,9 @@ public: > size_t size() override; > bool contains(void*) override; > >+ bool isJITCodeWithCodeRef() const override { return true; } >+ CodeRef<JSEntryPtrTag> codeRef() { return m_ref; } >+ > protected: > CodeRef<JSEntryPtrTag> m_ref; > }; >@@ -248,11 +255,42 @@ public: > > protected: > void initializeCodeRefForDFG(CodeRef<JSEntryPtrTag>, CodePtr<JSEntryPtrTag> withArityCheck); >- >-private: > CodePtr<JSEntryPtrTag> m_withArityCheck; > }; > >+class TraceletJITCode : public DirectJITCode { >+ using Base = DirectJITCode; >+public: >+ TraceletJITCode(CodeRef<JSEntryPtrTag> entry, CodePtr<JSEntryPtrTag> withArityCheck, JITType type, JITCode::ShareAttribute shareAttribute = JITCode::ShareAttribute::NotShared) >+ : Base(WTFMove(entry), WTFMove(withArityCheck), type, shareAttribute) >+ { } >+ >+ bool isTraceletJITCode() const override { return true; } >+ >+ MacroAssemblerCodePtr<JITTraceletPtrTag> exceptionCheckWithCallFrameRollback() { return m_exceptionCheckWithCallFrameRollback; } >+ MacroAssemblerCodePtr<JITTraceletPtrTag> exceptionHandler() { return m_exceptionHandler; } >+ MacroAssemblerCodePtr<JITTraceletPtrTag> stackOverflowHandler() { return m_stackOverflowHandler; } >+ >+ void installPrologue(CodeRef<JSEntryPtrTag> entry, CodePtr<JSEntryPtrTag> withArityCheck); >+ >+ MacroAssemblerCodePtr<JITTraceletPtrTag> findCodeLocation(unsigned bytecodeOffset) >+ { >+ auto iter = m_codeLocations.find(bytecodeOffset); >+ if (iter != m_codeLocations.end()) >+ return iter->value; >+ return { }; >+ } >+ >+public: >+//private: >+ HashMap<unsigned, MacroAssemblerCodePtr<JITTraceletPtrTag>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_codeLocations; >+ HashMap<unsigned, Vector<std::pair<MacroAssemblerCodePtr<JITTraceletPtrTag>, unsigned>>, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_jumpsToLLIntBytecode; >+ Vector<CodeRef<JITTraceletPtrTag>> m_codeRefs; >+ MacroAssemblerCodePtr<JITTraceletPtrTag> m_exceptionCheckWithCallFrameRollback; >+ MacroAssemblerCodePtr<JITTraceletPtrTag> m_exceptionHandler; 
>+ MacroAssemblerCodePtr<JITTraceletPtrTag> m_stackOverflowHandler; >+}; >+ > class NativeJITCode : public JITCodeWithCodeRef { > public: > NativeJITCode(JITType); >@@ -278,6 +316,6 @@ private: > namespace WTF { > > class PrintStream; >-void printInternal(PrintStream&, JSC::JITCode::JITType); >+void printInternal(PrintStream&, JSC::JITType); > > } // namespace WTF >Index: Source/JavaScriptCore/jit/JITCodeMap.h >=================================================================== >--- Source/JavaScriptCore/jit/JITCodeMap.h (revision 244505) >+++ Source/JavaScriptCore/jit/JITCodeMap.h (working copy) >@@ -35,6 +35,7 @@ namespace JSC { > > class JITCodeMap { > private: >+ /* > struct Entry { > Entry() { } > >@@ -44,16 +45,33 @@ private: > { } > > inline unsigned bytecodeIndex() const { return m_bytecodeIndex; } >- inline CodeLocationLabel<JSEntryPtrTag> codeLocation() { return m_codeLocation; } >+ inline CodeLocationLabel<JSEntryPtrTag> codeLocation() const { return m_codeLocation; } > > private: > unsigned m_bytecodeIndex; > CodeLocationLabel<JSEntryPtrTag> m_codeLocation; > }; >+ */ > > public: > void append(unsigned bytecodeIndex, CodeLocationLabel<JSEntryPtrTag> codeLocation) > { >+ m_entries.add(bytecodeIndex, codeLocation); >+ } >+ void finish() {} >+ >+ CodeLocationLabel<JSEntryPtrTag> find(unsigned bytecodeIndex) const >+ { >+ auto iter = m_entries.find(bytecodeIndex); >+ if (iter == m_entries.end()) >+ return CodeLocationLabel<JSEntryPtrTag>(); >+ return iter->value; >+ } >+ >+ /* >+ >+ void append(unsigned bytecodeIndex, CodeLocationLabel<JSEntryPtrTag> codeLocation) >+ { > m_entries.append({ bytecodeIndex, codeLocation }); > } > >@@ -70,11 +88,13 @@ public: > return CodeLocationLabel<JSEntryPtrTag>(); > return entry->codeLocation(); > } >+ */ > > explicit operator bool() const { return m_entries.size(); } > > private: >- Vector<Entry> m_entries; >+ //Vector<Entry> m_entries; >+ HashMap<unsigned, CodeLocationLabel<JSEntryPtrTag>, DefaultHash<unsigned>::Hash, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_entries; > }; > > } // namespace JSC >Index: Source/JavaScriptCore/jit/JITDisassembler.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITDisassembler.cpp (revision 244505) >+++ Source/JavaScriptCore/jit/JITDisassembler.cpp (working copy) >@@ -53,7 +53,7 @@ JITDisassembler::~JITDisassembler() > void JITDisassembler::dump(PrintStream& out, LinkBuffer& linkBuffer) > { > dumpHeader(out, linkBuffer); >- dumpDisassembly(out, linkBuffer, m_startOfCode, m_labelForBytecodeIndexInMainPath[0]); >+ dumpDisassembly(out, linkBuffer, m_startOfCode, firstFastLabel()); > > dumpForInstructions(out, linkBuffer, " ", m_labelForBytecodeIndexInMainPath, firstSlowLabel()); > out.print(" (End Of Main Path)\n"); >@@ -75,7 +75,7 @@ void JITDisassembler::reportToProfiler(P > dumpHeader(out, linkBuffer); > compilation->addDescription(Profiler::CompiledBytecode(Profiler::OriginStack(), out.toCString())); > out.reset(); >- dumpDisassembly(out, linkBuffer, m_startOfCode, m_labelForBytecodeIndexInMainPath[0]); >+ dumpDisassembly(out, linkBuffer, m_startOfCode, firstFastLabel()); > compilation->addDescription(Profiler::CompiledBytecode(Profiler::OriginStack(), out.toCString())); > > reportInstructions(compilation, linkBuffer, " ", m_labelForBytecodeIndexInMainPath, firstSlowLabel()); >@@ -89,7 +89,7 @@ void JITDisassembler::reportToProfiler(P > > void JITDisassembler::dumpHeader(PrintStream& out, LinkBuffer& linkBuffer) > { >- out.print("Generated Baseline JIT 
code for ", CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT), ", instruction count = ", m_codeBlock->instructionCount(), "\n"); >+ out.print("Generated Baseline JIT code for ", CodeBlockWithJITType(m_codeBlock, JITType::BaselineJIT), ", instruction count = ", m_codeBlock->instructionCount(), "\n"); > out.print(" Source: ", m_codeBlock->sourceCodeOnOneLine(), "\n"); > out.print(" Code at [", RawPointer(linkBuffer.debugAddress()), ", ", RawPointer(static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.size()), "):\n"); > } >@@ -106,6 +106,15 @@ MacroAssembler::Label JITDisassembler::f > return firstSlowLabel.isSet() ? firstSlowLabel : m_endOfSlowPath; > } > >+MacroAssembler::Label JITDisassembler::firstFastLabel() >+{ >+ for (unsigned i = 0; i < m_labelForBytecodeIndexInMainPath.size(); ++i) { >+ if (m_labelForBytecodeIndexInMainPath[i].isSet()) >+ return m_labelForBytecodeIndexInMainPath[i]; >+ } >+ return m_startOfSlowPath; >+} >+ > Vector<JITDisassembler::DumpedOp> JITDisassembler::dumpVectorForInstructions(LinkBuffer& linkBuffer, const char* prefix, Vector<MacroAssembler::Label>& labels, MacroAssembler::Label endLabel) > { > StringPrintStream out; >Index: Source/JavaScriptCore/jit/JITDisassembler.h >=================================================================== >--- Source/JavaScriptCore/jit/JITDisassembler.h (revision 244505) >+++ Source/JavaScriptCore/jit/JITDisassembler.h (working copy) >@@ -55,6 +55,8 @@ public: > { > m_labelForBytecodeIndexInSlowPath[bytecodeIndex] = label; > } >+ >+ void setStartOfSlowPath(MacroAssembler::Label label) { m_startOfSlowPath = label; } > void setEndOfSlowPath(MacroAssembler::Label label) { m_endOfSlowPath = label; } > void setEndOfCode(MacroAssembler::Label label) { m_endOfCode = label; } > >@@ -64,6 +66,7 @@ public: > > private: > void dumpHeader(PrintStream&, LinkBuffer&); >+ MacroAssembler::Label firstFastLabel(); > MacroAssembler::Label firstSlowLabel(); > > struct DumpedOp { >@@ -81,6 +84,7 @@ private: > MacroAssembler::Label m_startOfCode; > Vector<MacroAssembler::Label> m_labelForBytecodeIndexInMainPath; > Vector<MacroAssembler::Label> m_labelForBytecodeIndexInSlowPath; >+ MacroAssembler::Label m_startOfSlowPath; > MacroAssembler::Label m_endOfSlowPath; > MacroAssembler::Label m_endOfCode; > }; >Index: Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp (revision 244505) >+++ Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp (working copy) >@@ -47,6 +47,7 @@ JITInlineCacheGenerator::JITInlineCacheG > const RegisterSet& usedRegisters) > : m_codeBlock(codeBlock) > { >+ // OOPS: If we fail a compilation, we need a way to remove all this from the CodeBlock! > m_stubInfo = m_codeBlock ? m_codeBlock->addStubInfo(accessType) : garbageStubInfo(); > m_stubInfo->codeOrigin = codeOrigin; > m_stubInfo->callSiteIndex = callSite; >Index: Source/JavaScriptCore/jit/JITInlines.h >=================================================================== >--- Source/JavaScriptCore/jit/JITInlines.h (revision 244505) >+++ Source/JavaScriptCore/jit/JITInlines.h (working copy) >@@ -247,7 +247,8 @@ ALWAYS_INLINE void JIT::emitJumpSlowToHo > { > ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. 
> >- jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this); >+ //jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this); >+ m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset)); > } > > #if ENABLE(SAMPLING_FLAGS) >Index: Source/JavaScriptCore/jit/JITOpcodes.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITOpcodes.cpp (revision 244505) >+++ Source/JavaScriptCore/jit/JITOpcodes.cpp (working copy) >@@ -1007,6 +1007,7 @@ void JIT::emit_op_loop_hint(const Instru > } > } > >+ > void JIT::emitSlow_op_loop_hint(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) > { > #if ENABLE(DFG_JIT) >@@ -1034,6 +1035,13 @@ void JIT::emitSlow_op_loop_hint(const In > #endif > } > >+void JIT::emit_op_trace_hint(const Instruction*) >+{ >+} >+void JIT::emitSlow_op_trace_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&) >+{ >+} >+ > void JIT::emit_op_check_traps(const Instruction*) > { > addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->needTrapHandlingAddress()))); >Index: Source/JavaScriptCore/jit/JITOperations.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITOperations.cpp (revision 244505) >+++ Source/JavaScriptCore/jit/JITOperations.cpp (working copy) >@@ -1427,7 +1427,7 @@ SlowPathReturnType JIT_OPERATION operati > DeferGCForAWhile deferGC(vm.heap); > > CodeBlock* codeBlock = exec->codeBlock(); >- if (UNLIKELY(codeBlock->jitType() != JITCode::BaselineJIT)) { >+ if (UNLIKELY(codeBlock->jitType() != JITType::BaselineJIT)) { > dataLog("Unexpected code block in Baseline->DFG tier-up: ", *codeBlock, "\n"); > RELEASE_ASSERT_NOT_REACHED(); > } >@@ -1660,8 +1660,8 @@ char* JIT_OPERATION operationTryOSREnter > return nullptr; > > switch (optimizedReplacement->jitType()) { >- case JITCode::DFGJIT: >- case JITCode::FTLJIT: { >+ case JITType::DFGJIT: >+ case JITType::FTLJIT: { > MacroAssemblerCodePtr<ExceptionHandlerPtrTag> entry = DFG::prepareCatchOSREntry(exec, optimizedReplacement, bytecodeIndex); > return entry.executableAddress<char*>(); > } >@@ -1682,8 +1682,8 @@ char* JIT_OPERATION operationTryOSREnter > return nullptr; > > switch (optimizedReplacement->jitType()) { >- case JITCode::DFGJIT: >- case JITCode::FTLJIT: { >+ case JITType::DFGJIT: >+ case JITType::FTLJIT: { > MacroAssemblerCodePtr<ExceptionHandlerPtrTag> entry = DFG::prepareCatchOSREntry(exec, optimizedReplacement, bytecodeIndex); > return entry.executableAddress<char*>(); > } >Index: Source/JavaScriptCore/jit/JITThunks.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITThunks.cpp (revision 244505) >+++ Source/JavaScriptCore/jit/JITThunks.cpp (working copy) >@@ -124,13 +124,13 @@ NativeExecutable* JITThunks::hostFunctio > RefPtr<JITCode> forCall; > if (generator) { > MacroAssemblerCodeRef<JSEntryPtrTag> entry = generator(vm).retagged<JSEntryPtrTag>(); >- forCall = adoptRef(new DirectJITCode(entry, entry.code(), JITCode::HostCallThunk, intrinsic)); >+ forCall = adoptRef(new DirectJITCode(entry, entry.code(), JITType::HostCallThunk, intrinsic)); > } else if (signature) >- forCall = adoptRef(new NativeDOMJITCode(MacroAssemblerCodeRef<JSEntryPtrTag>::createSelfManagedCodeRef(ctiNativeCall(vm).retagged<JSEntryPtrTag>()), JITCode::HostCallThunk, intrinsic, signature)); >+ forCall = adoptRef(new NativeDOMJITCode(MacroAssemblerCodeRef<JSEntryPtrTag>::createSelfManagedCodeRef(ctiNativeCall(vm).retagged<JSEntryPtrTag>()), 
JITType::HostCallThunk, intrinsic, signature)); > else >- forCall = adoptRef(new NativeJITCode(MacroAssemblerCodeRef<JSEntryPtrTag>::createSelfManagedCodeRef(ctiNativeCall(vm).retagged<JSEntryPtrTag>()), JITCode::HostCallThunk, intrinsic)); >+ forCall = adoptRef(new NativeJITCode(MacroAssemblerCodeRef<JSEntryPtrTag>::createSelfManagedCodeRef(ctiNativeCall(vm).retagged<JSEntryPtrTag>()), JITType::HostCallThunk, intrinsic)); > >- Ref<JITCode> forConstruct = adoptRef(*new NativeJITCode(MacroAssemblerCodeRef<JSEntryPtrTag>::createSelfManagedCodeRef(ctiNativeConstruct(vm).retagged<JSEntryPtrTag>()), JITCode::HostCallThunk, NoIntrinsic)); >+ Ref<JITCode> forConstruct = adoptRef(*new NativeJITCode(MacroAssemblerCodeRef<JSEntryPtrTag>::createSelfManagedCodeRef(ctiNativeConstruct(vm).retagged<JSEntryPtrTag>()), JITType::HostCallThunk, NoIntrinsic)); > > NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, forCall.releaseNonNull(), function, WTFMove(forConstruct), constructor, name); > weakAdd(*m_hostFunctionStubMap, std::make_tuple(function, constructor, name), Weak<NativeExecutable>(nativeExecutable, this)); >Index: Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp (revision 244505) >+++ Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp (working copy) >@@ -45,7 +45,7 @@ void JITToDFGDeferredCompilationCallback > CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock) > { > ASSERT_UNUSED(profiledDFGCodeBlock, !profiledDFGCodeBlock); >- ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT); >+ ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT); > > if (Options::verboseOSR()) > dataLog("Optimizing compilation of ", *codeBlock, " did become ready.\n"); >@@ -57,7 +57,7 @@ void JITToDFGDeferredCompilationCallback > CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationResult result) > { > ASSERT(!profiledDFGCodeBlock); >- ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT); >+ ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT); > > if (Options::verboseOSR()) > dataLog("Optimizing compilation of ", *codeBlock, " result: ", result, "\n"); >Index: Source/JavaScriptCore/jit/JITWorklist.cpp >=================================================================== >--- Source/JavaScriptCore/jit/JITWorklist.cpp (revision 244505) >+++ Source/JavaScriptCore/jit/JITWorklist.cpp (working copy) >@@ -36,10 +36,12 @@ namespace JSC { > > class JITWorklist::Plan : public ThreadSafeRefCounted<JITWorklist::Plan> { > public: >- Plan(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+ Plan(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset, bool isTracelet) > : m_codeBlock(codeBlock) > , m_jit(codeBlock->vm(), codeBlock, loopOSREntryBytecodeOffset) > { >+ m_jit.m_isTracelet = isTracelet; >+ m_jit.m_isTracelet = true; // OOPS: temporarily forced on; this dead-stores the isTracelet argument assigned above. > m_jit.doMainThreadPreparationBeforeCompile(); > } > >@@ -68,6 +70,10 @@ public: > m_codeBlock->ownerExecutable()->installCode(m_codeBlock); > m_codeBlock->jitSoon(); > return; >+ case CompilationDeferred: >+ // Nothing to compile!
>+ m_codeBlock->jitSoon(); >+ return; > default: > RELEASE_ASSERT_NOT_REACHED(); > return; >@@ -83,9 +89,9 @@ public: > return m_isFinishedCompiling; > } > >- static void compileNow(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+ static void compileNow(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset, bool isTracelet) > { >- Plan plan(codeBlock, loopOSREntryBytecodeOffset); >+ Plan plan(codeBlock, loopOSREntryBytecodeOffset, isTracelet); > plan.compileInThread(); > plan.finalize(); > } >@@ -228,10 +234,10 @@ void JITWorklist::poll(VM& vm) > finalizePlans(myPlans); > } > >-void JITWorklist::compileLater(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+void JITWorklist::compileLater(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset, bool isTracelet) > { > DeferGC deferGC(codeBlock->vm()->heap); >- RELEASE_ASSERT(codeBlock->jitType() == JITCode::InterpreterThunk); >+ //RELEASE_ASSERT(codeBlock->jitType() == JITType::InterpreterThunk); > > if (codeBlock->m_didFailJITCompilation) { > codeBlock->dontJITAnytimeSoon(); >@@ -239,7 +245,7 @@ void JITWorklist::compileLater(CodeBlock > } > > if (!Options::useConcurrentJIT()) { >- Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset); >+ Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset, isTracelet); > return; > } > >@@ -253,7 +259,7 @@ > > if (m_numAvailableThreads) { > m_planned.add(codeBlock); >- RefPtr<Plan> plan = adoptRef(new Plan(codeBlock, loopOSREntryBytecodeOffset)); >+ RefPtr<Plan> plan = adoptRef(new Plan(codeBlock, loopOSREntryBytecodeOffset, isTracelet)); > m_plans.append(plan); > m_queue.append(plan); > m_condition->notifyAll(locker); >@@ -277,14 +283,26 @@ void JITWorklist::compileLater(CodeBlock > // This works around the issue. If the concurrent JIT thread is convoyed, we revert to main > // thread compiles. This is probably not as good as if we had multiple JIT threads. Maybe we > // can do that someday. >- Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset); >+ Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset, isTracelet); > } > >-void JITWorklist::compileNow(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) >+void JITWorklist::compileNow(CodeBlock* codeBlock, unsigned bytecodeIndexToCompile) > { > VM* vm = codeBlock->vm(); > DeferGC deferGC(vm->heap); >- if (codeBlock->jitType() != JITCode::InterpreterThunk) >+ >+ auto didCompileBytecode = [&] { >+ RefPtr<JITCode> jitCode = codeBlock->jitCode(); >+ if (jitCode->isTraceletJITCode()) { >+ TraceletJITCode* tracelet = static_cast<TraceletJITCode*>(jitCode.get()); >+ if (tracelet->m_codeLocations.find(bytecodeIndexToCompile) != tracelet->m_codeLocations.end()) >+ return true; >+ } >+ >+ return false; >+ }; >+ >+ if (didCompileBytecode()) > return; > > bool isPlanned; >@@ -300,15 +318,14 @@ void JITWorklist::compileNow(CodeBlock* > } > > // Now it might be compiled! >- if (codeBlock->jitType() != JITCode::InterpreterThunk) >+ if (didCompileBytecode()) > return; > >- // We do this in case we had previously attempted, and then failed, to compile with the >- // baseline JIT. >- codeBlock->resetJITData(); >- > // OK, just compile it. >+ bool isTracelet = Options::useLLInt(); >+ JIT::compileNow(vm, codeBlock, JITCompilationMustSucceed, bytecodeIndexToCompile, isTracelet); >+ ASSERT(didCompileBytecode()); >+ // OOPS: change how we installCode().
> codeBlock->ownerExecutable()->installCode(codeBlock); > } > >Index: Source/JavaScriptCore/jit/JITWorklist.h >=================================================================== >--- Source/JavaScriptCore/jit/JITWorklist.h (revision 244505) >+++ Source/JavaScriptCore/jit/JITWorklist.h (working copy) >@@ -53,9 +53,8 @@ public: > bool completeAllForVM(VM&); // Return true if any JIT work happened. > void poll(VM&); > >- void compileLater(CodeBlock*, unsigned loopOSREntryBytecodeOffset = 0); >- >- void compileNow(CodeBlock*, unsigned loopOSREntryBytecodeOffset = 0); >+ void compileLater(CodeBlock*, unsigned loopOSREntryBytecodeOffset = 0, bool isTracelet = false); >+ void compileNow(CodeBlock*, unsigned bytecodeIndexToCompile); > > static JITWorklist& ensureGlobalWorklist(); > static JITWorklist* existingGlobalWorklistOrNull(); >Index: Source/JavaScriptCore/jit/Repatch.cpp >=================================================================== >--- Source/JavaScriptCore/jit/Repatch.cpp (revision 244505) >+++ Source/JavaScriptCore/jit/Repatch.cpp (working copy) >@@ -72,7 +72,7 @@ static FunctionPtr<CFunctionPtrTag> read > { > FunctionPtr<OperationPtrTag> target = MacroAssembler::readCallTarget<OperationPtrTag>(call); > #if ENABLE(FTL_JIT) >- if (codeBlock->jitType() == JITCode::FTLJIT) { >+ if (codeBlock->jitType() == JITType::FTLJIT) { > MacroAssemblerCodePtr<JITThunkPtrTag> thunk = MacroAssemblerCodePtr<OperationPtrTag>::createFromExecutableAddress(target.executableAddress()).retagged<JITThunkPtrTag>(); > return codeBlock->vm()->ftlThunks->keyForSlowPathCallThunk(thunk).callTarget().retagged<CFunctionPtrTag>(); > } >@@ -85,7 +85,7 @@ static FunctionPtr<CFunctionPtrTag> read > void ftlThunkAwareRepatchCall(CodeBlock* codeBlock, CodeLocationCall<JSInternalPtrTag> call, FunctionPtr<CFunctionPtrTag> newCalleeFunction) > { > #if ENABLE(FTL_JIT) >- if (codeBlock->jitType() == JITCode::FTLJIT) { >+ if (codeBlock->jitType() == JITType::FTLJIT) { > VM& vm = *codeBlock->vm(); > FTL::Thunks& thunks = *vm.ftlThunks; > FunctionPtr<OperationPtrTag> target = MacroAssembler::readCallTarget<OperationPtrTag>(call); >Index: Source/JavaScriptCore/llint/LLIntEntrypoint.cpp >=================================================================== >--- Source/JavaScriptCore/llint/LLIntEntrypoint.cpp (revision 244505) >+++ Source/JavaScriptCore/llint/LLIntEntrypoint.cpp (working copy) >@@ -51,7 +51,7 @@ static void setFunctionEntrypoint(CodeBl > std::call_once(onceKey, [&] { > auto callRef = functionForCallEntryThunk().retagged<JSEntryPtrTag>(); > auto callArityCheckRef = functionForCallArityCheckThunk().retaggedCode<JSEntryPtrTag>(); >- jitCode = new DirectJITCode(callRef, callArityCheckRef, JITCode::InterpreterThunk, JITCode::ShareAttribute::Shared); >+ jitCode = new DirectJITCode(callRef, callArityCheckRef, JITType::InterpreterThunk, JITCode::ShareAttribute::Shared); > }); > > codeBlock->setJITCode(makeRef(*jitCode)); >@@ -64,7 +64,7 @@ static void setFunctionEntrypoint(CodeBl > std::call_once(onceKey, [&] { > auto constructRef = functionForConstructEntryThunk().retagged<JSEntryPtrTag>(); > auto constructArityCheckRef = functionForConstructArityCheckThunk().retaggedCode<JSEntryPtrTag>(); >- jitCode = new DirectJITCode(constructRef, constructArityCheckRef, JITCode::InterpreterThunk, JITCode::ShareAttribute::Shared); >+ jitCode = new DirectJITCode(constructRef, constructArityCheckRef, JITType::InterpreterThunk, JITCode::ShareAttribute::Shared); > }); > > codeBlock->setJITCode(makeRef(*jitCode)); >@@ -76,14 +76,14 @@ 
static void setFunctionEntrypoint(CodeBl > static DirectJITCode* jitCode; > static std::once_flag onceKey; > std::call_once(onceKey, [&] { >- jitCode = new DirectJITCode(getCodeRef<JSEntryPtrTag>(llint_function_for_call_prologue), getCodePtr<JSEntryPtrTag>(llint_function_for_call_arity_check), JITCode::InterpreterThunk, JITCode::ShareAttribute::Shared); >+ jitCode = new DirectJITCode(getCodeRef<JSEntryPtrTag>(llint_function_for_call_prologue), getCodePtr<JSEntryPtrTag>(llint_function_for_call_arity_check), JITType::InterpreterThunk, JITCode::ShareAttribute::Shared); > }); > codeBlock->setJITCode(makeRef(*jitCode)); > } else { > static DirectJITCode* jitCode; > static std::once_flag onceKey; > std::call_once(onceKey, [&] { >- jitCode = new DirectJITCode(getCodeRef<JSEntryPtrTag>(llint_function_for_construct_prologue), getCodePtr<JSEntryPtrTag>(llint_function_for_construct_arity_check), JITCode::InterpreterThunk, JITCode::ShareAttribute::Shared); >+ jitCode = new DirectJITCode(getCodeRef<JSEntryPtrTag>(llint_function_for_construct_prologue), getCodePtr<JSEntryPtrTag>(llint_function_for_construct_arity_check), JITType::InterpreterThunk, JITCode::ShareAttribute::Shared); > }); > codeBlock->setJITCode(makeRef(*jitCode)); > } >@@ -97,7 +97,7 @@ static void setEvalEntrypoint(CodeBlock* > static std::once_flag onceKey; > std::call_once(onceKey, [&] { > MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = evalEntryThunk().retagged<JSEntryPtrTag>(); >- jitCode = new NativeJITCode(codeRef, JITCode::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); >+ jitCode = new NativeJITCode(codeRef, JITType::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); > }); > codeBlock->setJITCode(makeRef(*jitCode)); > return; >@@ -107,7 +107,7 @@ static void setEvalEntrypoint(CodeBlock* > static NativeJITCode* jitCode; > static std::once_flag onceKey; > std::call_once(onceKey, [&] { >- jitCode = new NativeJITCode(getCodeRef<JSEntryPtrTag>(llint_eval_prologue), JITCode::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); >+ jitCode = new NativeJITCode(getCodeRef<JSEntryPtrTag>(llint_eval_prologue), JITType::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); > }); > codeBlock->setJITCode(makeRef(*jitCode)); > } >@@ -120,7 +120,7 @@ static void setProgramEntrypoint(CodeBlo > static std::once_flag onceKey; > std::call_once(onceKey, [&] { > MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = programEntryThunk().retagged<JSEntryPtrTag>(); >- jitCode = new NativeJITCode(codeRef, JITCode::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); >+ jitCode = new NativeJITCode(codeRef, JITType::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); > }); > codeBlock->setJITCode(makeRef(*jitCode)); > return; >@@ -130,7 +130,7 @@ static void setProgramEntrypoint(CodeBlo > static NativeJITCode* jitCode; > static std::once_flag onceKey; > std::call_once(onceKey, [&] { >- jitCode = new NativeJITCode(getCodeRef<JSEntryPtrTag>(llint_program_prologue), JITCode::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); >+ jitCode = new NativeJITCode(getCodeRef<JSEntryPtrTag>(llint_program_prologue), JITType::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); > }); > codeBlock->setJITCode(makeRef(*jitCode)); > } >@@ -143,7 +143,7 @@ static void setModuleProgramEntrypoint(C > static std::once_flag onceKey; > std::call_once(onceKey, [&] { > 
MacroAssemblerCodeRef<JSEntryPtrTag> codeRef = moduleProgramEntryThunk().retagged<JSEntryPtrTag>(); >- jitCode = new NativeJITCode(codeRef, JITCode::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); >+ jitCode = new NativeJITCode(codeRef, JITType::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); > }); > codeBlock->setJITCode(makeRef(*jitCode)); > return; >@@ -153,7 +153,7 @@ static void setModuleProgramEntrypoint(C > static NativeJITCode* jitCode; > static std::once_flag onceKey; > std::call_once(onceKey, [&] { >- jitCode = new NativeJITCode(getCodeRef<JSEntryPtrTag>(llint_module_program_prologue), JITCode::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); >+ jitCode = new NativeJITCode(getCodeRef<JSEntryPtrTag>(llint_module_program_prologue), JITType::InterpreterThunk, Intrinsic::NoIntrinsic, JITCode::ShareAttribute::Shared); > }); > codeBlock->setJITCode(makeRef(*jitCode)); > } >@@ -180,6 +180,8 @@ void setEntrypoint(CodeBlock* codeBlock) > > unsigned frameRegisterCountFor(CodeBlock* codeBlock) > { >+ // OOPS: Combine this with JIT's function to ensure they're always the same! >+ > ASSERT(static_cast<unsigned>(codeBlock->numCalleeLocals()) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->numCalleeLocals()))); > > return roundLocalRegisterCountForFramePointerOffset(codeBlock->numCalleeLocals() + maxFrameExtentForSlowPathCallInRegisters); >Index: Source/JavaScriptCore/llint/LLIntSlowPaths.cpp >=================================================================== >--- Source/JavaScriptCore/llint/LLIntSlowPaths.cpp (revision 244505) >+++ Source/JavaScriptCore/llint/LLIntSlowPaths.cpp (working copy) >@@ -383,15 +383,15 @@ inline bool jitCompileAndSetHeuristics(C > JITWorklist::ensureGlobalWorklist().poll(vm); > > switch (codeBlock->jitType()) { >- case JITCode::BaselineJIT: { >+ case JITType::BaselineJIT: { > if (Options::verboseOSR()) > dataLogF(" Code was already compiled.\n"); > codeBlock->jitSoon(); > return true; > } >- case JITCode::InterpreterThunk: { >+ case JITType::InterpreterThunk: { > JITWorklist::ensureGlobalWorklist().compileLater(codeBlock, loopOSREntryBytecodeOffset); >- return codeBlock->jitType() == JITCode::BaselineJIT; >+ return codeBlock->jitType() == JITType::BaselineJIT; > } > default: > dataLog("Unexpected code block in LLInt: ", *codeBlock, "\n"); >@@ -412,7 +412,28 @@ static SlowPathReturnType entryOSR(ExecS > codeBlock->dontJITAnytimeSoon(); > LLINT_RETURN_TWO(0, 0); > } >- if (!jitCompileAndSetHeuristics(codeBlock, exec)) >+ >+ auto returnPC = [&] () -> MacroAssemblerCodePtr<JITTraceletPtrTag> { >+ JITCode* jitCode = codeBlock->jitCode().get(); >+ >+ if (jitCode->isTraceletJITCode()) { >+ auto* traceletJITCode = static_cast<TraceletJITCode*>(jitCode); >+ return traceletJITCode->findCodeLocation(0); >+ } >+ >+ return MacroAssemblerCodePtr<JITTraceletPtrTag>(); >+ }; >+ >+ if (!returnPC()) { >+ if (!jitCompileAndSetHeuristics(codeBlock, exec)) >+ LLINT_RETURN_TWO(0, 0); >+ } >+ >+ // OOPS: Have a nice way to say if we compiled the prologue instead of searching for zero! >+ JITCode* jitCode = codeBlock->jitCode().get(); >+ RELEASE_ASSERT(jitCode->isTraceletJITCode()); // OOPS: Not true once DFG OSR exit happens! 
>+ auto iter = static_cast<TraceletJITCode*>(jitCode)->m_codeLocations.find(0); >+ if (iter == static_cast<TraceletJITCode*>(jitCode)->m_codeLocations.end()) > LLINT_RETURN_TWO(0, 0); > > CODEBLOCK_LOG_EVENT(codeBlock, "OSR entry", ("in prologue")); >@@ -474,19 +495,35 @@ LLINT_SLOW_PATH_DECL(loop_osr) > codeBlock->dontJITAnytimeSoon(); > LLINT_RETURN_TWO(0, 0); > } >+ >+ auto returnPC = [&] () -> MacroAssemblerCodePtr<JITTraceletPtrTag> { >+ JITCode* jitCode = codeBlock->jitCode().get(); >+ >+ if (jitCode->isTraceletJITCode()) { >+ auto* traceletJITCode = static_cast<TraceletJITCode*>(jitCode); >+ return traceletJITCode->findCodeLocation(loopOSREntryBytecodeOffset); >+ } >+ >+ return MacroAssemblerCodePtr<JITTraceletPtrTag>(); >+ }; >+ >+ MacroAssemblerCodePtr<JITTraceletPtrTag> codePtr = returnPC(); >+ if (codePtr) >+ LLINT_RETURN_TWO(codePtr.retagged<JSEntryPtrTag>().executableAddress(), exec->topOfFrame()); > > if (!jitCompileAndSetHeuristics(codeBlock, exec, loopOSREntryBytecodeOffset)) > LLINT_RETURN_TWO(0, 0); > > CODEBLOCK_LOG_EVENT(codeBlock, "osrEntry", ("at bc#", loopOSREntryBytecodeOffset)); > >- ASSERT(codeBlock->jitType() == JITCode::BaselineJIT); >- >- const JITCodeMap& codeMap = codeBlock->jitCodeMap(); >- CodeLocationLabel<JSEntryPtrTag> codeLocation = codeMap.find(loopOSREntryBytecodeOffset); >- ASSERT(codeLocation); >+ ASSERT(codeBlock->jitType() == JITType::BaselineJIT); >+ codePtr = static_cast<TraceletJITCode*>(codeBlock->jitCode().get())->findCodeLocation(loopOSREntryBytecodeOffset); >+ if (!codePtr) { >+ //dataLogLn("loop_hint not compiled yet!"); >+ LLINT_RETURN_TWO(0, 0); >+ } > >- void* jumpTarget = codeLocation.executableAddress(); >+ void* jumpTarget = codePtr.retagged<JSEntryPtrTag>().executableAddress(); > ASSERT(jumpTarget); > > LLINT_RETURN_TWO(jumpTarget, exec->topOfFrame()); >@@ -1956,6 +1993,51 @@ LLINT_SLOW_PATH_DECL(slow_path_out_of_li > LLINT_END_IMPL(); > } > >+LLINT_SLOW_PATH_DECL(trace_hint) >+{ >+ LLINT_BEGIN_NO_SET_PC(); >+ UNUSED_PARAM(throwScope); >+ //dataLogLn("trace_hint slow path!"); >+ >+ auto bytecode = pc->as<OpTraceHint>(); >+ auto& metadata = bytecode.metadata(exec); >+ metadata.m_shouldCompile = true; >+ >+ CodeBlock* codeBlock = exec->codeBlock(); >+ if (!shouldJIT(codeBlock)) >+ LLINT_RETURN_TWO(0, 0); >+ >+ auto returnPC = [&] () -> void* { >+ JITCode* jitCode = codeBlock->jitCode().get(); >+ >+ if (jitCode->isTraceletJITCode()) { >+ //dataLogLn("at trace_hint bc#", codeBlock->bytecodeOffset(pc)); >+ auto* traceletJITCode = static_cast<TraceletJITCode*>(jitCode); >+ auto iter = traceletJITCode->m_codeLocations.find(codeBlock->bytecodeOffset(pc)); >+ if (iter != traceletJITCode->m_codeLocations.end()) { >+ auto* result = iter->value.executableAddress(); >+ //dataLogLn("LLInt trace_hint returning JIT PC to jump to: ", RawPointer(result), " for bc#", codeBlock->bytecodeOffset(pc), " in codeblock: ", codeBlock->inferredName().data(), "#", codeBlock->hashAsStringIfPossible().data()); >+ >+ metadata.m_entrypoint = bitwise_cast<uintptr_t>(result); >+ >+ return result; >+ } >+ } >+ >+ return nullptr; >+ }; >+ >+ if (auto* ret = returnPC()) >+ LLINT_RETURN_TWO(ret, 0); >+ >+ JITWorklist::ensureGlobalWorklist().compileLater(codeBlock, 0, true); >+ if (auto* ret = returnPC()) >+ LLINT_RETURN_TWO(ret, 0); >+ >+ metadata.m_count = -15; >+ LLINT_RETURN_TWO(0, 0); >+} >+ > extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM* vm, ProtoCallFrame* protoFrame) > { > ExecState* exec = vm->topCallFrame; >Index: 
Source/JavaScriptCore/llint/LLIntSlowPaths.h >=================================================================== >--- Source/JavaScriptCore/llint/LLIntSlowPaths.h (revision 244505) >+++ Source/JavaScriptCore/llint/LLIntSlowPaths.h (working copy) >@@ -134,6 +134,7 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_lo > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_super_sampler_begin); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_super_sampler_end); > LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_out_of_line_jump_target); >+LLINT_SLOW_PATH_HIDDEN_DECL(trace_hint); > extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM*, ProtoCallFrame*) WTF_INTERNAL; > #if ENABLE(C_LOOP) > extern "C" SlowPathReturnType llint_stack_check_at_vm_entry(VM*, Register*) WTF_INTERNAL; >Index: Source/JavaScriptCore/llint/LowLevelInterpreter.asm >=================================================================== >--- Source/JavaScriptCore/llint/LowLevelInterpreter.asm (revision 244505) >+++ Source/JavaScriptCore/llint/LowLevelInterpreter.asm (working copy) >@@ -247,6 +247,7 @@ const ArithProfileNumberNumber = constex > # Pointer Tags > const BytecodePtrTag = constexpr BytecodePtrTag > const JSEntryPtrTag = constexpr JSEntryPtrTag >+const JITTraceletPtrTag = constexpr JITTraceletPtrTag > const ExceptionHandlerPtrTag = constexpr ExceptionHandlerPtrTag > const NoPtrTag = constexpr NoPtrTag > const SlowPathPtrTag = constexpr SlowPathPtrTag >@@ -1632,6 +1633,28 @@ llintOp(op_loop_hint, OpLoopHint, macro > dispatch() > end) > >+llintOpWithMetadata(op_trace_hint, OpTraceHint, macro (size, get, dispatch, metadata, return) >+ metadata(t2, t0) >+ baddis 1, OpTraceHint::Metadata::m_count[t2], .continue >+ loadp OpTraceHint::Metadata::m_entrypoint[t2], t1 >+ btpz t1, .doCompile >+ #jmp t1, JITTraceletPtrTag >+ >+.doCompile: >+ storei PC, ArgumentCount + TagOffset[cfr] >+ prepareStateForCCall() >+ move cfr, a0 >+ move PC, a1 >+ cCall2(_llint_trace_hint) >+ btpz r0, .recover >+ jmp r0, JITTraceletPtrTag >+.recover: >+ loadi ArgumentCount + TagOffset[cfr], PC >+ >+.continue: >+ dispatch() >+end) >+ > > llintOp(op_check_traps, OpCheckTraps, macro (unused, unused, dispatch) > loadp CodeBlock[cfr], t1 >Index: Source/JavaScriptCore/runtime/JSCPtrTag.h >=================================================================== >--- Source/JavaScriptCore/runtime/JSCPtrTag.h (revision 244505) >+++ Source/JavaScriptCore/runtime/JSCPtrTag.h (working copy) >@@ -39,6 +39,7 @@ using PtrTag = WTF::PtrTag; > v(ExceptionHandlerPtrTag) \ > v(ExecutableMemoryPtrTag) \ > v(JITThunkPtrTag) \ >+ v(JITTraceletPtrTag) \ > v(JITStubRoutinePtrTag) \ > v(JSEntryPtrTag) \ > v(JSInternalPtrTag) \ >Index: Source/JavaScriptCore/runtime/SamplingProfiler.cpp >=================================================================== >--- Source/JavaScriptCore/runtime/SamplingProfiler.cpp (revision 244505) >+++ Source/JavaScriptCore/runtime/SamplingProfiler.cpp (working copy) >@@ -590,7 +590,7 @@ void SamplingProfiler::processUnverified > // inside the LLInt's handleUncaughtException. So we just protect against this > // by ignoring it. 
> unsigned bytecodeIndex = 0; >- if (topCodeBlock->jitType() == JITCode::InterpreterThunk || topCodeBlock->jitType() == JITCode::BaselineJIT) { >+ if (topCodeBlock->jitType() == JITType::InterpreterThunk || topCodeBlock->jitType() == JITType::BaselineJIT) { > bool isValidPC; > unsigned bits; > #if USE(JSVALUE64) >Index: Source/JavaScriptCore/runtime/SamplingProfiler.h >=================================================================== >--- Source/JavaScriptCore/runtime/SamplingProfiler.h (revision 244505) >+++ Source/JavaScriptCore/runtime/SamplingProfiler.h (working copy) >@@ -108,7 +108,7 @@ public: > unsigned columnNumber { std::numeric_limits<unsigned>::max() }; > unsigned bytecodeIndex { std::numeric_limits<unsigned>::max() }; > CodeBlockHash codeBlockHash; >- JITCode::JITType jitType { JITCode::None }; >+ JITType jitType { JITType::None }; > }; > > CodeLocation semanticLocation; >Index: Source/JavaScriptCore/runtime/ScriptExecutable.cpp >=================================================================== >--- Source/JavaScriptCore/runtime/ScriptExecutable.cpp (revision 244505) >+++ Source/JavaScriptCore/runtime/ScriptExecutable.cpp (working copy) >@@ -184,6 +184,11 @@ void ScriptExecutable::installCode(VM& v > break; > } > >+ //dataLogLn("Install code on executable: ", RawPointer(this), " m_jitCodeForConstruct=", RawPointer(m_jitCodeForConstruct.get()), " m_jitCodeForCall=", RawPointer(m_jitCodeForCall.get())); >+ //WTFReportBacktrace(); >+ //dataLogLn(); >+ //dataLogLn(); >+ > auto& clearableCodeSet = VM::SpaceAndSet::setFor(*subspace()); > if (hasClearableCode(vm)) > clearableCodeSet.add(this); >@@ -386,7 +391,7 @@ static void setupLLInt(CodeBlock* codeBl > static void setupJIT(VM& vm, CodeBlock* codeBlock) > { > #if ENABLE(JIT) >- CompilationResult result = JIT::compile(&vm, codeBlock, JITCompilationMustSucceed); >+ CompilationResult result = JIT::compileNow(&vm, codeBlock, JITCompilationMustSucceed, 0, false); > RELEASE_ASSERT(result == CompilationSuccessful); > #else > UNUSED_PARAM(vm); >Index: Source/JavaScriptCore/runtime/VM.cpp >=================================================================== >--- Source/JavaScriptCore/runtime/VM.cpp (revision 244505) >+++ Source/JavaScriptCore/runtime/VM.cpp (working copy) >@@ -689,7 +689,7 @@ static Ref<NativeJITCode> jitCodeForCall > static NativeJITCode* result; > static std::once_flag onceKey; > std::call_once(onceKey, [&] { >- result = new NativeJITCode(LLInt::getCodeRef<JSEntryPtrTag>(llint_native_call_trampoline), JITCode::HostCallThunk, NoIntrinsic); >+ result = new NativeJITCode(LLInt::getCodeRef<JSEntryPtrTag>(llint_native_call_trampoline), JITType::HostCallThunk, NoIntrinsic); > }); > return makeRef(*result); > } >@@ -699,7 +699,7 @@ static Ref<NativeJITCode> jitCodeForCons > static NativeJITCode* result; > static std::once_flag onceKey; > std::call_once(onceKey, [&] { >- result = new NativeJITCode(LLInt::getCodeRef<JSEntryPtrTag>(llint_native_construct_trampoline), JITCode::HostCallThunk, NoIntrinsic); >+ result = new NativeJITCode(LLInt::getCodeRef<JSEntryPtrTag>(llint_native_construct_trampoline), JITType::HostCallThunk, NoIntrinsic); > }); > return makeRef(*result); > } >Index: Source/JavaScriptCore/tools/CodeProfile.cpp >=================================================================== >--- Source/JavaScriptCore/tools/CodeProfile.cpp (revision 244505) >+++ Source/JavaScriptCore/tools/CodeProfile.cpp (working copy) >@@ -82,7 +82,7 @@ void CodeProfile::sample(void* pc, void* > type = RegExpCode; > else { > CodeBlock* 
codeBlock = static_cast<CodeBlock*>(ownerUID); >- if (codeBlock->jitType() == JITCode::DFGJIT) >+ if (codeBlock->jitType() == JITType::DFGJIT) > type = DFGJIT; > else if (!canCompile(codeBlock->capabilityLevelState())) > type = BaselineOnly; >Index: Source/JavaScriptCore/tools/JSDollarVM.cpp >=================================================================== >--- Source/JavaScriptCore/tools/JSDollarVM.cpp (revision 244505) >+++ Source/JavaScriptCore/tools/JSDollarVM.cpp (working copy) >@@ -1300,7 +1300,7 @@ class CallerFrameJITTypeFunctor { > public: > CallerFrameJITTypeFunctor() > : m_currentFrame(0) >- , m_jitType(JITCode::None) >+ , m_jitType(JITType::None) > { > } > >@@ -1313,11 +1313,11 @@ public: > return StackVisitor::Continue; > } > >- JITCode::JITType jitType() { return m_jitType; } >+ JITType jitType() { return m_jitType; } > > private: > mutable unsigned m_currentFrame; >- mutable JITCode::JITType m_jitType; >+ mutable JITType m_jitType; > }; > > static FunctionExecutable* getExecutableForFunction(JSValue theFunctionValue) >@@ -1344,7 +1344,7 @@ static EncodedJSValue JSC_HOST_CALL func > return JSValue::encode(jsUndefined()); > CallerFrameJITTypeFunctor functor; > exec->iterate(functor); >- return JSValue::encode(jsBoolean(functor.jitType() == JITCode::InterpreterThunk)); >+ return JSValue::encode(jsBoolean(functor.jitType() == JITType::InterpreterThunk)); > } > > // Returns true if the current frame is a baseline JIT frame. >@@ -1355,7 +1355,7 @@ static EncodedJSValue JSC_HOST_CALL func > return JSValue::encode(jsUndefined()); > CallerFrameJITTypeFunctor functor; > exec->iterate(functor); >- return JSValue::encode(jsBoolean(functor.jitType() == JITCode::BaselineJIT)); >+ return JSValue::encode(jsBoolean(functor.jitType() == JITType::BaselineJIT)); > } > > // Set that the argument function should not be inlined.
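
A note on the JITCodeMap rewrite above: the old implementation was an append-then-finish() sorted Vector searched by binary search, which assumes the map is built in one pass when the whole CodeBlock is compiled. With tracelets, code locations arrive incrementally across many small compiles, and a lookup may legitimately miss ("this bytecode offset has no JIT code yet"), so an exact-match hash map is the better fit. The UnsignedWithZeroKeyHashTraits parameter matters because WTF::HashMap's default integer traits reserve 0 as the empty key, and bytecode offset 0 (the prologue, which entryOSR looks up) must be a usable key. A minimal sketch of the new lookup discipline, with std::unordered_map standing in for WTF::HashMap (it has no zero-key restriction) and a plain pointer standing in for CodeLocationLabel<JSEntryPtrTag>:

    #include <unordered_map>

    using CodeLocation = const void*; // stand-in for CodeLocationLabel<JSEntryPtrTag>

    class JITCodeMapSketch {
    public:
        void append(unsigned bytecodeIndex, CodeLocation location)
        {
            m_entries.emplace(bytecodeIndex, location); // incremental inserts are cheap
        }
        void finish() { } // no sort pass needed anymore; kept for API compatibility

        // Exact match or nothing: a null result now means "not compiled yet,
        // keep interpreting", not a programming error.
        CodeLocation find(unsigned bytecodeIndex) const
        {
            auto iter = m_entries.find(bytecodeIndex);
            return iter == m_entries.end() ? nullptr : iter->second;
        }

        explicit operator bool() const { return !m_entries.empty(); }

    private:
        std::unordered_map<unsigned, CodeLocation> m_entries;
    };

This is also why the loop_osr and entryOSR changes above drop the old ASSERT on the code location and return (0, 0) to the LLInt on a miss: under tracelets, a missing entry is the normal "keep interpreting" case.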
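
A note on the emitJumpSlowToHot change above: linking a slow-path jump directly into m_labels requires the target's main-path label to already be valid in the current compilation, while appending to m_jmpTable defers resolution to the link phase. The patch does not spell out the motivation, but a plausible reading is that under tracelet compilation the target bytecode may not have been emitted (or may live in a different tracelet) when the slow path is generated. A self-contained sketch of that record-then-resolve pattern; the types here are illustrative stand-ins, not the real MacroAssembler ones:

    #include <cstdio>
    #include <unordered_map>
    #include <vector>

    // Stand-in for the JIT's JumpTable entries: a branch waiting for its target.
    struct PendingJump {
        unsigned jumpSiteOffset;      // where the branch instruction was emitted
        unsigned targetBytecodeIndex; // bytecode it must eventually reach
    };

    int main()
    {
        std::unordered_map<unsigned, unsigned> labelForBytecodeIndex; // like m_labels
        std::vector<PendingJump> jmpTable;                            // like m_jmpTable

        // Emission pass: record labels as targets are emitted; queue jumps
        // immediately, even forward jumps whose label does not exist yet.
        labelForBytecodeIndex[0] = 0x00;
        jmpTable.push_back({ 0x40, 8 }); // forward slow->hot jump; bc#8 not emitted yet
        labelForBytecodeIndex[8] = 0x20;
        jmpTable.push_back({ 0x48, 0 }); // backward slow->hot jump

        // Link pass: every queued target must be resolvable by now. A tracelet
        // JIT gets one central place to decide what a cross-tracelet target means.
        for (const PendingJump& jump : jmpTable) {
            unsigned target = labelForBytecodeIndex.at(jump.targetBytecodeIndex);
            std::printf("patch branch at +%#x to land at +%#x\n", jump.jumpSiteOffset, target);
        }
        return 0;
    }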
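
A note on op_trace_hint: the asm above plus the llint_trace_hint slow path implement a per-site counting and tier-up protocol. Each execution increments OpTraceHint::Metadata::m_count and dispatches onward while the counter is negative; once it reaches zero, the slow path marks the site as wanting compilation, jumps to an already-compiled tracelet if the code-location map has one, and otherwise schedules a compile and re-arms the counter to -15 so the site asks again after roughly fifteen more executions. (In this WIP the fast-path jmp through the cached m_entrypoint is commented out, so even a compiled site still round-trips through the slow path.) Below is a runnable sketch of that protocol under my reading of the baddis back-off; none of the names are real JSC API, and the scheduleTraceletCompile stub that "finishes" on the third request exists purely so the demo terminates:

    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>

    struct TraceHintMetadata {
        uintptr_t entrypoint { 0 }; // cached tracelet entry; 0 until compiled
        int count { 0 };            // counts up; slow path runs once it is >= 0
        bool shouldCompile { false };
    };

    // Stand-in for TraceletJITCode::m_codeLocations.
    static std::unordered_map<unsigned, uintptr_t> compiledTracelets;

    // Stand-in for JITWorklist::compileLater(): pretend the concurrent compile
    // completes after the third request.
    static void scheduleTraceletCompile(unsigned bytecodeOffset)
    {
        static int requests = 0;
        if (++requests >= 3)
            compiledTracelets[bytecodeOffset] = 0x1000 + bytecodeOffset;
    }

    // Returns the address to jump to, or 0 to keep interpreting.
    static uintptr_t traceHint(TraceHintMetadata& metadata, unsigned bytecodeOffset)
    {
        if (++metadata.count < 0)
            return 0; // still backing off; stay in the LLInt
        if (metadata.entrypoint)
            return metadata.entrypoint; // the fast path the WIP asm disables

        metadata.shouldCompile = true;
        auto lookup = [&]() -> uintptr_t {
            auto iter = compiledTracelets.find(bytecodeOffset);
            return iter == compiledTracelets.end() ? 0 : iter->second;
        };
        if (uintptr_t code = lookup()) {
            metadata.entrypoint = code; // cache so later executions can skip all this
            return code;
        }
        scheduleTraceletCompile(bytecodeOffset);
        if (uintptr_t code = lookup()) { // a synchronous compile may have finished
            metadata.entrypoint = code;
            return code;
        }
        metadata.count = -15; // back off; ask again after ~15 more executions
        return 0;
    }

    int main()
    {
        TraceHintMetadata metadata;
        for (int i = 0; i < 40; ++i) {
            if (uintptr_t target = traceHint(metadata, 42))
                std::printf("execution %d: jump to tracelet at %#lx\n", i, (unsigned long)target);
        }
        return 0;
    }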