Diff
Modified: trunk/Source/_javascript_Core/ChangeLog (261641 => 261642)
--- trunk/Source/_javascript_Core/ChangeLog 2020-05-13 20:35:31 UTC (rev 261641)
+++ trunk/Source/_javascript_Core/ChangeLog 2020-05-13 20:37:35 UTC (rev 261642)
@@ -1,5 +1,34 @@
2020-05-13 Caio Lima <ticaiol...@gmail.com>
+ Making 32-bits JIT build without Unified Build system
+ https://bugs.webkit.org/show_bug.cgi?id=211853
+
+ Reviewed by Adrian Perez de Castro.
+
+ This patch moves some templates to allow non-unified builds of
+ 32-bit JIT configurations.
+ These templates were moved out of JITArithmetic32_64 and JITPropertyAccess32_64.
+
+ * jit/JITArithmetic.cpp:
+ (JSC::JIT::emit_compareAndJump):
+ (JSC::JIT::emit_compareUnsignedAndJump):
+ (JSC::JIT::emit_compareUnsigned):
+ (JSC::JIT::emit_compareAndJumpSlow):
+ (JSC::JIT::emitBinaryDoubleOp):
+ * jit/JITArithmetic32_64.cpp:
+ (JSC::JIT::emit_compareAndJump): Deleted.
+ (JSC::JIT::emit_compareUnsignedAndJump): Deleted.
+ (JSC::JIT::emit_compareUnsigned): Deleted.
+ (JSC::JIT::emit_compareAndJumpSlow): Deleted.
+ (JSC::JIT::emitBinaryDoubleOp): Deleted.
+ * jit/JITOpcodes32_64.cpp:
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::emitPutByValWithCachedId):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::emitPutByValWithCachedId): Deleted.
+
+2020-05-13 Caio Lima <ticaiol...@gmail.com>
+
[JSC] Support delete by val/id IC on 32-bits
https://bugs.webkit.org/show_bug.cgi?id=208207
Modified: trunk/Source/_javascript_Core/jit/JITArithmetic.cpp (261641 => 261642)
--- trunk/Source/_javascript_Core/jit/JITArithmetic.cpp 2020-05-13 20:35:31 UTC (rev 261641)
+++ trunk/Source/_javascript_Core/jit/JITArithmetic.cpp 2020-05-13 20:37:35 UTC (rev 261642)
@@ -469,6 +469,258 @@
/* ------------------------------ END: OP_MOD ------------------------------ */
+#else // USE(JSVALUE64)
+
+template <typename Op>
+void JIT::emit_compareAndJump(const Instruction* instruction, RelationalCondition condition)
+{
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ auto bytecode = instruction->as<Op>();
+ VirtualRegister op1 = bytecode.m_lhs;
+ VirtualRegister op2 = bytecode.m_rhs;
+ unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
+
+ // Character less.
+ if (isOperandConstantChar(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branchIfNotCell(regT1));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantChar(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branchIfNotCell(regT1));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branchIfNotInt32(regT3));
+ addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branchIfNotInt32(regT1));
+ addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branchIfNotInt32(regT1));
+ notInt32Op2.append(branchIfNotInt32(regT3));
+ addJump(branch32(condition, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp<Op>(instruction, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantInt(op1), isOperandConstantInt(op1) || !isOperandConstantInt(op2));
+ end.link(this);
+}
+
+template <typename Op>
+void JIT::emit_compareUnsignedAndJump(const Instruction* instruction, RelationalCondition condition)
+{
+ auto bytecode = instruction->as<Op>();
+ VirtualRegister op1 = bytecode.m_lhs;
+ VirtualRegister op2 = bytecode.m_rhs;
+ unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
+
+ if (isOperandConstantInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addJump(branch32(condition, regT0, regT2), target);
+ }
+}
+
+template <typename Op>
+void JIT::emit_compareUnsigned(const Instruction* instruction, RelationalCondition condition)
+{
+ auto bytecode = instruction->as<Op>();
+ VirtualRegister dst = bytecode.m_dst;
+ VirtualRegister op1 = bytecode.m_lhs;
+ VirtualRegister op2 = bytecode.m_rhs;
+
+ if (isOperandConstantInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ compare32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32()), regT0);
+ } else if (isOperandConstantInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ compare32(condition, regT0, Imm32(getConstantOperand(op2).asInt32()), regT0);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ compare32(condition, regT0, regT2, regT0);
+ }
+ emitStoreBool(dst, regT0);
+}
+
+template <typename Op>
+void JIT::emit_compareAndJumpSlow(const Instruction *instruction, DoubleCondition, size_t (JIT_OPERATION *operation)(JSGlobalObject*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
+{
+ auto bytecode = instruction->as<Op>();
+ VirtualRegister op1 = bytecode.m_lhs;
+ VirtualRegister op2 = bytecode.m_rhs;
+ unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
+
+ linkAllSlowCases(iter);
+
+ emitLoad(op1, regT1, regT0);
+ emitLoad(op2, regT3, regT2);
+ callOperation(operation, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
+}
+
+template <typename Op>
+void JIT::emitBinaryDoubleOp(const Instruction *instruction, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
+{
+ JumpList end;
+
+ auto bytecode = instruction->as<Op>();
+ int opcodeID = Op::opcodeID;
+ int target = jumpTarget(instruction, bytecode.m_targetLabel);
+ VirtualRegister op1 = bytecode.m_lhs;
+ VirtualRegister op2 = bytecode.m_rhs;
+
+ if (!notInt32Op1.empty()) {
+ // Double case 1: Op1 is not int32; Op2 is unknown.
+ notInt32Op1.link(this);
+
+ ASSERT(op1IsInRegisters);
+
+ // Verify Op1 is double.
+ if (!types.first().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+
+ if (!op2IsInRegisters)
+ emitLoad(op2, regT3, regT2);
+
+ Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));
+
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branchIfNotInt32(regT3));
+
+ convertInt32ToDouble(regT2, fpRegT0);
+ Jump doTheMath = jump();
+
+ // Load Op2 as double into double register.
+ doubleOp2.link(this);
+ emitLoadDouble(op2, fpRegT0);
+
+ // Do the math.
+ doTheMath.link(this);
+ switch (opcodeID) {
+ case op_jless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanAndOrdered, fpRegT2, fpRegT0), target);
+ break;
+ case op_jlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqualAndOrdered, fpRegT2, fpRegT0), target);
+ break;
+ case op_jgreater:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleGreaterThanAndOrdered, fpRegT2, fpRegT0), target);
+ break;
+ case op_jgreatereq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleGreaterThanOrEqualAndOrdered, fpRegT2, fpRegT0), target);
+ break;
+ case op_jnless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), target);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), target);
+ break;
+ case op_jngreater:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), target);
+ break;
+ case op_jngreatereq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), target);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ if (!notInt32Op2.empty())
+ end.append(jump());
+ }
+
+ if (!notInt32Op2.empty()) {
+ // Double case 2: Op1 is int32; Op2 is not int32.
+ notInt32Op2.link(this);
+
+ ASSERT(op2IsInRegisters);
+
+ if (!op1IsInRegisters)
+ emitLoadPayload(op1, regT0);
+
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ // Verify op2 is double.
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));
+
+ // Do the math.
+ switch (opcodeID) {
+ case op_jless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanAndOrdered, fpRegT0, fpRegT1), target);
+ break;
+ case op_jlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqualAndOrdered, fpRegT0, fpRegT1), target);
+ break;
+ case op_jgreater:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleGreaterThanAndOrdered, fpRegT0, fpRegT1), target);
+ break;
+ case op_jgreatereq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleGreaterThanOrEqualAndOrdered, fpRegT0, fpRegT1), target);
+ break;
+ case op_jnless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
+ break;
+ case op_jngreater:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
+ break;
+ case op_jngreatereq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), target);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ end.link(this);
+}
+
#endif // USE(JSVALUE64)
void JIT::emit_op_negate(const Instruction* currentInstruction)
Modified: trunk/Source/_javascript_Core/jit/JITArithmetic32_64.cpp (261641 => 261642)
--- trunk/Source/_javascript_Core/jit/JITArithmetic32_64.cpp 2020-05-13 20:35:31 UTC (rev 261641)
+++ trunk/Source/_javascript_Core/jit/JITArithmetic32_64.cpp 2020-05-13 20:37:35 UTC (rev 261642)
@@ -41,120 +41,6 @@
namespace JSC {
-template <typename Op>
-void JIT::emit_compareAndJump(const Instruction* instruction, RelationalCondition condition)
-{
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- auto bytecode = instruction->as<Op>();
- VirtualRegister op1 = bytecode.m_lhs;
- VirtualRegister op2 = bytecode.m_rhs;
- unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
-
- // Character less.
- if (isOperandConstantChar(op1)) {
- emitLoad(op2, regT1, regT0);
- addSlowCase(branchIfNotCell(regT1));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantChar(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branchIfNotCell(regT1));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantInt(op1)) {
- emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branchIfNotInt32(regT3));
- addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantInt(op2)) {
- emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branchIfNotInt32(regT1));
- addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branchIfNotInt32(regT1));
- notInt32Op2.append(branchIfNotInt32(regT3));
- addJump(branch32(condition, regT0, regT2), target);
- }
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double less.
- emitBinaryDoubleOp<Op>(instruction, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantInt(op1), isOperandConstantInt(op1) || !isOperandConstantInt(op2));
- end.link(this);
-}
-
-template <typename Op>
-void JIT::emit_compareUnsignedAndJump(const Instruction* instruction, RelationalCondition condition)
-{
- auto bytecode = instruction->as<Op>();
- VirtualRegister op1 = bytecode.m_lhs;
- VirtualRegister op2 = bytecode.m_rhs;
- unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
-
- if (isOperandConstantInt(op1)) {
- emitLoad(op2, regT3, regT2);
- addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addJump(branch32(condition, regT0, regT2), target);
- }
-}
-
-template <typename Op>
-void JIT::emit_compareUnsigned(const Instruction* instruction, RelationalCondition condition)
-{
- auto bytecode = instruction->as<Op>();
- VirtualRegister dst = bytecode.m_dst;
- VirtualRegister op1 = bytecode.m_lhs;
- VirtualRegister op2 = bytecode.m_rhs;
-
- if (isOperandConstantInt(op1)) {
- emitLoad(op2, regT3, regT2);
- compare32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32()), regT0);
- } else if (isOperandConstantInt(op2)) {
- emitLoad(op1, regT1, regT0);
- compare32(condition, regT0, Imm32(getConstantOperand(op2).asInt32()), regT0);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- compare32(condition, regT0, regT2, regT0);
- }
- emitStoreBool(dst, regT0);
-}
-
-template <typename Op>
-void JIT::emit_compareAndJumpSlow(const Instruction *instruction, DoubleCondition, size_t (JIT_OPERATION *operation)(JSGlobalObject*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
-{
- auto bytecode = instruction->as<Op>();
- VirtualRegister op1 = bytecode.m_lhs;
- VirtualRegister op2 = bytecode.m_rhs;
- unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
-
- linkAllSlowCases(iter);
-
- emitLoad(op1, regT1, regT0);
- emitLoad(op2, regT3, regT2);
- callOperation(operation, m_codeBlock->globalObject(), JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2));
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
-}
-
void JIT::emit_op_unsigned(const Instruction* currentInstruction)
{
auto bytecode = currentInstruction->as<OpUnsigned>();
@@ -192,142 +78,6 @@
emitStoreInt32(srcDst, regT0, true);
}
-template <typename Op>
-void JIT::emitBinaryDoubleOp(const Instruction *instruction, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
-{
- JumpList end;
-
- auto bytecode = instruction->as<Op>();
- int opcodeID = Op::opcodeID;
- int target = jumpTarget(instruction, bytecode.m_targetLabel);
- VirtualRegister op1 = bytecode.m_lhs;
- VirtualRegister op2 = bytecode.m_rhs;
-
- if (!notInt32Op1.empty()) {
- // Double case 1: Op1 is not int32; Op2 is unknown.
- notInt32Op1.link(this);
-
- ASSERT(op1IsInRegisters);
-
- // Verify Op1 is double.
- if (!types.first().definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
-
- if (!op2IsInRegisters)
- emitLoad(op2, regT3, regT2);
-
- Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));
-
- if (!types.second().definitelyIsNumber())
- addSlowCase(branchIfNotInt32(regT3));
-
- convertInt32ToDouble(regT2, fpRegT0);
- Jump doTheMath = jump();
-
- // Load Op2 as double into double register.
- doubleOp2.link(this);
- emitLoadDouble(op2, fpRegT0);
-
- // Do the math.
- doTheMath.link(this);
- switch (opcodeID) {
- case op_jless:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanAndOrdered, fpRegT2, fpRegT0), target);
- break;
- case op_jlesseq:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanOrEqualAndOrdered, fpRegT2, fpRegT0), target);
- break;
- case op_jgreater:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleGreaterThanAndOrdered, fpRegT2, fpRegT0), target);
- break;
- case op_jgreatereq:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleGreaterThanOrEqualAndOrdered, fpRegT2, fpRegT0), target);
- break;
- case op_jnless:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), target);
- break;
- case op_jnlesseq:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), target);
- break;
- case op_jngreater:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), target);
- break;
- case op_jngreatereq:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), target);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- if (!notInt32Op2.empty())
- end.append(jump());
- }
-
- if (!notInt32Op2.empty()) {
- // Double case 2: Op1 is int32; Op2 is not int32.
- notInt32Op2.link(this);
-
- ASSERT(op2IsInRegisters);
-
- if (!op1IsInRegisters)
- emitLoadPayload(op1, regT0);
-
- convertInt32ToDouble(regT0, fpRegT0);
-
- // Verify op2 is double.
- if (!types.second().definitelyIsNumber())
- addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));
-
- // Do the math.
- switch (opcodeID) {
- case op_jless:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanAndOrdered, fpRegT0, fpRegT1), target);
- break;
- case op_jlesseq:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrEqualAndOrdered, fpRegT0, fpRegT1), target);
- break;
- case op_jgreater:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleGreaterThanAndOrdered, fpRegT0, fpRegT1), target);
- break;
- case op_jgreatereq:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleGreaterThanOrEqualAndOrdered, fpRegT0, fpRegT1), target);
- break;
- case op_jnless:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
- break;
- case op_jnlesseq:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
- break;
- case op_jngreater:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
- break;
- case op_jngreatereq:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), target);
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
- }
-
- end.link(this);
-}
-
// Mod (%)
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
Modified: trunk/Source/_javascript_Core/jit/JITOpcodes32_64.cpp (261641 => 261642)
--- trunk/Source/_javascript_Core/jit/JITOpcodes32_64.cpp 2020-05-13 20:35:31 UTC (rev 261641)
+++ trunk/Source/_javascript_Core/jit/JITOpcodes32_64.cpp 2020-05-13 20:37:35 UTC (rev 261642)
@@ -30,9 +30,12 @@
#if USE(JSVALUE32_64)
#include "JIT.h"
+#include "BasicBlockLocation.h"
+#include "BytecodeGenerator.h"
#include "BytecodeStructs.h"
#include "CCallHelpers.h"
#include "Exception.h"
+#include "InterpreterInlines.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSCast.h"
Modified: trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp (261641 => 261642)
--- trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp 2020-05-13 20:35:31 UTC (rev 261641)
+++ trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp 2020-05-13 20:37:35 UTC (rev 261642)
@@ -1262,6 +1262,8 @@
emitWriteBarrier(base, value, ShouldFilterValue);
}
+template void JIT::emit_op_put_by_val<OpPutByVal>(const Instruction*);
+
#else // USE(JSVALUE64)
void JIT::emitWriteBarrier(VirtualRegister owner, VirtualRegister value, WriteBarrierMode mode)
@@ -1302,6 +1304,48 @@
valueNotCell.link(this);
}
+template <typename Op>
+JITPutByIdGenerator JIT::emitPutByValWithCachedId(Op bytecode, PutKind putKind, CacheableIdentifier propertyName, JumpList& doneCases, JumpList& slowCases)
+{
+ // base: tag(regT1), payload(regT0)
+ // property: tag(regT3), payload(regT2)
+
+ VirtualRegister base = bytecode.m_base;
+ VirtualRegister value = bytecode.m_value;
+
+ slowCases.append(branchIfNotCell(regT3));
+ emitByValIdentifierCheck(regT2, regT2, propertyName, slowCases);
+
+ // Write barrier breaks the registers. So after issuing the write barrier,
+ // reload the registers.
+ //
+ // IC can write new Structure without write-barrier if a base is cell.
+ // We are emitting write-barrier before writing here but this is OK since 32bit JSC does not have concurrent GC.
+ // FIXME: Use UnconditionalWriteBarrier in Baseline effectively to reduce code size.
+ // https://bugs.webkit.org/show_bug.cgi?id=209395
+ emitWriteBarrier(base, ShouldFilterBase);
+ emitLoadPayload(base, regT0);
+ emitLoad(value, regT3, regT2);
+
+ JITPutByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), RegisterSet::stubUnavailableRegisters(), propertyName,
+ JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), regT1, bytecode.m_ecmaMode, putKind);
+ gen.generateFastPath(*this);
+ doneCases.append(jump());
+
+ Label coldPathBegin = label();
+ gen.slowPathJump().link(this);
+
+ // JITPutByIdGenerator only preserve the value and the base's payload, we have to reload the tag.
+ emitLoadTag(base, regT1);
+
+ Call call = callOperation(gen.slowPathFunction(), m_codeBlock->globalObject(), gen.stubInfo(), JSValueRegs(regT3, regT2), JSValueRegs(regT1, regT0), propertyName.rawBits());
+ gen.reportSlowPathCall(coldPathBegin, call);
+ doneCases.append(jump());
+
+ return gen;
+}
+
#endif // USE(JSVALUE64)
void JIT::emitWriteBarrier(VirtualRegister owner, WriteBarrierMode mode)
@@ -1663,8 +1707,6 @@
return slowCases;
}
-template void JIT::emit_op_put_by_val<OpPutByVal>(const Instruction*);
-
} // namespace JSC
#endif // ENABLE(JIT)
Modified: trunk/Source/_javascript_Core/jit/JITPropertyAccess32_64.cpp (261641 => 261642)
--- trunk/Source/_javascript_Core/jit/JITPropertyAccess32_64.cpp 2020-05-13 20:35:31 UTC (rev 261641)
+++ trunk/Source/_javascript_Core/jit/JITPropertyAccess32_64.cpp 2020-05-13 20:37:35 UTC (rev 261642)
@@ -29,6 +29,7 @@
#if USE(JSVALUE32_64)
#include "JIT.h"
+#include "CacheableIdentifierInlines.h"
#include "CodeBlock.h"
#include "DirectArguments.h"
#include "GCAwareJITStubRoutine.h"
@@ -37,6 +38,7 @@
#include "JSArray.h"
#include "JSFunction.h"
#include "JSLexicalEnvironment.h"
+#include "JSPromise.h"
#include "LinkBuffer.h"
#include "OpcodeInlines.h"
#include "ResultType.h"
@@ -441,48 +443,6 @@
return slowCases;
}
-template <typename Op>
-JITPutByIdGenerator JIT::emitPutByValWithCachedId(Op bytecode, PutKind putKind, CacheableIdentifier propertyName, JumpList& doneCases, JumpList& slowCases)
-{
- // base: tag(regT1), payload(regT0)
- // property: tag(regT3), payload(regT2)
-
- VirtualRegister base = bytecode.m_base;
- VirtualRegister value = bytecode.m_value;
-
- slowCases.append(branchIfNotCell(regT3));
- emitByValIdentifierCheck(regT2, regT2, propertyName, slowCases);
-
- // Write barrier breaks the registers. So after issuing the write barrier,
- // reload the registers.
- //
- // IC can write new Structure without write-barrier if a base is cell.
- // We are emitting write-barrier before writing here but this is OK since 32bit JSC does not have concurrent GC.
- // FIXME: Use UnconditionalWriteBarrier in Baseline effectively to reduce code size.
- // https://bugs.webkit.org/show_bug.cgi?id=209395
- emitWriteBarrier(base, ShouldFilterBase);
- emitLoadPayload(base, regT0);
- emitLoad(value, regT3, regT2);
-
- JITPutByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeIndex), CallSiteIndex(m_bytecodeIndex), RegisterSet::stubUnavailableRegisters(), propertyName,
- JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), regT1, bytecode.m_ecmaMode, putKind);
- gen.generateFastPath(*this);
- doneCases.append(jump());
-
- Label coldPathBegin = label();
- gen.slowPathJump().link(this);
-
- // JITPutByIdGenerator only preserve the value and the base's payload, we have to reload the tag.
- emitLoadTag(base, regT1);
-
- Call call = callOperation(gen.slowPathFunction(), m_codeBlock->globalObject(), gen.stubInfo(), JSValueRegs(regT3, regT2), JSValueRegs(regT1, regT0), propertyName.rawBits());
- gen.reportSlowPathCall(coldPathBegin, call);
- doneCases.append(jump());
-
- return gen;
-}
-
void JIT::emitSlow_op_put_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct;
@@ -1266,6 +1226,8 @@
emitWriteBarrier(base, value, ShouldFilterValue);
}
+template void JIT::emit_op_put_by_val<OpPutByVal>(const Instruction*);
+
} // namespace JSC
#endif // USE(JSVALUE32_64)