Log Message
Unaligned userspace access for SH4 platforms https://bugs.webkit.org/show_bug.cgi?id=79104
Patch by Thouraya ANDOLSI <[email protected]> on 2012-03-26 Reviewed by Gavin Barraclough. * assembler/AbstractMacroAssembler.h: (Jump): (JSC::AbstractMacroAssembler::Jump::Jump): (JSC::AbstractMacroAssembler::Jump::link): * assembler/MacroAssemblerSH4.h: (JSC::MacroAssemblerSH4::load16Unaligned): (JSC::MacroAssemblerSH4::load32WithUnalignedHalfWords): (JSC::MacroAssemblerSH4::branchDouble): (JSC::MacroAssemblerSH4::branchTrue): (JSC::MacroAssemblerSH4::branchFalse): * assembler/SH4Assembler.h: (JSC::SH4Assembler::extraInstrForBranch): (SH4Assembler): (JSC::SH4Assembler::bra): (JSC::SH4Assembler::linkJump): * jit/JIT.h: (JIT): * yarr/YarrJIT.cpp: (JSC::Yarr::YarrGenerator::generatePatternCharacterOnce):
Modified Paths
- trunk/Source/JavaScriptCore/ChangeLog
- trunk/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
- trunk/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h
- trunk/Source/JavaScriptCore/assembler/SH4Assembler.h
- trunk/Source/JavaScriptCore/jit/JIT.h
- trunk/Source/JavaScriptCore/yarr/YarrJIT.cpp
Diff
Modified: trunk/Source/JavaScriptCore/ChangeLog (112191 => 112192)
--- trunk/Source/JavaScriptCore/ChangeLog 2012-03-27 02:01:42 UTC (rev 112191)
+++ trunk/Source/JavaScriptCore/ChangeLog 2012-03-27 02:03:47 UTC (rev 112192)
@@ -1,3 +1,30 @@
+2012-03-26 Thouraya ANDOLSI <[email protected]>
+
+ Unaligned userspace access for SH4 platforms
+ https://bugs.webkit.org/show_bug.cgi?id=79104
+
+ Reviewed by Gavin Barraclough.
+
+ * assembler/AbstractMacroAssembler.h:
+ (Jump):
+ (JSC::AbstractMacroAssembler::Jump::Jump):
+ (JSC::AbstractMacroAssembler::Jump::link):
+ * assembler/MacroAssemblerSH4.h:
+ (JSC::MacroAssemblerSH4::load16Unaligned):
+ (JSC::MacroAssemblerSH4::load32WithUnalignedHalfWords):
+ (JSC::MacroAssemblerSH4::branchDouble):
+ (JSC::MacroAssemblerSH4::branchTrue):
+ (JSC::MacroAssemblerSH4::branchFalse):
+ * assembler/SH4Assembler.h:
+ (JSC::SH4Assembler::extraInstrForBranch):
+ (SH4Assembler):
+ (JSC::SH4Assembler::bra):
+ (JSC::SH4Assembler::linkJump):
+ * jit/JIT.h:
+ (JIT):
+ * yarr/YarrJIT.cpp:
+ (JSC::Yarr::YarrGenerator::generatePatternCharacterOnce):
+
2012-03-26 Ryosuke Niwa <[email protected]>
cssText should use shorthand notations
Modified: trunk/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h (112191 => 112192)
--- trunk/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h 2012-03-27 02:01:42 UTC (rev 112191)
+++ trunk/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h 2012-03-27 02:03:47 UTC (rev 112192)
@@ -450,6 +450,12 @@
, m_condition(condition)
{
}
+#elif CPU(SH4)
+ Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
+ : m_label(jmp)
+ , m_type(type)
+ {
+ }
#else
Jump(AssemblerLabel jmp)
: m_label(jmp)
@@ -461,6 +467,8 @@
{
#if CPU(ARM_THUMB2)
masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(SH4)
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
#else
masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
@@ -483,6 +491,9 @@
ARMv7Assembler::JumpType m_type;
ARMv7Assembler::Condition m_condition;
#endif
+#if CPU(SH4)
+ SH4Assembler::JumpType m_type;
+#endif
};
// JumpList:
Modified: trunk/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h (112191 => 112192)
--- trunk/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h 2012-03-27 02:01:42 UTC (rev 112191)
+++ trunk/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h 2012-03-27 02:03:47 UTC (rev 112192)
@@ -681,8 +681,7 @@
load8(scr, scr1);
add32(TrustedImm32(1), scr);
load8(scr, dest);
- move(TrustedImm32(8), scr);
- m_assembler.shllRegReg(dest, scr);
+ m_assembler.shllImm8r(8, dest);
or32(scr1, dest);
releaseScratch(scr);
@@ -942,7 +941,13 @@
void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
{
RegisterID scr = claimScratch();
+ RegisterID scr1 = claimScratch();
+ Jump m_jump;
+ JumpList end;
+ if (dest != SH4Registers::r0)
+ move(SH4Registers::r0, scr1);
+
move(address.index, scr);
lshift32(TrustedImm32(address.scale), scr);
add32(address.base, scr);
@@ -950,13 +955,44 @@
if (address.offset)
add32(TrustedImm32(address.offset), scr);
- RegisterID scr1 = claimScratch();
+ m_assembler.ensureSpace(m_assembler.maxInstructionSize + 68, sizeof(uint32_t));
+ move(scr, SH4Registers::r0);
+ m_assembler.andlImm8r(0x3, SH4Registers::r0);
+ m_assembler.cmpEqImmR0(0x0, SH4Registers::r0);
+ m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
+ if (dest != SH4Registers::r0)
+ move(scr1, SH4Registers::r0);
+
+ load32(scr, dest);
+ end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
+ m_assembler.nop();
+ m_jump.link(this);
+ m_assembler.andlImm8r(0x1, SH4Registers::r0);
+ m_assembler.cmpEqImmR0(0x0, SH4Registers::r0);
+
+ if (dest != SH4Registers::r0)
+ move(scr1, SH4Registers::r0);
+
+ m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
load16(scr, scr1);
add32(TrustedImm32(2), scr);
load16(scr, dest);
- move(TrustedImm32(16), scr);
- m_assembler.shllRegReg(dest, scr);
+ m_assembler.shllImm8r(16, dest);
or32(scr1, dest);
+ end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
+ m_assembler.nop();
+ m_jump.link(this);
+ load8(scr, scr1);
+ add32(TrustedImm32(1), scr);
+ load16(scr, dest);
+ m_assembler.shllImm8r(8, dest);
+ or32(dest, scr1);
+ add32(TrustedImm32(2), scr);
+ load8(scr, dest);
+ m_assembler.shllImm8r(8, dest);
+ m_assembler.shllImm8r(16, dest);
+ or32(scr1, dest);
+ end.link(this);
releaseScratch(scr);
releaseScratch(scr1);
@@ -999,19 +1035,22 @@
if (cond == DoubleNotEqual) {
RegisterID scr = claimScratch();
+ JumpList end;
m_assembler.loadConstant(0x7fbfffff, scratchReg3);
m_assembler.dcnvds(right);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
- m_assembler.branch(BT_OPCODE, 8);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcnvds(left);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
- m_assembler.branch(BT_OPCODE, 4);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcmppeq(right, left);
releaseScratch(scr);
- return branchFalse();
+ Jump m_jump = branchFalse();
+ end.link(this);
+ return m_jump;
}
if (cond == DoubleGreaterThan) {
@@ -1036,113 +1075,135 @@
if (cond == DoubleEqualOrUnordered) {
RegisterID scr = claimScratch();
+ JumpList end;
m_assembler.loadConstant(0x7fbfffff, scratchReg3);
m_assembler.dcnvds(right);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
- m_assembler.branch(BT_OPCODE, 5);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcnvds(left);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
- m_assembler.branch(BT_OPCODE, 1);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcmppeq(left, right);
+ Jump m_jump = Jump(m_assembler.je());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
releaseScratch(scr);
- return branchTrue();
+ return m_jump;
}
if (cond == DoubleGreaterThanOrUnordered) {
RegisterID scr = claimScratch();
+ JumpList end;
m_assembler.loadConstant(0x7fbfffff, scratchReg3);
m_assembler.dcnvds(right);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
- m_assembler.branch(BT_OPCODE, 5);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcnvds(left);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
- m_assembler.branch(BT_OPCODE, 1);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcmppgt(right, left);
+ Jump m_jump = Jump(m_assembler.je());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
releaseScratch(scr);
- return branchTrue();
+ return m_jump;
}
if (cond == DoubleGreaterThanOrEqualOrUnordered) {
RegisterID scr = claimScratch();
+ JumpList end;
m_assembler.loadConstant(0x7fbfffff, scratchReg3);
m_assembler.dcnvds(right);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
- m_assembler.branch(BT_OPCODE, 5);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcnvds(left);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
- m_assembler.branch(BT_OPCODE, 1);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcmppgt(left, right);
+ Jump m_jump = Jump(m_assembler.jne());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
releaseScratch(scr);
- return branchFalse();
+ return m_jump;
}
if (cond == DoubleLessThanOrUnordered) {
RegisterID scr = claimScratch();
+ JumpList end;
m_assembler.loadConstant(0x7fbfffff, scratchReg3);
m_assembler.dcnvds(right);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
- m_assembler.branch(BT_OPCODE, 5);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcnvds(left);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
- m_assembler.branch(BT_OPCODE, 1);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcmppgt(left, right);
+ Jump m_jump = Jump(m_assembler.je());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
releaseScratch(scr);
- return branchTrue();
+ return m_jump;
}
if (cond == DoubleLessThanOrEqualOrUnordered) {
RegisterID scr = claimScratch();
+ JumpList end;
m_assembler.loadConstant(0x7fbfffff, scratchReg3);
m_assembler.dcnvds(right);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
- m_assembler.branch(BT_OPCODE, 5);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcnvds(left);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
- m_assembler.branch(BT_OPCODE, 1);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcmppgt(right, left);
+ Jump m_jump = Jump(m_assembler.jne());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
releaseScratch(scr);
- return branchFalse();
+ return m_jump;
}
ASSERT(cond == DoubleNotEqualOrUnordered);
RegisterID scr = claimScratch();
+ JumpList end;
m_assembler.loadConstant(0x7fbfffff, scratchReg3);
m_assembler.dcnvds(right);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
- m_assembler.branch(BT_OPCODE, 5);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcnvds(left);
m_assembler.stsfpulReg(scr);
m_assembler.cmplRegReg(scratchReg3, scr, SH4Condition(Equal));
- m_assembler.branch(BT_OPCODE, 1);
+ end.append(Jump(m_assembler.je(), SH4Assembler::JumpNear));
m_assembler.dcmppeq(right, left);
+ Jump m_jump = Jump(m_assembler.jne());
+ end.link(this);
+ m_assembler.extraInstrForBranch(scr);
releaseScratch(scr);
- return branchFalse();
+ return m_jump;
}
Jump branchTrue()
{
m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
Jump m_jump = Jump(m_assembler.je());
- m_assembler.loadConstantUnReusable(0x0, scratchReg3);
- m_assembler.nop();
- m_assembler.nop();
+ m_assembler.extraInstrForBranch(scratchReg3);
return m_jump;
}
@@ -1150,9 +1211,7 @@
{
m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
Jump m_jump = Jump(m_assembler.jne());
- m_assembler.loadConstantUnReusable(0x0, scratchReg3);
- m_assembler.nop();
- m_assembler.nop();
+ m_assembler.extraInstrForBranch(scratchReg3);
return m_jump;
}
Modified: trunk/Source/JavaScriptCore/assembler/SH4Assembler.h (112191 => 112192)
--- trunk/Source/JavaScriptCore/assembler/SH4Assembler.h 2012-03-27 02:01:42 UTC (rev 112191)
+++ trunk/Source/JavaScriptCore/assembler/SH4Assembler.h 2012-03-27 02:03:47 UTC (rev 112192)
@@ -326,6 +326,10 @@
padForAlign32 = 0x00090009,
};
+ enum JumpType { JumpFar,
+ JumpNear
+ };
+
SH4Assembler()
{
m_claimscratchReg = 0x0;
@@ -1188,6 +1192,13 @@
return label;
}
+ void extraInstrForBranch(RegisterID dst)
+ {
+ loadConstantUnReusable(0x0, dst);
+ nop();
+ nop();
+ }
+
AssemblerLabel jmp(RegisterID dst)
{
jmpReg(dst);
@@ -1215,6 +1226,13 @@
return label;
}
+ AssemblerLabel bra()
+ {
+ AssemblerLabel label = m_buffer.label();
+ branch(BRA_OPCODE, 0);
+ return label;
+ }
+
void ret()
{
m_buffer.ensureSpace(maxInstructionSize + 2);
@@ -1424,7 +1442,7 @@
// Linking & patching
- void linkJump(AssemblerLabel from, AssemblerLabel to)
+ void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type = JumpFar)
{
ASSERT(to.isSet());
ASSERT(from.isSet());
@@ -1433,6 +1451,14 @@
uint16_t instruction = *instructionPtr;
int offsetBits;
+ if (type == JumpNear) {
+ ASSERT((instruction == BT_OPCODE) || (instruction == BF_OPCODE) || (instruction == BRA_OPCODE));
+ int offset = (codeSize() - from.m_offset) - 4;
+ *instructionPtr++ = instruction | (offset >> 1);
+ printInstr(*instructionPtr, from.m_offset + 2);
+ return;
+ }
+
if (((instruction & 0xff00) == BT_OPCODE) || ((instruction & 0xff00) == BF_OPCODE)) {
/* BT label => BF 2
nop LDR reg
Modified: trunk/Source/JavaScriptCore/jit/JIT.h (112191 => 112192)
--- trunk/Source/JavaScriptCore/jit/JIT.h 2012-03-27 02:01:42 UTC (rev 112191)
+++ trunk/Source/JavaScriptCore/jit/JIT.h 2012-03-27 02:03:47 UTC (rev 112192)
@@ -552,8 +552,8 @@
static const int sequenceGetByIdHotPathInstructionSpace = 36;
static const int sequenceGetByIdHotPathConstantSpace = 5;
// sequenceGetByIdSlowCase
- static const int sequenceGetByIdSlowCaseInstructionSpace = 30;
- static const int sequenceGetByIdSlowCaseConstantSpace = 3;
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 38;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 4;
// sequencePutById
static const int sequencePutByIdInstructionSpace = 36;
static const int sequencePutByIdConstantSpace = 5;
@@ -567,7 +567,7 @@
#if ENABLE(OPCODE_SAMPLING)
static const int patchOffsetGetByIdSlowCaseCall = 0; // FIMXE
#else
- static const int patchOffsetGetByIdSlowCaseCall = 26;
+ static const int patchOffsetGetByIdSlowCaseCall = 34;
#endif
static const int patchOffsetOpCallCompareToJump = 4;
Modified: trunk/Source/JavaScriptCore/yarr/YarrJIT.cpp (112191 => 112192)
--- trunk/Source/JavaScriptCore/yarr/YarrJIT.cpp 2012-03-27 02:01:42 UTC (rev 112191)
+++ trunk/Source/JavaScriptCore/yarr/YarrJIT.cpp 2012-03-27 02:03:47 UTC (rev 112192)
@@ -729,12 +729,12 @@
return;
case 2: {
BaseIndex address(input, index, TimesOne, (startTermPosition - m_checked) * sizeof(LChar));
- load16(address, character);
+ load16Unaligned(address, character);
break;
}
case 3: {
BaseIndex highAddress(input, index, TimesOne, (startTermPosition - m_checked) * sizeof(LChar));
- load16(highAddress, character);
+ load16Unaligned(highAddress, character);
if (ignoreCaseMask)
or32(Imm32(ignoreCaseMask), character);
op.m_jumps.append(branch32(NotEqual, character, Imm32((allCharacters & 0xffff) | ignoreCaseMask)));
_______________________________________________ webkit-changes mailing list [email protected] http://lists.webkit.org/mailman/listinfo.cgi/webkit-changes
