ARM64: SW skinning runs without crashing but is broken.

Henrik Rydgård 2015-04-05 11:12:42 +02:00 committed by Henrik Rydgard
parent f937b4b74b
commit 459ba28655
6 changed files with 55 additions and 35 deletions

View File

@@ -2693,6 +2693,7 @@ void ARM64FloatEmitter::FMOV(ARM64Reg Rd, ARM64Reg Rn, bool top)
if (IsScalar(Rd) && IsScalar(Rn)) {
EmitScalar1Source(0, 0, IsDouble(Rd), 0, Rd, Rn);
} else {
_assert_msg_(JIT, !IsQuad(Rd) && !IsQuad(Rn), "FMOV can't move to/from quads");
int type = 0;
int rmode = 0;
int opcode = 6;
@@ -3547,7 +3548,8 @@ void ARM64FloatEmitter::MOVI2F(ARM64Reg Rd, float value, ARM64Reg scratch, bool
void ARM64FloatEmitter::MOVI2FDUP(ARM64Reg Rd, float value, ARM64Reg scratch) {
// TODO: Make it work with more element sizes
// TODO: Optimize - there are shorter solutions for many values
MOVI2F(Rd, value, scratch);
ARM64Reg s = (ARM64Reg)(S0 + DecodeReg(Rd));
MOVI2F(s, value, scratch);
DUP(32, Rd, Rd, 0);
}
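
Since the removed and replacement lines are interleaved above, the resulting helper is worth spelling out: MOVI2F writes a scalar S register, so the fix first materializes the value through the S view of Rd, then broadcasts lane 0.

void ARM64FloatEmitter::MOVI2FDUP(ARM64Reg Rd, float value, ARM64Reg scratch) {
  // MOVI2F targets a scalar S register, so write through the S view of Rd...
  ARM64Reg s = (ARM64Reg)(S0 + DecodeReg(Rd));
  MOVI2F(s, value, scratch);
  // ...then duplicate lane 0 across all four 32-bit lanes of the vector.
  DUP(32, Rd, Rd, 0);
}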

View File

@@ -815,6 +815,9 @@ public:
void FSUB(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
void NOT(ARM64Reg Rd, ARM64Reg Rn);
void ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
void MOV(ARM64Reg Rd, ARM64Reg Rn) {
ORR(Rd, Rn, Rn);
}
void REV16(u8 size, ARM64Reg Rd, ARM64Reg Rn);
void REV32(u8 size, ARM64Reg Rd, ARM64Reg Rn);
void REV64(u8 size, ARM64Reg Rd, ARM64Reg Rn);
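
The new vector MOV is simply the architectural alias ORR Vd, Vn, Vn; AArch64 has no dedicated vector-register move instruction. The unit test added at the end of this commit pins down both the encoding and the disassembly:

fp.MOV(Q20, Q25);  // emits 4eb91f34, i.e. ORR with Rn == Rm,
                   // which the updated disassembler prints as "mov q20, q25"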

View File

@@ -491,6 +491,7 @@ static void FPandASIMD1(uint32_t w, uint64_t addr, Instruction *instr) {
char r = Q ? 'q' : 'd';
const char *opname = nullptr;
bool fp = false;
bool nosize = false;
if (U == 0) {
if (opcode < 0x18) {
opname = opnames000[opcode];
@@ -503,8 +504,10 @@ static void FPandASIMD1(uint32_t w, uint64_t addr, Instruction *instr) {
}
if (!opname && opcode == 3 && (sz & 2) == 0) {
opname = !(sz & 1) ? "and" : "bic";
nosize = true;
} else if (!opname && opcode == 3 && (sz & 2) == 2) {
opname = !(sz & 1) ? "orr" : "orn";
nosize = true;
}
} else if (U == 1) {
if (opcode < 0x18) {
@@ -518,14 +521,24 @@ static void FPandASIMD1(uint32_t w, uint64_t addr, Instruction *instr) {
}
if (!opname && opcode == 3 && (sz & 2) == 0) {
opname = !(sz & 1) ? "eor" : "bsl";
if (!strcmp(opname, "eor"))
nosize = true;
} else if (!opname && opcode == 3 && (sz & 2) == 2) {
opname = !(sz & 1) ? "bit" : "bif";
}
}
int size = (fp ? ((sz & 1) ? 64 : 32) : (sz << 3));
if (opname != nullptr) {
snprintf(instr->text, sizeof(instr->text), "%s.%d %c%d, %c%d, %c%d", opname, size, r, Rd, r, Rn, r, Rm);
if (!nosize) {
snprintf(instr->text, sizeof(instr->text), "%s.%d %c%d, %c%d, %c%d", opname, size, r, Rd, r, Rn, r, Rm);
} else {
if (!strcmp(opname, "orr") && Rn == Rm) {
snprintf(instr->text, sizeof(instr->text), "mov %c%d, %c%d", r, Rd, r, Rn);
} else {
snprintf(instr->text, sizeof(instr->text), "%s %c%d, %c%d, %c%d", opname, r, Rd, r, Rn, r, Rm);
}
}
} else {
snprintf(instr->text, sizeof(instr->text), "(asimd three-same %08x)", w);
}
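
To summarize the two changes here: the pure bitwise ops (and, bic, orr, orn, eor) operate on the whole register, so nosize suppresses the misleading element-size suffix, and orr with identical source registers is folded into its mov alias. A self-contained sketch of that aliasing rule (the helper name is invented for illustration, it is not part of DisArm64):

#include <string.h>

static const char *BitwiseOpAlias(const char *opname, int Rn, int Rm) {
  // "orr Vd, Vn, Vn" is the canonical encoding of a full vector-register move.
  if (!strcmp(opname, "orr") && Rn == Rm)
    return "mov";
  return opname;
}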

View File

@@ -255,14 +255,6 @@ JittedVertexDecoder VertexDecoderJitCache::Compile(const VertexDecoder &dec) {
}
}
// TODO: NEON skinning register mapping
// The matrix will be built in Q12-Q15.
// The temporary matrix to be added to the built matrix will be in Q8-Q11.
if (skinning) {
// TODO: Preload scale factors
}
if (dec.col) {
// Or LDB and skip the conditional? This is probably cheaper.
MOV(fullAlphaReg, 0xFF);

View File

@@ -43,6 +43,7 @@ static const ARM64Reg tempRegPtr = X3;
static const ARM64Reg tempReg2 = W4;
static const ARM64Reg tempReg3 = W5;
static const ARM64Reg scratchReg = W6;
static const ARM64Reg scratchReg64 = X6;
static const ARM64Reg scratchReg2 = W7;
static const ARM64Reg scratchReg3 = W8;
static const ARM64Reg fullAlphaReg = W12;
@@ -182,11 +183,13 @@ JittedVertexDecoder VertexDecoderJitCache::Compile(const VertexDecoder &dec) {
}
}
#if 1
if (dec.weighttype && g_Config.bSoftwareSkinning && dec.morphcount == 1) {
WARN_LOG(HLE, "vtxdec-arm64 does not support sw skinning");
SetCodePtr(const_cast<u8 *>(start));
return NULL;
}
#endif
// Add code to convert matrices to 4x4.
// Later we might want to do this when the matrices are loaded instead.
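
The #if 1 block added above makes Compile() refuse software-skinned vertex formats for now: it logs a warning, rewinds the emitter to the start of the function, and returns NULL. Presumably the caller treats a NULL decoder as a signal to fall back to the C++ path; a hypothetical caller-side sketch (the names are assumed, not shown in this diff):

JittedVertexDecoder jitted = cache->Compile(dec);
if (!jitted) {
  // The JIT bailed out (e.g. the sw-skinning guard above), so decode in plain C++.
  dec.DecodeVerts(decoded, verts, lowerBound, upperBound);
}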
@@ -202,18 +205,18 @@ JittedVertexDecoder VertexDecoderJitCache::Compile(const VertexDecoder &dec) {
fp.LDR(128, INDEX_UNSIGNED, Q5, X3, 12);
fp.LDR(128, INDEX_UNSIGNED, Q6, X3, 24);
fp.LDR(128, INDEX_UNSIGNED, Q7, X3, 36);
fp.FMUL(32, Q4, Q4, Q3);
fp.FMUL(32, Q5, Q5, Q3);
fp.FMUL(32, Q6, Q6, Q6);
fp.FMUL(32, Q7, Q7, Q7);
// First four matrices are in registers.
// First four matrices are in registers Q16+.
if (i < 4) {
fp.FMOV((ARM64Reg)(Q16 + i * 4), Q4);
fp.FMOV((ARM64Reg)(Q17 + i * 4), Q5);
fp.FMOV((ARM64Reg)(Q18 + i * 4), Q6);
fp.FMOV((ARM64Reg)(Q19 + i * 4), Q7);
fp.FMUL(32, (ARM64Reg)(Q16 + i * 4), Q4, Q3);
fp.FMUL(32, (ARM64Reg)(Q17 + i * 4), Q5, Q3);
fp.FMUL(32, (ARM64Reg)(Q18 + i * 4), Q6, Q6);
fp.FMUL(32, (ARM64Reg)(Q19 + i * 4), Q7, Q7);
ADDI2R(X4, X4, 16 * 4);
} else {
fp.FMUL(32, Q4, Q4, Q3);
fp.FMUL(32, Q5, Q5, Q3);
fp.FMUL(32, Q6, Q6, Q6);
fp.FMUL(32, Q7, Q7, Q7);
fp.STR(128, INDEX_UNSIGNED, Q4, X4, 0);
fp.STR(128, INDEX_UNSIGNED, Q5, X4, 16);
fp.STR(128, INDEX_UNSIGNED, Q6, X4, 32);
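
The restructured loop scales the first four bone matrices directly into their cache registers instead of detouring through Q4-Q7 and FMOV, and only spills matrices 4 and up to the buffer at X4. The register layout the code implies:

// Row r (0-3) of scaled bone matrix i:
//   i < 4  -> Q(16 + 4*i + r)   (bone 0 in Q16-Q19, ..., bone 3 in Q28-Q31)
//   i >= 4 -> stored to [X4 + 16*r], with X4 advanced 64 bytes per bone

One caveat worth flagging: in both branches, rows 2 and 3 are multiplied by themselves (Q6, Q6 and Q7, Q7) rather than by the scale factor in Q3, which is plausibly part of why the commit title admits the result is still broken.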
@@ -286,7 +289,7 @@ void VertexDecoderJitCache::Jit_ApplyWeights() {
// We construct a matrix in Q4-Q7
// We can use Q1 as temp.
if (dec_->nweights >= 2) {
MOVP2R(scratchReg, bones + 16 * 2);
MOVP2R(scratchReg64, bones + 16 * 2);
}
for (int i = 0; i < dec_->nweights; i++) {
@@ -304,34 +307,33 @@ void VertexDecoderJitCache::Jit_ApplyWeights() {
fp.FMLA(32, Q7, Q23, neonWeightRegsQ[0], 1);
break;
case 2:
fp.FMLA(32, Q4, Q24, neonWeightRegsQ[0], 1);
fp.FMLA(32, Q5, Q25, neonWeightRegsQ[0], 1);
fp.FMLA(32, Q6, Q26, neonWeightRegsQ[0], 1);
fp.FMLA(32, Q7, Q27, neonWeightRegsQ[0], 1);
fp.FMLA(32, Q4, Q24, neonWeightRegsQ[0], 2);
fp.FMLA(32, Q5, Q25, neonWeightRegsQ[0], 2);
fp.FMLA(32, Q6, Q26, neonWeightRegsQ[0], 2);
fp.FMLA(32, Q7, Q27, neonWeightRegsQ[0], 2);
break;
case 3:
fp.FMLA(32, Q4, Q28, neonWeightRegsQ[0], 1);
fp.FMLA(32, Q5, Q29, neonWeightRegsQ[0], 1);
fp.FMLA(32, Q6, Q30, neonWeightRegsQ[0], 1);
fp.FMLA(32, Q7, Q31, neonWeightRegsQ[0], 1);
fp.FMLA(32, Q4, Q28, neonWeightRegsQ[0], 3);
fp.FMLA(32, Q5, Q29, neonWeightRegsQ[0], 3);
fp.FMLA(32, Q6, Q30, neonWeightRegsQ[0], 3);
fp.FMLA(32, Q7, Q31, neonWeightRegsQ[0], 3);
break;
default:
// Matrices 2+ need to be loaded from memory.
// Wonder if we can free up one more register so we could get some parallelism.
// Actually Q3 is free if there are fewer than 5 weights...
fp.LDP(INDEX_SIGNED, Q8, Q9, scratchReg, 0);
fp.LDP(INDEX_SIGNED, Q10, Q11, scratchReg, 2 * 16);
fp.LDP(INDEX_SIGNED, Q8, Q9, scratchReg64, 0);
fp.LDP(INDEX_SIGNED, Q10, Q11, scratchReg64, 2 * 16);
fp.FMLA(32, Q4, Q8, neonWeightRegsQ[i >> 2], i & 3);
fp.FMLA(32, Q5, Q9, neonWeightRegsQ[i >> 2], i & 3);
fp.FMLA(32, Q6, Q10, neonWeightRegsQ[i >> 2], i & 3);
fp.FMLA(32, Q7, Q11, neonWeightRegsQ[i >> 2], i & 3);
ADDI2R(scratchReg, scratchReg, 4 * 16);
ADDI2R(scratchReg64, scratchReg64, 4 * 16);
break;
}
}
}
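
The substantive fix in this hunk is the lane index: weight i lives in lane (i & 3) of neonWeightRegsQ[i >> 2], but the unrolled cases for matrices 2 and 3 were reading lane 1. A scalar model of what the FMLA-by-lane sequence accumulates (a sketch, not the emitter API; case 0, not shown, presumably initializes Q4-Q7 rather than accumulating):

#include <string.h>

// Reference model of the weighted matrix blend built in Q4-Q7 above.
static void ApplyWeightsRef(float out[4][4], const float bones[][4][4],
                            const float *weights, int nweights) {
  memset(out, 0, 16 * sizeof(float));
  for (int i = 0; i < nweights; i++) {
    // In the JIT, weights[i] is lane (i & 3) of weight register (i >> 2).
    for (int r = 0; r < 4; r++)      // row r accumulates into Q(4 + r)
      for (int c = 0; c < 4; c++)
        out[r][c] += bones[i][r][c] * weights[i];
  }
}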
void VertexDecoderJitCache::Jit_WeightsU8() {
// Basic implementation - a byte at a time. TODO: Optimize
int j;
@@ -393,7 +395,7 @@ void VertexDecoderJitCache::Jit_WeightsU8Skin() {
}
// TODO: Get rid of this constant, use fixed point conversion
fp.MOVI2FDUP(Q3, by128, X0);
fp.MOVI2FDUP(Q3, by128, scratchReg);
fp.UXTL(8, neonScratchRegQ, neonScratchReg);
fp.UXTL(16, neonScratchRegQ, neonScratchReg);
fp.UCVTF(neonScratchRegQ, neonScratchRegQ);
@@ -425,7 +427,7 @@ void VertexDecoderJitCache::Jit_WeightsU16Skin() {
fp.LDR(64, INDEX_UNSIGNED, neonScratchReg, srcReg, 0);
break;
}
fp.MOVI2FDUP(Q3, by32768, X0);
fp.MOVI2FDUP(Q3, by32768, scratchReg);
fp.UXTL(16, neonScratchRegQ, neonScratchReg);
fp.UCVTF(neonScratchRegQ, neonScratchRegQ);
fp.FMUL(32, neonWeightRegsQ[0], neonScratchRegQ, Q3);
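
Both call sites also stop passing X0 as the MOVI2F scratch register, presumably because X0 is live in this JIT, and use the dedicated scratchReg instead. The surrounding UXTL/UCVTF/FMUL sequence widens the integer weights and normalizes them to float; a scalar sketch, assuming by128 == 1.0f/128.0f and by32768 == 1.0f/32768.0f (the names suggest as much, but the constants are not shown in this diff):

#include <stdint.h>

// Scalar model of the widen + convert + scale sequences above.
static inline float WeightU8ToFloat(uint8_t w)   { return (float)w * (1.0f / 128.0f); }
static inline float WeightU16ToFloat(uint16_t w) { return (float)w * (1.0f / 32768.0f); }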

View File

@@ -101,6 +101,12 @@ bool TestArm64Emitter() {
RET(CheckLast(emitter, "1e260061 fmov w1, s3"));
fp.FMOV(S1, W3);
RET(CheckLast(emitter, "1e270061 fmov s1, w3"));
/*
fp.FMOV(X1, D3);
RET(CheckLast(emitter, "9e660061 fmov x1, d3"));
fp.FMOV(D1, X3);
RET(CheckLast(emitter, "9e670061 fmov d1, x3"));
*/
fp.SCVTF(S13, S12);
RET(CheckLast(emitter, "5e21d98d scvtf s13, s12"));
fp.FCVTS(S13, S12, ROUND_N);
@@ -187,6 +193,8 @@ bool TestArm64Emitter() {
RET(CheckLast(emitter, "1e20c334 fabs s20, s25"));
fp.FMOV(S20, S25);
RET(CheckLast(emitter, "1e204334 fmov s20, s25"));
fp.MOV(Q20, Q25);
RET(CheckLast(emitter, "4eb91f34 mov q20, q25"));
fp.FCMP(S7);
RET(CheckLast(emitter, "1e2020e8 fcmp s7, #0.0"));
fp.FCMP(D7, D3);