@@ -645,7 +645,6 @@ oaknut::XReg A64Emitter::GetContextReg() { return X27; }
 oaknut::XReg A64Emitter::GetMembaseReg() { return X28; }

 void A64Emitter::ReloadContext() {
-  // mov(GetContextReg(), qword[rsp + StackLayout::GUEST_CTX_HOME]);
   LDR(GetContextReg(), SP, StackLayout::GUEST_CTX_HOME);
 }

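A note on the LDR form used here (my addition, not from the PR): the unsigned-immediate encoding of a 64-bit LDR/STR scales the offset by 8 into a 12-bit field, so StackLayout::GUEST_CTX_HOME has to be a multiple of 8 and at most 32760 to encode at all. A minimal compile-time guard sketch, with kGuestCtxHome as a hypothetical stand-in for the real constant:

    #include <cstdint>

    // Hypothetical stand-in for StackLayout::GUEST_CTX_HOME.
    constexpr intptr_t kGuestCtxHome = 0x20;

    // 64-bit LDR/STR unsigned immediates are 12 bits, scaled by 8:
    // valid offsets are multiples of 8 in [0, 32760].
    static_assert(kGuestCtxHome % 8 == 0 && kGuestCtxHome <= 32760,
                  "offset must fit a scaled LDR/STR immediate");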
@@ -667,20 +666,13 @@ bool A64Emitter::ConstantFitsIn32Reg(uint64_t v) {
 
 void A64Emitter::MovMem64(const oaknut::XRegSp& addr, intptr_t offset,
                           uint64_t v) {
-  // if ((v & ~0x7FFFFFFF) == 0) {
-  //   // Fits under 31 bits, so just load using normal mov.
-  //   mov(qword[addr], v);
-  // } else if ((v & ~0x7FFFFFFF) == ~0x7FFFFFFF) {
-  //   // Negative number that fits in 32bits.
-  //   mov(qword[addr], v);
-  // } else if (!(v >> 32)) {
-  //   // All high bits are zero. It'd be nice if we had a way to load a 32bit
-  //   // immediate without sign extending!
-  //   // TODO(benvanik): this is super common, find a better way.
-  //   mov(dword[addr], static_cast<uint32_t>(v));
-  //   mov(dword[addr + 4], 0);
-  // } else
-  {
+  if (v == 0) {
+    STR(XZR, addr, offset);
+  } else if (!(v >> 32)) {
+    // All high bits are zero; a 32-bit MOV into W0 zero-extends into X0.
+    MOV(W0, static_cast<uint32_t>(v));
+    STR(X0, addr, offset);
+  } else {
     // 64bit number that needs double movs.
     MOV(X0, v);
     STR(X0, addr, offset);
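To sanity-check the new paths, here is a minimal standalone sketch that JITs the same MOV/STR sequence the 64-bit branch emits and runs it, following the CodeBlock/CodeGenerator pattern from oaknut's README; the buffer size, function signature, and test constant are my assumptions, not part of this PR:

    #include <cstdint>
    #include <oaknut/code_block.hpp>
    #include <oaknut/oaknut.hpp>

    int main() {
        oaknut::CodeBlock mem{4096};
        oaknut::CodeGenerator code{mem.ptr()};
        using namespace oaknut::util;

        // Emitted function: void fn(uint64_t* dst); dst arrives in X0.
        auto fn = code.xptr<void (*)(uint64_t*)>();

        mem.unprotect();
        code.MOV(X1, UINT64_C(0x123456789ABCDEF0));  // pseudo-MOV, expands to MOVZ/MOVK
        code.STR(X1, X0, 0);                         // same STR form as MovMem64
        code.RET();
        mem.protect();
        mem.invalidate_all();

        uint64_t out = 0;
        fn(&out);
        return out == UINT64_C(0x123456789ABCDEF0) ? 0 : 1;
    }

Note how the zero case in the diff avoids even this much: storing XZR writes the value without materializing a constant in a scratch register at all.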