diff --git a/js/src/nanojit/NativeSH4-auto-generated.h b/js/src/nanojit/NativeSH4-auto-generated.h new file mode 100644 index 000000000000..9e21b32aaa9b --- /dev/null +++ b/js/src/nanojit/NativeSH4-auto-generated.h @@ -0,0 +1,1726 @@ +/* THIS FILE IS AUTO-GENERATED */ + +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is [Open Source Virtual Machine]. + * + * The Initial Developer of the Original Code is + * STMicroelectronics. + * Portions created by the Initial Developer are Copyright (C) 2010 + * the Initial Developer. All Rights Reserved. + * + * Contributor(s): + * Cédric VINCENT for STMicroelectronics + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. 
+ * + * ***** END LICENSE BLOCK ***** */ + +#define SH4_CHECK_RANGE_add_imm(imm) ((imm) >= -128 && (imm) <= 127) + +#define FITS_SH4_add_imm(imm) (SH4_CHECK_RANGE_add_imm(imm)) + + inline void Assembler::SH4_add_imm(int imm, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && SH4_CHECK_RANGE_add_imm(imm)); + SH4_emit16((0x7 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((imm & 0xFF) << 0)); + asm_output("add_imm %d, R%d", imm, Rx.n); + } + + inline void Assembler::SH4_add(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xC << 0)); + asm_output("add R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_addc(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xE << 0)); + asm_output("addc R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_addv(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xF << 0)); + asm_output("addv R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_and_imm_R0(imm) ((imm) >= 0 && (imm) <= 255) + +#define FITS_SH4_and_imm_R0(imm) (SH4_CHECK_RANGE_and_imm_R0(imm)) + + inline void Assembler::SH4_and_imm_R0(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_and_imm_R0(imm)); + SH4_emit16((0xC << 12) | (0x9 << 8) | ((imm & 0xFF) << 0)); + asm_output("and_imm_R0 %d", imm); + } + + inline void Assembler::SH4_and(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x9 << 0)); + asm_output("and R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_andb_imm_dispR0GBR(imm) ((imm) >= 0 && (imm) <= 255) + +#define FITS_SH4_andb_imm_dispR0GBR(imm) (SH4_CHECK_RANGE_andb_imm_dispR0GBR(imm)) + + inline void Assembler::SH4_andb_imm_dispR0GBR(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_andb_imm_dispR0GBR(imm)); + SH4_emit16((0xC << 12) | (0xD << 8) | ((imm & 0xFF) << 0)); + asm_output("andb_imm_dispR0GBR %d", imm); + } + +#define SH4_CHECK_RANGE_bra(imm) ((imm) >= -4096 && (imm) <= 4094) + +#define SH4_CHECK_ALIGN_bra(imm) (((imm) & 0x1) == 0) + +#define FITS_SH4_bra(imm) (SH4_CHECK_RANGE_bra((imm) + 2) && SH4_CHECK_ALIGN_bra((imm) + 2)) + + inline void Assembler::SH4_bra(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_bra(imm + 2) && SH4_CHECK_ALIGN_bra(imm + 2)); + SH4_emit16((0xA << 12) | (((imm & 0x1FFE) >> 1) << 0)); + asm_output("bra %d", imm); + } + +#define SH4_CHECK_RANGE_bsr(imm) ((imm) >= -4096 && (imm) <= 4094) + +#define SH4_CHECK_ALIGN_bsr(imm) (((imm) & 0x1) == 0) + +#define FITS_SH4_bsr(imm) (SH4_CHECK_RANGE_bsr((imm) + 2) && SH4_CHECK_ALIGN_bsr((imm) + 2)) + + inline void Assembler::SH4_bsr(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_bsr(imm + 2) && SH4_CHECK_ALIGN_bsr(imm + 2)); + SH4_emit16((0xB << 12) | (((imm & 0x1FFE) >> 1) << 0)); + asm_output("bsr %d", imm); + } + +#define SH4_CHECK_RANGE_bt(imm) ((imm) >= -256 && (imm) <= 254) + +#define SH4_CHECK_ALIGN_bt(imm) (((imm) & 0x1) == 0) + +#define FITS_SH4_bt(imm) (SH4_CHECK_RANGE_bt((imm) + 2) && SH4_CHECK_ALIGN_bt((imm) + 2)) + + inline void Assembler::SH4_bt(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_bt(imm + 2) && SH4_CHECK_ALIGN_bt(imm + 2)); + SH4_emit16((0x8 << 12) | (0x9 << 8) | (((imm & 0x1FE) >> 1) << 0)); + asm_output("bt %d", imm); + 
} + +#define SH4_CHECK_RANGE_bf(imm) ((imm) >= -256 && (imm) <= 254) + +#define SH4_CHECK_ALIGN_bf(imm) (((imm) & 0x1) == 0) + +#define FITS_SH4_bf(imm) (SH4_CHECK_RANGE_bf((imm) + 2) && SH4_CHECK_ALIGN_bf((imm) + 2)) + + inline void Assembler::SH4_bf(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_bf(imm + 2) && SH4_CHECK_ALIGN_bf(imm + 2)); + SH4_emit16((0x8 << 12) | (0xB << 8) | (((imm & 0x1FE) >> 1) << 0)); + asm_output("bf %d", imm); + } + +#define SH4_CHECK_RANGE_bts(imm) ((imm) >= -256 && (imm) <= 254) + +#define SH4_CHECK_ALIGN_bts(imm) (((imm) & 0x1) == 0) + +#define FITS_SH4_bts(imm) (SH4_CHECK_RANGE_bts((imm) + 2) && SH4_CHECK_ALIGN_bts((imm) + 2)) + + inline void Assembler::SH4_bts(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_bts(imm + 2) && SH4_CHECK_ALIGN_bts(imm + 2)); + SH4_emit16((0x8 << 12) | (0xD << 8) | (((imm & 0x1FE) >> 1) << 0)); + asm_output("bts %d", imm); + } + +#define SH4_CHECK_RANGE_bfs(imm) ((imm) >= -256 && (imm) <= 254) + +#define SH4_CHECK_ALIGN_bfs(imm) (((imm) & 0x1) == 0) + +#define FITS_SH4_bfs(imm) (SH4_CHECK_RANGE_bfs((imm) + 2) && SH4_CHECK_ALIGN_bfs((imm) + 2)) + + inline void Assembler::SH4_bfs(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_bfs(imm + 2) && SH4_CHECK_ALIGN_bfs(imm + 2)); + SH4_emit16((0x8 << 12) | (0xF << 8) | (((imm & 0x1FE) >> 1) << 0)); + asm_output("bfs %d", imm); + } + + inline void Assembler::SH4_clrmac() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x2 << 4) | (0x8 << 0)); + asm_output("clrmac"); + } + + inline void Assembler::SH4_clrs() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x4 << 4) | (0x8 << 0)); + asm_output("clrs"); + } + + inline void Assembler::SH4_clrt() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x0 << 4) | (0x8 << 0)); + asm_output("clrt"); + } + +#define SH4_CHECK_RANGE_cmpeq_imm_R0(imm) ((imm) >= -128 && (imm) <= 127) + +#define FITS_SH4_cmpeq_imm_R0(imm) (SH4_CHECK_RANGE_cmpeq_imm_R0(imm)) + + inline void Assembler::SH4_cmpeq_imm_R0(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_cmpeq_imm_R0(imm)); + SH4_emit16((0x8 << 12) | (0x8 << 8) | ((imm & 0xFF) << 0)); + asm_output("cmpeq_imm_R0 %d", imm); + } + + inline void Assembler::SH4_cmpeq(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x0 << 0)); + asm_output("cmpeq R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_cmpge(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x3 << 0)); + asm_output("cmpge R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_cmpgt(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x7 << 0)); + asm_output("cmpgt R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_cmphi(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x6 << 0)); + asm_output("cmphi R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_cmphs(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x2 << 0)); + asm_output("cmphs R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_cmppl(Register Rx) { + 
NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0x5 << 0)); + asm_output("cmppl R%d", Rx.n); + } + + inline void Assembler::SH4_cmppz(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0x1 << 0)); + asm_output("cmppz R%d", Rx.n); + } + + inline void Assembler::SH4_cmpstr(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xC << 0)); + asm_output("cmpstr R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_div0s(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x7 << 0)); + asm_output("div0s R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_div0u() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x1 << 4) | (0x9 << 0)); + asm_output("div0u"); + } + + inline void Assembler::SH4_div1(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x4 << 0)); + asm_output("div1 R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_extsb(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xE << 0)); + asm_output("extsb R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_extsw(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xF << 0)); + asm_output("extsw R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_extub(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xC << 0)); + asm_output("extub R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_extuw(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xD << 0)); + asm_output("extuw R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_icbi_indRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0xE << 4) | (0x3 << 0)); + asm_output("icbi_indRx R%d", Rx.n); + } + + inline void Assembler::SH4_jmp_indRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0xB << 0)); + asm_output("jmp_indRx R%d", Rx.n); + } + + inline void Assembler::SH4_jsr_indRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0xB << 0)); + asm_output("jsr_indRx R%d", Rx.n); + } + + inline void Assembler::SH4_ldc_SR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0xE << 0)); + asm_output("ldc_SR R%d", Rx.n); + } + + inline void Assembler::SH4_ldc_GBR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0xE << 0)); + asm_output("ldc_GBR R%d", Rx.n); + } + + inline void Assembler::SH4_ldc_SGR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 
12) | ((REGNUM(Rx) & 0xF) << 8) | (0x3 << 4) | (0xA << 0)); + asm_output("ldc_SGR R%d", Rx.n); + } + + inline void Assembler::SH4_ldc_VBR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0xE << 0)); + asm_output("ldc_VBR R%d", Rx.n); + } + + inline void Assembler::SH4_ldc_SSR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x3 << 4) | (0xE << 0)); + asm_output("ldc_SSR R%d", Rx.n); + } + + inline void Assembler::SH4_ldc_SPC(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x4 << 4) | (0xE << 0)); + asm_output("ldc_SPC R%d", Rx.n); + } + + inline void Assembler::SH4_ldc_DBR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0xF << 4) | (0xA << 0)); + asm_output("ldc_DBR R%d", Rx.n); + } + +#define SH4_CHECK_RANGE_ldc_bank(imm) ((imm) >= 0 && (imm) <= 7) + +#define FITS_SH4_ldc_bank(imm) (SH4_CHECK_RANGE_ldc_bank(imm)) + + inline void Assembler::SH4_ldc_bank(Register Rx, int imm) { + NanoAssert(1 && REGNUM(Rx) <= 15 && SH4_CHECK_RANGE_ldc_bank(imm)); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((imm & 0x7) << 4) | (0xE << 0)); + asm_output("ldc_bank R%d, %d", Rx.n, imm); + } + + inline void Assembler::SH4_ldcl_incRx_SR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x7 << 0)); + asm_output("ldcl_incRx_SR R%d", Rx.n); + } + + inline void Assembler::SH4_ldcl_incRx_GBR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0x7 << 0)); + asm_output("ldcl_incRx_GBR R%d", Rx.n); + } + + inline void Assembler::SH4_ldcl_incRx_VBR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x7 << 0)); + asm_output("ldcl_incRx_VBR R%d", Rx.n); + } + + inline void Assembler::SH4_ldcl_incRx_SGR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x3 << 4) | (0x6 << 0)); + asm_output("ldcl_incRx_SGR R%d", Rx.n); + } + + inline void Assembler::SH4_ldcl_incRx_SSR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x3 << 4) | (0x7 << 0)); + asm_output("ldcl_incRx_SSR R%d", Rx.n); + } + + inline void Assembler::SH4_ldcl_incRx_SPC(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x4 << 4) | (0x7 << 0)); + asm_output("ldcl_incRx_SPC R%d", Rx.n); + } + + inline void Assembler::SH4_ldcl_incRx_DBR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0xF << 4) | (0x6 << 0)); + asm_output("ldcl_incRx_DBR R%d", Rx.n); + } + +#define SH4_CHECK_RANGE_ldcl_incRx_bank(imm) ((imm) >= 0 && (imm) <= 7) + +#define FITS_SH4_ldcl_incRx_bank(imm) (SH4_CHECK_RANGE_ldcl_incRx_bank(imm)) + + inline void Assembler::SH4_ldcl_incRx_bank(Register Rx, int imm) { + NanoAssert(1 && REGNUM(Rx) <= 15 && SH4_CHECK_RANGE_ldcl_incRx_bank(imm)); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((imm & 0x7) << 4) | (0x7 << 0)); + asm_output("ldcl_incRx_bank R%d, %d", Rx.n, imm); + } + + inline void Assembler::SH4_lds_MACH(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0xA << 0)); + 
asm_output("lds_MACH R%d", Rx.n); + } + + inline void Assembler::SH4_lds_MACL(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0xA << 0)); + asm_output("lds_MACL R%d", Rx.n); + } + + inline void Assembler::SH4_lds_PR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0xA << 0)); + asm_output("lds_PR R%d", Rx.n); + } + + inline void Assembler::SH4_lds_FPUL(Register Ry) { + NanoAssert(1 && REGNUM(Ry) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Ry) & 0xF) << 8) | (0x5 << 4) | (0xA << 0)); + asm_output("lds_FPUL R%d", Ry.n); + } + + inline void Assembler::SH4_lds_FPSCR(Register Ry) { + NanoAssert(1 && REGNUM(Ry) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Ry) & 0xF) << 8) | (0x6 << 4) | (0xA << 0)); + asm_output("lds_FPSCR R%d", Ry.n); + } + + inline void Assembler::SH4_ldsl_incRx_MACH(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x6 << 0)); + asm_output("ldsl_incRx_MACH R%d", Rx.n); + } + + inline void Assembler::SH4_ldsl_incRx_MACL(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0x6 << 0)); + asm_output("ldsl_incRx_MACL R%d", Rx.n); + } + + inline void Assembler::SH4_ldsl_incRx_PR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x6 << 0)); + asm_output("ldsl_incRx_PR R%d", Rx.n); + } + + inline void Assembler::SH4_ldsl_incRy_FPUL(Register Ry) { + NanoAssert(1 && REGNUM(Ry) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Ry) & 0xF) << 8) | (0x5 << 4) | (0x6 << 0)); + asm_output("ldsl_incRy_FPUL R%d", Ry.n); + } + + inline void Assembler::SH4_ldsl_incRy_FPSCR(Register Ry) { + NanoAssert(1 && REGNUM(Ry) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Ry) & 0xF) << 8) | (0x6 << 4) | (0x6 << 0)); + asm_output("ldsl_incRy_FPSCR R%d", Ry.n); + } + + inline void Assembler::SH4_ldtlb() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x3 << 4) | (0x8 << 0)); + asm_output("ldtlb"); + } + + inline void Assembler::SH4_macw_incRy_incRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xF << 0)); + asm_output("macw_incRy_incRx R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_mov_imm(imm) ((imm) >= -128 && (imm) <= 127) + +#define FITS_SH4_mov_imm(imm) (SH4_CHECK_RANGE_mov_imm(imm)) + + inline void Assembler::SH4_mov_imm(int imm, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && SH4_CHECK_RANGE_mov_imm(imm)); + SH4_emit16((0xE << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((imm & 0xFF) << 0)); + asm_output("mov_imm %d, R%d", imm, Rx.n); + } + + inline void Assembler::SH4_mov(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x3 << 0)); + asm_output("mov R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movb_dispR0Rx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x4 << 0)); + asm_output("movb_dispR0Rx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movb_decRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | 
((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x4 << 0)); + asm_output("movb_decRx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movb_indRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x0 << 0)); + asm_output("movb_indRx R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_movb_dispRy_R0(imm) ((imm) >= 0 && (imm) <= 15) + +#define FITS_SH4_movb_dispRy_R0(imm) (SH4_CHECK_RANGE_movb_dispRy_R0(imm)) + + inline void Assembler::SH4_movb_dispRy_R0(int imm, Register Ry) { + NanoAssert(1 && REGNUM(Ry) <= 15 && SH4_CHECK_RANGE_movb_dispRy_R0(imm)); + SH4_emit16((0x8 << 12) | (0x4 << 8) | ((REGNUM(Ry) & 0xF) << 4) | ((imm & 0xF) << 0)); + asm_output("movb_dispRy_R0 %d, R%d", imm, Ry.n); + } + +#define SH4_CHECK_RANGE_movb_dispGBR_R0(imm) ((imm) >= 0 && (imm) <= 255) + +#define FITS_SH4_movb_dispGBR_R0(imm) (SH4_CHECK_RANGE_movb_dispGBR_R0(imm)) + + inline void Assembler::SH4_movb_dispGBR_R0(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_movb_dispGBR_R0(imm)); + SH4_emit16((0xC << 12) | (0x4 << 8) | ((imm & 0xFF) << 0)); + asm_output("movb_dispGBR_R0 %d", imm); + } + + inline void Assembler::SH4_movb_dispR0Ry(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xC << 0)); + asm_output("movb_dispR0Ry R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movb_incRy(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x4 << 0)); + asm_output("movb_incRy R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movb_indRy(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x0 << 0)); + asm_output("movb_indRy R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_movb_R0_dispRx(imm) ((imm) >= 0 && (imm) <= 15) + +#define FITS_SH4_movb_R0_dispRx(imm) (SH4_CHECK_RANGE_movb_R0_dispRx(imm)) + + inline void Assembler::SH4_movb_R0_dispRx(int imm, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && SH4_CHECK_RANGE_movb_R0_dispRx(imm)); + SH4_emit16((0x8 << 12) | (0x0 << 8) | ((REGNUM(Rx) & 0xF) << 4) | ((imm & 0xF) << 0)); + asm_output("movb_R0_dispRx %d, R%d", imm, Rx.n); + } + +#define SH4_CHECK_RANGE_movb_R0_dispGBR(imm) ((imm) >= 0 && (imm) <= 255) + +#define FITS_SH4_movb_R0_dispGBR(imm) (SH4_CHECK_RANGE_movb_R0_dispGBR(imm)) + + inline void Assembler::SH4_movb_R0_dispGBR(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_movb_R0_dispGBR(imm)); + SH4_emit16((0xC << 12) | (0x0 << 8) | ((imm & 0xFF) << 0)); + asm_output("movb_R0_dispGBR %d", imm); + } + +#define SH4_CHECK_RANGE_movl_dispRx(imm) ((imm) >= 0 && (imm) <= 60) + +#define SH4_CHECK_ALIGN_movl_dispRx(imm) (((imm) & 0x3) == 0) + +#define FITS_SH4_movl_dispRx(imm) (SH4_CHECK_RANGE_movl_dispRx(imm) && SH4_CHECK_ALIGN_movl_dispRx(imm)) + + inline void Assembler::SH4_movl_dispRx(Register Ry, int imm, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15 && SH4_CHECK_RANGE_movl_dispRx(imm) && SH4_CHECK_ALIGN_movl_dispRx(imm)); + SH4_emit16((0x1 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (((imm & 0x3C) >> 2) << 0)); + asm_output("movl_dispRx R%d, %d, R%d", Ry.n, imm, Rx.n); + } + + inline void 
Assembler::SH4_movl_dispR0Rx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x6 << 0)); + asm_output("movl_dispR0Rx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movl_decRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x6 << 0)); + asm_output("movl_decRx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movl_indRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x2 << 0)); + asm_output("movl_indRx R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_movl_dispRy(imm) ((imm) >= 0 && (imm) <= 60) + +#define SH4_CHECK_ALIGN_movl_dispRy(imm) (((imm) & 0x3) == 0) + +#define FITS_SH4_movl_dispRy(imm) (SH4_CHECK_RANGE_movl_dispRy(imm) && SH4_CHECK_ALIGN_movl_dispRy(imm)) + + inline void Assembler::SH4_movl_dispRy(int imm, Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15 && SH4_CHECK_RANGE_movl_dispRy(imm) && SH4_CHECK_ALIGN_movl_dispRy(imm)); + SH4_emit16((0x5 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (((imm & 0x3C) >> 2) << 0)); + asm_output("movl_dispRy %d, R%d, R%d", imm, Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_movl_dispGBR_R0(imm) ((imm) >= 0 && (imm) <= 1020) + +#define SH4_CHECK_ALIGN_movl_dispGBR_R0(imm) (((imm) & 0x3) == 0) + +#define FITS_SH4_movl_dispGBR_R0(imm) (SH4_CHECK_RANGE_movl_dispGBR_R0(imm) && SH4_CHECK_ALIGN_movl_dispGBR_R0(imm)) + + inline void Assembler::SH4_movl_dispGBR_R0(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_movl_dispGBR_R0(imm) && SH4_CHECK_ALIGN_movl_dispGBR_R0(imm)); + SH4_emit16((0xC << 12) | (0x6 << 8) | (((imm & 0x3FC) >> 2) << 0)); + asm_output("movl_dispGBR_R0 %d", imm); + } + +#define SH4_CHECK_RANGE_movl_dispPC(imm) ((imm) >= 0 && (imm) <= 1020) + +#define SH4_CHECK_ALIGN_movl_dispPC(imm) (((imm) & 0x3) == 0) + +#define FITS_SH4_movl_dispPC(imm) (SH4_CHECK_RANGE_movl_dispPC(imm) && SH4_CHECK_ALIGN_movl_dispPC(imm)) + + inline void Assembler::SH4_movl_dispPC(int imm, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && SH4_CHECK_RANGE_movl_dispPC(imm) && SH4_CHECK_ALIGN_movl_dispPC(imm)); + SH4_emit16((0xD << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((imm & 0x3FC) >> 2) << 0)); + asm_output("movl_dispPC %d, R%d", imm, Rx.n); + } + + inline void Assembler::SH4_movl_dispR0Ry(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xE << 0)); + asm_output("movl_dispR0Ry R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movl_incRy(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x6 << 0)); + asm_output("movl_incRy R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movl_indRy(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x2 << 0)); + asm_output("movl_indRy R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_movl_R0_dispGBR(imm) ((imm) >= 0 && (imm) <= 1020) + +#define SH4_CHECK_ALIGN_movl_R0_dispGBR(imm) (((imm) & 0x3) == 0) + +#define 
FITS_SH4_movl_R0_dispGBR(imm) (SH4_CHECK_RANGE_movl_R0_dispGBR(imm) && SH4_CHECK_ALIGN_movl_R0_dispGBR(imm)) + + inline void Assembler::SH4_movl_R0_dispGBR(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_movl_R0_dispGBR(imm) && SH4_CHECK_ALIGN_movl_R0_dispGBR(imm)); + SH4_emit16((0xC << 12) | (0x2 << 8) | (((imm & 0x3FC) >> 2) << 0)); + asm_output("movl_R0_dispGBR %d", imm); + } + + inline void Assembler::SH4_movw_dispR0Rx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x5 << 0)); + asm_output("movw_dispR0Rx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movw_decRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x5 << 0)); + asm_output("movw_decRx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movw_indRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x1 << 0)); + asm_output("movw_indRx R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_movw_dispRy_R0(imm) ((imm) >= 0 && (imm) <= 30) + +#define SH4_CHECK_ALIGN_movw_dispRy_R0(imm) (((imm) & 0x1) == 0) + +#define FITS_SH4_movw_dispRy_R0(imm) (SH4_CHECK_RANGE_movw_dispRy_R0(imm) && SH4_CHECK_ALIGN_movw_dispRy_R0(imm)) + + inline void Assembler::SH4_movw_dispRy_R0(int imm, Register Ry) { + NanoAssert(1 && REGNUM(Ry) <= 15 && SH4_CHECK_RANGE_movw_dispRy_R0(imm) && SH4_CHECK_ALIGN_movw_dispRy_R0(imm)); + SH4_emit16((0x8 << 12) | (0x5 << 8) | ((REGNUM(Ry) & 0xF) << 4) | (((imm & 0x1E) >> 1) << 0)); + asm_output("movw_dispRy_R0 %d, R%d", imm, Ry.n); + } + +#define SH4_CHECK_RANGE_movw_dispGBR_R0(imm) ((imm) >= 0 && (imm) <= 510) + +#define SH4_CHECK_ALIGN_movw_dispGBR_R0(imm) (((imm) & 0x1) == 0) + +#define FITS_SH4_movw_dispGBR_R0(imm) (SH4_CHECK_RANGE_movw_dispGBR_R0(imm) && SH4_CHECK_ALIGN_movw_dispGBR_R0(imm)) + + inline void Assembler::SH4_movw_dispGBR_R0(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_movw_dispGBR_R0(imm) && SH4_CHECK_ALIGN_movw_dispGBR_R0(imm)); + SH4_emit16((0xC << 12) | (0x5 << 8) | (((imm & 0x1FE) >> 1) << 0)); + asm_output("movw_dispGBR_R0 %d", imm); + } + +#define SH4_CHECK_RANGE_movw_dispPC(imm) ((imm) >= 0 && (imm) <= 510) + +#define SH4_CHECK_ALIGN_movw_dispPC(imm) (((imm) & 0x1) == 0) + +#define FITS_SH4_movw_dispPC(imm) (SH4_CHECK_RANGE_movw_dispPC(imm) && SH4_CHECK_ALIGN_movw_dispPC(imm)) + + inline void Assembler::SH4_movw_dispPC(int imm, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && SH4_CHECK_RANGE_movw_dispPC(imm) && SH4_CHECK_ALIGN_movw_dispPC(imm)); + SH4_emit16((0x9 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((imm & 0x1FE) >> 1) << 0)); + asm_output("movw_dispPC %d, R%d", imm, Rx.n); + } + + inline void Assembler::SH4_movw_dispR0Ry(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xD << 0)); + asm_output("movw_dispR0Ry R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movw_incRy(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x5 << 0)); + asm_output("movw_incRy R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_movw_indRy(Register Ry, Register Rx) { + NanoAssert(1 && 
REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x1 << 0)); + asm_output("movw_indRy R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_movw_R0_dispRx(imm) ((imm) >= 0 && (imm) <= 30) + +#define SH4_CHECK_ALIGN_movw_R0_dispRx(imm) (((imm) & 0x1) == 0) + +#define FITS_SH4_movw_R0_dispRx(imm) (SH4_CHECK_RANGE_movw_R0_dispRx(imm) && SH4_CHECK_ALIGN_movw_R0_dispRx(imm)) + + inline void Assembler::SH4_movw_R0_dispRx(int imm, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && SH4_CHECK_RANGE_movw_R0_dispRx(imm) && SH4_CHECK_ALIGN_movw_R0_dispRx(imm)); + SH4_emit16((0x8 << 12) | (0x1 << 8) | ((REGNUM(Rx) & 0xF) << 4) | (((imm & 0x1E) >> 1) << 0)); + asm_output("movw_R0_dispRx %d, R%d", imm, Rx.n); + } + +#define SH4_CHECK_RANGE_movw_R0_dispGBR(imm) ((imm) >= 0 && (imm) <= 510) + +#define SH4_CHECK_ALIGN_movw_R0_dispGBR(imm) (((imm) & 0x1) == 0) + +#define FITS_SH4_movw_R0_dispGBR(imm) (SH4_CHECK_RANGE_movw_R0_dispGBR(imm) && SH4_CHECK_ALIGN_movw_R0_dispGBR(imm)) + + inline void Assembler::SH4_movw_R0_dispGBR(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_movw_R0_dispGBR(imm) && SH4_CHECK_ALIGN_movw_R0_dispGBR(imm)); + SH4_emit16((0xC << 12) | (0x1 << 8) | (((imm & 0x1FE) >> 1) << 0)); + asm_output("movw_R0_dispGBR %d", imm); + } + +#define SH4_CHECK_RANGE_mova_dispPC_R0(imm) ((imm) >= 0 && (imm) <= 1020) + +#define SH4_CHECK_ALIGN_mova_dispPC_R0(imm) (((imm) & 0x3) == 0) + +#define FITS_SH4_mova_dispPC_R0(imm) (SH4_CHECK_RANGE_mova_dispPC_R0(imm) && SH4_CHECK_ALIGN_mova_dispPC_R0(imm)) + + inline void Assembler::SH4_mova_dispPC_R0(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_mova_dispPC_R0(imm) && SH4_CHECK_ALIGN_mova_dispPC_R0(imm)); + SH4_emit16((0xC << 12) | (0x7 << 8) | (((imm & 0x3FC) >> 2) << 0)); + asm_output("mova_dispPC_R0 %d", imm); + } + + inline void Assembler::SH4_movcal_R0_indRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0xC << 4) | (0x3 << 0)); + asm_output("movcal_R0_indRx R%d", Rx.n); + } + + inline void Assembler::SH4_movcol_R0_indRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x7 << 4) | (0x3 << 0)); + asm_output("movcol_R0_indRx R%d", Rx.n); + } + + inline void Assembler::SH4_movlil_indRy_R0(Register Ry) { + NanoAssert(1 && REGNUM(Ry) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Ry) & 0xF) << 8) | (0x6 << 4) | (0x3 << 0)); + asm_output("movlil_indRy_R0 R%d", Ry.n); + } + + inline void Assembler::SH4_movt(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x9 << 0)); + asm_output("movt R%d", Rx.n); + } + + inline void Assembler::SH4_movual_indRy_R0(Register Ry) { + NanoAssert(1 && REGNUM(Ry) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Ry) & 0xF) << 8) | (0xA << 4) | (0x9 << 0)); + asm_output("movual_indRy_R0 R%d", Ry.n); + } + + inline void Assembler::SH4_movual_incRy_R0(Register Ry) { + NanoAssert(1 && REGNUM(Ry) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Ry) & 0xF) << 8) | (0xE << 4) | (0x9 << 0)); + asm_output("movual_incRy_R0 R%d", Ry.n); + } + + inline void Assembler::SH4_mulsw(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xF << 0)); + asm_output("mulsw R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_muls(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 
&& REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xF << 0)); + asm_output("muls R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_mull(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x7 << 0)); + asm_output("mull R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_muluw(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xE << 0)); + asm_output("muluw R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_mulu(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xE << 0)); + asm_output("mulu R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_neg(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xB << 0)); + asm_output("neg R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_negc(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xA << 0)); + asm_output("negc R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_nop() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x0 << 4) | (0x9 << 0)); + asm_output("nop"); + } + + inline void Assembler::SH4_not(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x7 << 0)); + asm_output("not R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_ocbi_indRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x9 << 4) | (0x3 << 0)); + asm_output("ocbi_indRx R%d", Rx.n); + } + + inline void Assembler::SH4_ocbp_indRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0xA << 4) | (0x3 << 0)); + asm_output("ocbp_indRx R%d", Rx.n); + } + + inline void Assembler::SH4_ocbwb_indRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0xB << 4) | (0x3 << 0)); + asm_output("ocbwb_indRx R%d", Rx.n); + } + +#define SH4_CHECK_RANGE_or_imm_R0(imm) ((imm) >= 0 && (imm) <= 255) + +#define FITS_SH4_or_imm_R0(imm) (SH4_CHECK_RANGE_or_imm_R0(imm)) + + inline void Assembler::SH4_or_imm_R0(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_or_imm_R0(imm)); + SH4_emit16((0xC << 12) | (0xB << 8) | ((imm & 0xFF) << 0)); + asm_output("or_imm_R0 %d", imm); + } + + inline void Assembler::SH4_or(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xB << 0)); + asm_output("or R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_orb_imm_dispR0GBR(imm) ((imm) >= 0 && (imm) <= 255) + +#define FITS_SH4_orb_imm_dispR0GBR(imm) (SH4_CHECK_RANGE_orb_imm_dispR0GBR(imm)) + + inline void Assembler::SH4_orb_imm_dispR0GBR(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_orb_imm_dispR0GBR(imm)); + SH4_emit16((0xC << 12) | (0xF << 8) | ((imm & 0xFF) << 0)); + asm_output("orb_imm_dispR0GBR %d", imm); + } + + inline 
void Assembler::SH4_pref_indRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x8 << 4) | (0x3 << 0)); + asm_output("pref_indRx R%d", Rx.n); + } + + inline void Assembler::SH4_prefi_indRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0xD << 4) | (0x3 << 0)); + asm_output("prefi_indRx R%d", Rx.n); + } + + inline void Assembler::SH4_rotcl(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x4 << 0)); + asm_output("rotcl R%d", Rx.n); + } + + inline void Assembler::SH4_rotcr(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x5 << 0)); + asm_output("rotcr R%d", Rx.n); + } + + inline void Assembler::SH4_rotl(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x4 << 0)); + asm_output("rotl R%d", Rx.n); + } + + inline void Assembler::SH4_rotr(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x5 << 0)); + asm_output("rotr R%d", Rx.n); + } + + inline void Assembler::SH4_rte() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x2 << 4) | (0xB << 0)); + asm_output("rte"); + } + + inline void Assembler::SH4_rts() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x0 << 4) | (0xB << 0)); + asm_output("rts"); + } + + inline void Assembler::SH4_sets() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x5 << 4) | (0x8 << 0)); + asm_output("sets"); + } + + inline void Assembler::SH4_sett() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x1 << 4) | (0x8 << 0)); + asm_output("sett"); + } + + inline void Assembler::SH4_shad(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xC << 0)); + asm_output("shad R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_shld(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xD << 0)); + asm_output("shld R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_shal(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x0 << 0)); + asm_output("shal R%d", Rx.n); + } + + inline void Assembler::SH4_shar(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x1 << 0)); + asm_output("shar R%d", Rx.n); + } + + inline void Assembler::SH4_shll(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x0 << 0)); + asm_output("shll R%d", Rx.n); + } + + inline void Assembler::SH4_shll16(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x8 << 0)); + asm_output("shll16 R%d", Rx.n); + } + + inline void Assembler::SH4_shll2(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x8 << 0)); + asm_output("shll2 R%d", Rx.n); + } + + inline void Assembler::SH4_shll8(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) 
| (0x8 << 0)); + asm_output("shll8 R%d", Rx.n); + } + + inline void Assembler::SH4_shlr(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x1 << 0)); + asm_output("shlr R%d", Rx.n); + } + + inline void Assembler::SH4_shlr16(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x9 << 0)); + asm_output("shlr16 R%d", Rx.n); + } + + inline void Assembler::SH4_shlr2(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x9 << 0)); + asm_output("shlr2 R%d", Rx.n); + } + + inline void Assembler::SH4_shlr8(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0x9 << 0)); + asm_output("shlr8 R%d", Rx.n); + } + + inline void Assembler::SH4_sleep() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x1 << 4) | (0xB << 0)); + asm_output("sleep"); + } + + inline void Assembler::SH4_stc_SR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x2 << 0)); + asm_output("stc_SR R%d", Rx.n); + } + + inline void Assembler::SH4_stc_GBR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0x2 << 0)); + asm_output("stc_GBR R%d", Rx.n); + } + + inline void Assembler::SH4_stc_VBR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x2 << 0)); + asm_output("stc_VBR R%d", Rx.n); + } + + inline void Assembler::SH4_stc_SSR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x3 << 4) | (0x2 << 0)); + asm_output("stc_SSR R%d", Rx.n); + } + + inline void Assembler::SH4_stc_SPC(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x4 << 4) | (0x2 << 0)); + asm_output("stc_SPC R%d", Rx.n); + } + + inline void Assembler::SH4_stc_SGR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x3 << 4) | (0xA << 0)); + asm_output("stc_SGR R%d", Rx.n); + } + + inline void Assembler::SH4_stc_DBR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0xF << 4) | (0xA << 0)); + asm_output("stc_DBR R%d", Rx.n); + } + +#define SH4_CHECK_RANGE_stc_bank(imm) ((imm) >= 0 && (imm) <= 7) + +#define FITS_SH4_stc_bank(imm) (SH4_CHECK_RANGE_stc_bank(imm)) + + inline void Assembler::SH4_stc_bank(int imm, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && SH4_CHECK_RANGE_stc_bank(imm)); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((imm & 0x7) << 4) | (0x2 << 0)); + asm_output("stc_bank %d, R%d", imm, Rx.n); + } + + inline void Assembler::SH4_stcl_SR_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x3 << 0)); + asm_output("stcl_SR_decRx R%d", Rx.n); + } + + inline void Assembler::SH4_stcl_VBR_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x3 << 0)); + asm_output("stcl_VBR_decRx R%d", Rx.n); + } + + inline void Assembler::SH4_stcl_SSR_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x3 << 4) | (0x3 << 0)); + 
asm_output("stcl_SSR_decRx R%d", Rx.n); + } + + inline void Assembler::SH4_stcl_SPC_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x4 << 4) | (0x3 << 0)); + asm_output("stcl_SPC_decRx R%d", Rx.n); + } + + inline void Assembler::SH4_stcl_GBR_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0x3 << 0)); + asm_output("stcl_GBR_decRx R%d", Rx.n); + } + + inline void Assembler::SH4_stcl_SGR_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x3 << 4) | (0x2 << 0)); + asm_output("stcl_SGR_decRx R%d", Rx.n); + } + + inline void Assembler::SH4_stcl_DBR_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0xF << 4) | (0x2 << 0)); + asm_output("stcl_DBR_decRx R%d", Rx.n); + } + +#define SH4_CHECK_RANGE_stcl_bank_decRx(imm) ((imm) >= 0 && (imm) <= 7) + +#define FITS_SH4_stcl_bank_decRx(imm) (SH4_CHECK_RANGE_stcl_bank_decRx(imm)) + + inline void Assembler::SH4_stcl_bank_decRx(int imm, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && SH4_CHECK_RANGE_stcl_bank_decRx(imm)); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((imm & 0x7) << 4) | (0x3 << 0)); + asm_output("stcl_bank_decRx %d, R%d", imm, Rx.n); + } + + inline void Assembler::SH4_sts_MACH(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0xA << 0)); + asm_output("sts_MACH R%d", Rx.n); + } + + inline void Assembler::SH4_sts_MACL(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0xA << 0)); + asm_output("sts_MACL R%d", Rx.n); + } + + inline void Assembler::SH4_sts_PR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0xA << 0)); + asm_output("sts_PR R%d", Rx.n); + } + + inline void Assembler::SH4_sts_FPUL(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x5 << 4) | (0xA << 0)); + asm_output("sts_FPUL R%d", Rx.n); + } + + inline void Assembler::SH4_sts_FPSCR(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x6 << 4) | (0xA << 0)); + asm_output("sts_FPSCR R%d", Rx.n); + } + + inline void Assembler::SH4_stsl_MACH_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x2 << 0)); + asm_output("stsl_MACH_decRx R%d", Rx.n); + } + + inline void Assembler::SH4_stsl_MACL_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0x2 << 0)); + asm_output("stsl_MACL_decRx R%d", Rx.n); + } + + inline void Assembler::SH4_stsl_PR_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x2 << 0)); + asm_output("stsl_PR_decRx R%d", Rx.n); + } + + inline void Assembler::SH4_stsl_FPUL_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x5 << 4) | (0x2 << 0)); + asm_output("stsl_FPUL_decRx R%d", Rx.n); + } + + inline void Assembler::SH4_stsl_FPSCR_decRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x6 << 4) | (0x2 << 0)); + 
asm_output("stsl_FPSCR_decRx R%d", Rx.n); + } + + inline void Assembler::SH4_sub(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x8 << 0)); + asm_output("sub R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_subc(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xA << 0)); + asm_output("subc R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_subv(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xB << 0)); + asm_output("subv R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_swapb(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x8 << 0)); + asm_output("swapb R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_swapw(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x6 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x9 << 0)); + asm_output("swapw R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_synco() { + NanoAssert(1); + SH4_emit16((0x0 << 12) | (0x0 << 8) | (0xA << 4) | (0xB << 0)); + asm_output("synco"); + } + + inline void Assembler::SH4_tasb_indRx(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0xB << 0)); + asm_output("tasb_indRx R%d", Rx.n); + } + +#define SH4_CHECK_RANGE_trapa_imm(imm) ((imm) >= 0 && (imm) <= 255) + +#define FITS_SH4_trapa_imm(imm) (SH4_CHECK_RANGE_trapa_imm(imm)) + + inline void Assembler::SH4_trapa_imm(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_trapa_imm(imm)); + SH4_emit16((0xC << 12) | (0x3 << 8) | ((imm & 0xFF) << 0)); + asm_output("trapa_imm %d", imm); + } + +#define SH4_CHECK_RANGE_tst_imm_R0(imm) ((imm) >= 0 && (imm) <= 255) + +#define FITS_SH4_tst_imm_R0(imm) (SH4_CHECK_RANGE_tst_imm_R0(imm)) + + inline void Assembler::SH4_tst_imm_R0(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_tst_imm_R0(imm)); + SH4_emit16((0xC << 12) | (0x8 << 8) | ((imm & 0xFF) << 0)); + asm_output("tst_imm_R0 %d", imm); + } + + inline void Assembler::SH4_tst(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x8 << 0)); + asm_output("tst R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_tstb_imm_dispR0GBR(imm) ((imm) >= 0 && (imm) <= 255) + +#define FITS_SH4_tstb_imm_dispR0GBR(imm) (SH4_CHECK_RANGE_tstb_imm_dispR0GBR(imm)) + + inline void Assembler::SH4_tstb_imm_dispR0GBR(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_tstb_imm_dispR0GBR(imm)); + SH4_emit16((0xC << 12) | (0xC << 8) | ((imm & 0xFF) << 0)); + asm_output("tstb_imm_dispR0GBR %d", imm); + } + +#define SH4_CHECK_RANGE_xor_imm_R0(imm) ((imm) >= 0 && (imm) <= 255) + +#define FITS_SH4_xor_imm_R0(imm) (SH4_CHECK_RANGE_xor_imm_R0(imm)) + + inline void Assembler::SH4_xor_imm_R0(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_xor_imm_R0(imm)); + SH4_emit16((0xC << 12) | (0xA << 8) | ((imm & 0xFF) << 0)); + asm_output("xor_imm_R0 %d", imm); + } + + inline void Assembler::SH4_xor(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 
15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xA << 0)); + asm_output("xor R%d, R%d", Ry.n, Rx.n); + } + +#define SH4_CHECK_RANGE_xorb_imm_dispR0GBR(imm) ((imm) >= 0 && (imm) <= 255) + +#define FITS_SH4_xorb_imm_dispR0GBR(imm) (SH4_CHECK_RANGE_xorb_imm_dispR0GBR(imm)) + + inline void Assembler::SH4_xorb_imm_dispR0GBR(int imm) { + NanoAssert(1 && SH4_CHECK_RANGE_xorb_imm_dispR0GBR(imm)); + SH4_emit16((0xC << 12) | (0xE << 8) | ((imm & 0xFF) << 0)); + asm_output("xorb_imm_dispR0GBR %d", imm); + } + + inline void Assembler::SH4_xtrct(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x2 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xD << 0)); + asm_output("xtrct R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_dt(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x4 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x1 << 4) | (0x0 << 0)); + asm_output("dt R%d", Rx.n); + } + + inline void Assembler::SH4_dmulsl(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xD << 0)); + asm_output("dmulsl R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_dmulul(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x5 << 0)); + asm_output("dmulul R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_macl_incRy_incRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0xF << 0)); + asm_output("macl_incRy_incRx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_braf(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x2 << 4) | (0x3 << 0)); + asm_output("braf R%d", Rx.n); + } + + inline void Assembler::SH4_bsrf(Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15); + SH4_emit16((0x0 << 12) | ((REGNUM(Rx) & 0xF) << 8) | (0x0 << 4) | (0x3 << 0)); + asm_output("bsrf R%d", Rx.n); + } + + inline void Assembler::SH4_fabs(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x5 << 4) | (0xD << 0)); + asm_output("fabs R%d", Rx.n); + } + + inline void Assembler::SH4_fabs_double(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && !((REGNUM(Rx) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x5 << 4) | (0xD << 0)); + asm_output("fabs_double R%d", Rx.n); + } + + inline void Assembler::SH4_fadd(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x0 << 0)); + asm_output("fadd R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fadd_double(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && !((REGNUM(Rx) - 16) & 0x1) && (REGNUM(Ry) - 16) <= 15 && !((REGNUM(Ry) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x0 << 0)); + asm_output("fadd_double R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fcmpeq(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | 
(((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x4 << 0)); + asm_output("fcmpeq R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fcmpeq_double(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && !((REGNUM(Rx) - 16) & 0x1) && (REGNUM(Ry) - 16) <= 15 && !((REGNUM(Ry) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x4 << 0)); + asm_output("fcmpeq_double R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fcmpgt(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x5 << 0)); + asm_output("fcmpgt R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fcmpgt_double(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && !((REGNUM(Rx) - 16) & 0x1) && (REGNUM(Ry) - 16) <= 15 && !((REGNUM(Ry) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x5 << 0)); + asm_output("fcmpgt_double R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fcnvds_double_FPUL(Register Rx) { + NanoAssert(1 && !((REGNUM(Rx) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0xB << 4) | (0xD << 0)); + asm_output("fcnvds_double_FPUL R%d", Rx.n); + } + + inline void Assembler::SH4_fcnvsd_FPUL_double(Register Rx) { + NanoAssert(1 && !((REGNUM(Rx) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0xA << 4) | (0xD << 0)); + asm_output("fcnvsd_FPUL_double R%d", Rx.n); + } + + inline void Assembler::SH4_fdiv(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x3 << 0)); + asm_output("fdiv R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fdiv_double(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && !((REGNUM(Rx) - 16) & 0x1) && (REGNUM(Ry) - 16) <= 15 && !((REGNUM(Ry) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x3 << 0)); + asm_output("fdiv_double R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fipr(Register Ry, Register Rx) { + NanoAssert(1 && !(((REGNUM(Rx) - 16) & 0x3) || ((REGNUM(Ry) - 16) & 0x3))); + SH4_emit16((0xF << 12) | (((((REGNUM(Rx) - 16) & 0xF) << 2) | (((REGNUM(Ry) - 16) & 0xF) >> 2)) << 8) | (0xE << 4) | (0xD << 0)); + asm_output("fipr R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fldi0(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x8 << 4) | (0xD << 0)); + asm_output("fldi0 R%d", Rx.n); + } + + inline void Assembler::SH4_fldi1(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x9 << 4) | (0xD << 0)); + asm_output("fldi1 R%d", Rx.n); + } + + inline void Assembler::SH4_flds_FPUL(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x1 << 4) | (0xD << 0)); + asm_output("flds_FPUL R%d", Rx.n); + } + + inline void Assembler::SH4_float_FPUL(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x2 << 4) | (0xD << 0)); + asm_output("float_FPUL R%d", Rx.n); + } + + 
inline void Assembler::SH4_float_FPUL_double(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && !((REGNUM(Rx) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x2 << 4) | (0xD << 0)); + asm_output("float_FPUL_double R%d", Rx.n); + } + + inline void Assembler::SH4_fmac(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0xE << 0)); + asm_output("fmac R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0xC << 0)); + asm_output("fmov R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_Xdouble_Xdouble(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0xC << 0)); + asm_output("fmov_Xdouble_Xdouble R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_indRy(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x8 << 0)); + asm_output("fmov_indRy R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_indRy_Xdouble(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x8 << 0)); + asm_output("fmov_indRy_Xdouble R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_indRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0xA << 0)); + asm_output("fmov_indRx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_Xdouble_indRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0xA << 0)); + asm_output("fmov_Xdouble_indRx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_incRy(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x9 << 0)); + asm_output("fmov_incRy R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_incRy_Xdouble(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x9 << 0)); + asm_output("fmov_incRy_Xdouble R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_decRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0xB << 0)); + asm_output("fmov_decRx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_Xdouble_decRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0xB << 0)); + asm_output("fmov_Xdouble_decRx R%d, R%d", Ry.n, 
Rx.n); + } + + inline void Assembler::SH4_fmov_dispR0Ry(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x6 << 0)); + asm_output("fmov_dispR0Ry R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_dispR0Ry_Xdouble(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x6 << 0)); + asm_output("fmov_dispR0Ry_Xdouble R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_dispR0Rx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x7 << 0)); + asm_output("fmov_dispR0Rx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmov_Xdouble_dispR0Rx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x7 << 0)); + asm_output("fmov_Xdouble_dispR0Rx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovd_indRy_Xdouble(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x8 << 0)); + asm_output("fmovd_indRy_Xdouble R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovd_Xdouble_indRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0xA << 0)); + asm_output("fmovd_Xdouble_indRx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovd_incRy_Xdouble(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x9 << 0)); + asm_output("fmovd_incRy_Xdouble R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovd_Xdouble_decRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0xB << 0)); + asm_output("fmovd_Xdouble_decRx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovd_dispR0Ry_Xdouble(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x6 << 0)); + asm_output("fmovd_dispR0Ry_Xdouble R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovd_Xdouble_dispR0Rx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x7 << 0)); + asm_output("fmovd_Xdouble_dispR0Rx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovs_indRy(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x8 << 0)); + asm_output("fmovs_indRy R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovs_indRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) 
& 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0xA << 0)); + asm_output("fmovs_indRx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovs_incRy(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x9 << 0)); + asm_output("fmovs_incRy R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovs_decRx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0xB << 0)); + asm_output("fmovs_decRx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovs_dispR0Ry(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && REGNUM(Ry) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | ((REGNUM(Ry) & 0xF) << 4) | (0x6 << 0)); + asm_output("fmovs_dispR0Ry R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmovs_dispR0Rx(Register Ry, Register Rx) { + NanoAssert(1 && REGNUM(Rx) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | ((REGNUM(Rx) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x7 << 0)); + asm_output("fmovs_dispR0Rx R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmul(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x2 << 0)); + asm_output("fmul R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fmul_double(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && !((REGNUM(Rx) - 16) & 0x1) && (REGNUM(Ry) - 16) <= 15 && !((REGNUM(Ry) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x2 << 0)); + asm_output("fmul_double R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fneg(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x4 << 4) | (0xD << 0)); + asm_output("fneg R%d", Rx.n); + } + + inline void Assembler::SH4_fneg_double(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && !((REGNUM(Rx) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x4 << 4) | (0xD << 0)); + asm_output("fneg_double R%d", Rx.n); + } + + inline void Assembler::SH4_fpchg() { + NanoAssert(1); + SH4_emit16((0xF << 12) | (0x7 << 8) | (0xF << 4) | (0xD << 0)); + asm_output("fpchg"); + } + + inline void Assembler::SH4_frchg() { + NanoAssert(1); + SH4_emit16((0xF << 12) | (0xB << 8) | (0xF << 4) | (0xD << 0)); + asm_output("frchg"); + } + + inline void Assembler::SH4_fsca_FPUL_double(Register Rx) { + NanoAssert(1 && !((REGNUM(Rx) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0xF << 4) | (0xD << 0)); + asm_output("fsca_FPUL_double R%d", Rx.n); + } + + inline void Assembler::SH4_fschg() { + NanoAssert(1); + SH4_emit16((0xF << 12) | (0x3 << 8) | (0xF << 4) | (0xD << 0)); + asm_output("fschg"); + } + + inline void Assembler::SH4_fsqrt(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x6 << 4) | (0xD << 0)); + asm_output("fsqrt R%d", Rx.n); + } + + inline void Assembler::SH4_fsqrt_double(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && !((REGNUM(Rx) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | 
(0x6 << 4) | (0xD << 0)); + asm_output("fsqrt_double R%d", Rx.n); + } + + inline void Assembler::SH4_fsrra(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x7 << 4) | (0xD << 0)); + asm_output("fsrra R%d", Rx.n); + } + + inline void Assembler::SH4_fsts_FPUL(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x0 << 4) | (0xD << 0)); + asm_output("fsts_FPUL R%d", Rx.n); + } + + inline void Assembler::SH4_fsub(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && (REGNUM(Ry) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x1 << 0)); + asm_output("fsub R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_fsub_double(Register Ry, Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && !((REGNUM(Rx) - 16) & 0x1) && (REGNUM(Ry) - 16) <= 15 && !((REGNUM(Ry) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (((REGNUM(Ry) - 16) & 0xF) << 4) | (0x1 << 0)); + asm_output("fsub_double R%d, R%d", Ry.n, Rx.n); + } + + inline void Assembler::SH4_ftrc_FPUL(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x3 << 4) | (0xD << 0)); + asm_output("ftrc_FPUL R%d", Rx.n); + } + + inline void Assembler::SH4_ftrc_double_FPUL(Register Rx) { + NanoAssert(1 && (REGNUM(Rx) - 16) <= 15 && !((REGNUM(Rx) - 16) & 0x1)); + SH4_emit16((0xF << 12) | (((REGNUM(Rx) - 16) & 0xF) << 8) | (0x3 << 4) | (0xD << 0)); + asm_output("ftrc_double_FPUL R%d", Rx.n); + } + + inline void Assembler::SH4_ftrv(Register Rx) { + NanoAssert(1 && !((REGNUM(Rx) - 16) & 0x3)); + SH4_emit16((0xF << 12) | (((((REGNUM(Rx) - 16) & 0xF) << 2) | 0x1) << 8) | (0xF << 4) | (0xD << 0)); + asm_output("ftrv R%d", Rx.n); + } diff --git a/js/src/nanojit/NativeSH4.cpp b/js/src/nanojit/NativeSH4.cpp index 83f1b4398df9..2f1f5ec8091e 100644 --- a/js/src/nanojit/NativeSH4.cpp +++ b/js/src/nanojit/NativeSH4.cpp @@ -71,1699 +71,7 @@ namespace nanojit *((uint32_t *)(void *)_nIns) = value; } - /*************************************/ - /* Start of the auto-generated part. 
*/ - -#define SH4_CHECK_RANGE_add_imm(imm) ((imm) >= -128 && (imm) <= 127) - -#define FITS_SH4_add_imm(imm) (SH4_CHECK_RANGE_add_imm(imm)) - - inline void Assembler::SH4_add_imm(int imm, Register Rx) { - NanoAssert(1 && Rx <= 15 && SH4_CHECK_RANGE_add_imm(imm)); - SH4_emit16((0x7 << 12) | ((Rx & 0xF) << 8) | ((imm & 0xFF) << 0)); - asm_output("add_imm %d, R%d", imm, Rx); - } - - inline void Assembler::SH4_add(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xC << 0)); - asm_output("add R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_addc(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xE << 0)); - asm_output("addc R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_addv(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xF << 0)); - asm_output("addv R%d, R%d", Ry, Rx); - } - -#define SH4_CHECK_RANGE_and_imm_R0(imm) ((imm) >= 0 && (imm) <= 255) - -#define FITS_SH4_and_imm_R0(imm) (SH4_CHECK_RANGE_and_imm_R0(imm)) - - inline void Assembler::SH4_and_imm_R0(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_and_imm_R0(imm)); - SH4_emit16((0xC << 12) | (0x9 << 8) | ((imm & 0xFF) << 0)); - asm_output("and_imm_R0 %d", imm); - } - - inline void Assembler::SH4_and(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x9 << 0)); - asm_output("and R%d, R%d", Ry, Rx); - } - -#define SH4_CHECK_RANGE_andb_imm_dispR0GBR(imm) ((imm) >= 0 && (imm) <= 255) - -#define FITS_SH4_andb_imm_dispR0GBR(imm) (SH4_CHECK_RANGE_andb_imm_dispR0GBR(imm)) - - inline void Assembler::SH4_andb_imm_dispR0GBR(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_andb_imm_dispR0GBR(imm)); - SH4_emit16((0xC << 12) | (0xD << 8) | ((imm & 0xFF) << 0)); - asm_output("andb_imm_dispR0GBR %d", imm); - } - -#define SH4_CHECK_RANGE_bra(imm) ((imm) >= -4096 && (imm) <= 4094) - -#define SH4_CHECK_ALIGN_bra(imm) (((imm) & 0x1) == 0) - -#define FITS_SH4_bra(imm) (SH4_CHECK_RANGE_bra((imm) + 2) && SH4_CHECK_ALIGN_bra((imm) + 2)) - - inline void Assembler::SH4_bra(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_bra(imm + 2) && SH4_CHECK_ALIGN_bra(imm + 2)); - SH4_emit16((0xA << 12) | (((imm & 0x1FFE) >> 1) << 0)); - asm_output("bra %d", imm); - } - -#define SH4_CHECK_RANGE_bsr(imm) ((imm) >= -4096 && (imm) <= 4094) - -#define SH4_CHECK_ALIGN_bsr(imm) (((imm) & 0x1) == 0) - -#define FITS_SH4_bsr(imm) (SH4_CHECK_RANGE_bsr((imm) + 2) && SH4_CHECK_ALIGN_bsr((imm) + 2)) - - inline void Assembler::SH4_bsr(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_bsr(imm + 2) && SH4_CHECK_ALIGN_bsr(imm + 2)); - SH4_emit16((0xB << 12) | (((imm & 0x1FFE) >> 1) << 0)); - asm_output("bsr %d", imm); - } - -#define SH4_CHECK_RANGE_bt(imm) ((imm) >= -256 && (imm) <= 254) - -#define SH4_CHECK_ALIGN_bt(imm) (((imm) & 0x1) == 0) - -#define FITS_SH4_bt(imm) (SH4_CHECK_RANGE_bt((imm) + 2) && SH4_CHECK_ALIGN_bt((imm) + 2)) - - inline void Assembler::SH4_bt(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_bt(imm + 2) && SH4_CHECK_ALIGN_bt(imm + 2)); - SH4_emit16((0x8 << 12) | (0x9 << 8) | (((imm & 0x1FE) >> 1) << 0)); - asm_output("bt %d", imm); - } - -#define SH4_CHECK_RANGE_bf(imm) ((imm) >= -256 && (imm) <= 254) - -#define SH4_CHECK_ALIGN_bf(imm) (((imm) & 0x1) == 0) - -#define FITS_SH4_bf(imm) (SH4_CHECK_RANGE_bf((imm) + 2) && 
SH4_CHECK_ALIGN_bf((imm) + 2)) - - inline void Assembler::SH4_bf(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_bf(imm + 2) && SH4_CHECK_ALIGN_bf(imm + 2)); - SH4_emit16((0x8 << 12) | (0xB << 8) | (((imm & 0x1FE) >> 1) << 0)); - asm_output("bf %d", imm); - } - -#define SH4_CHECK_RANGE_bts(imm) ((imm) >= -256 && (imm) <= 254) - -#define SH4_CHECK_ALIGN_bts(imm) (((imm) & 0x1) == 0) - -#define FITS_SH4_bts(imm) (SH4_CHECK_RANGE_bts((imm) + 2) && SH4_CHECK_ALIGN_bts((imm) + 2)) - - inline void Assembler::SH4_bts(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_bts(imm + 2) && SH4_CHECK_ALIGN_bts(imm + 2)); - SH4_emit16((0x8 << 12) | (0xD << 8) | (((imm & 0x1FE) >> 1) << 0)); - asm_output("bts %d", imm); - } - -#define SH4_CHECK_RANGE_bfs(imm) ((imm) >= -256 && (imm) <= 254) - -#define SH4_CHECK_ALIGN_bfs(imm) (((imm) & 0x1) == 0) - -#define FITS_SH4_bfs(imm) (SH4_CHECK_RANGE_bfs((imm) + 2) && SH4_CHECK_ALIGN_bfs((imm) + 2)) - - inline void Assembler::SH4_bfs(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_bfs(imm + 2) && SH4_CHECK_ALIGN_bfs(imm + 2)); - SH4_emit16((0x8 << 12) | (0xF << 8) | (((imm & 0x1FE) >> 1) << 0)); - asm_output("bfs %d", imm); - } - - inline void Assembler::SH4_clrmac() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x2 << 4) | (0x8 << 0)); - asm_output("clrmac"); - } - - inline void Assembler::SH4_clrs() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x4 << 4) | (0x8 << 0)); - asm_output("clrs"); - } - - inline void Assembler::SH4_clrt() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x0 << 4) | (0x8 << 0)); - asm_output("clrt"); - } - -#define SH4_CHECK_RANGE_cmpeq_imm_R0(imm) ((imm) >= -128 && (imm) <= 127) - -#define FITS_SH4_cmpeq_imm_R0(imm) (SH4_CHECK_RANGE_cmpeq_imm_R0(imm)) - - inline void Assembler::SH4_cmpeq_imm_R0(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_cmpeq_imm_R0(imm)); - SH4_emit16((0x8 << 12) | (0x8 << 8) | ((imm & 0xFF) << 0)); - asm_output("cmpeq_imm_R0 %d", imm); - } - - inline void Assembler::SH4_cmpeq(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x0 << 0)); - asm_output("cmpeq R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_cmpge(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x3 << 0)); - asm_output("cmpge R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_cmpgt(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x7 << 0)); - asm_output("cmpgt R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_cmphi(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x6 << 0)); - asm_output("cmphi R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_cmphs(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x2 << 0)); - asm_output("cmphs R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_cmppl(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0x5 << 0)); - asm_output("cmppl R%d", Rx); - } - - inline void Assembler::SH4_cmppz(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0x1 << 0)); - asm_output("cmppz R%d", Rx); - } - - inline void Assembler::SH4_cmpstr(Register Ry, 
Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xC << 0)); - asm_output("cmpstr R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_div0s(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x7 << 0)); - asm_output("div0s R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_div0u() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x1 << 4) | (0x9 << 0)); - asm_output("div0u"); - } - - inline void Assembler::SH4_div1(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x4 << 0)); - asm_output("div1 R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_extsb(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xE << 0)); - asm_output("extsb R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_extsw(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xF << 0)); - asm_output("extsw R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_extub(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xC << 0)); - asm_output("extub R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_extuw(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xD << 0)); - asm_output("extuw R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_icbi_indRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0xE << 4) | (0x3 << 0)); - asm_output("icbi_indRx R%d", Rx); - } - - inline void Assembler::SH4_jmp_indRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0xB << 0)); - asm_output("jmp_indRx R%d", Rx); - } - - inline void Assembler::SH4_jsr_indRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0xB << 0)); - asm_output("jsr_indRx R%d", Rx); - } - - inline void Assembler::SH4_ldc_SR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0xE << 0)); - asm_output("ldc_SR R%d", Rx); - } - - inline void Assembler::SH4_ldc_GBR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0xE << 0)); - asm_output("ldc_GBR R%d", Rx); - } - - inline void Assembler::SH4_ldc_SGR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x3 << 4) | (0xA << 0)); - asm_output("ldc_SGR R%d", Rx); - } - - inline void Assembler::SH4_ldc_VBR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0xE << 0)); - asm_output("ldc_VBR R%d", Rx); - } - - inline void Assembler::SH4_ldc_SSR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x3 << 4) | (0xE << 0)); - asm_output("ldc_SSR R%d", Rx); - } - - inline void Assembler::SH4_ldc_SPC(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x4 << 4) | (0xE << 0)); - asm_output("ldc_SPC R%d", Rx); - } - - inline void Assembler::SH4_ldc_DBR(Register Rx) { - NanoAssert(1 && Rx <= 15); - 
SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0xF << 4) | (0xA << 0)); - asm_output("ldc_DBR R%d", Rx); - } - -#define SH4_CHECK_RANGE_ldc_bank(imm) ((imm) >= 0 && (imm) <= 7) - -#define FITS_SH4_ldc_bank(imm) (SH4_CHECK_RANGE_ldc_bank(imm)) - - inline void Assembler::SH4_ldc_bank(Register Rx, int imm) { - NanoAssert(1 && Rx <= 15 && SH4_CHECK_RANGE_ldc_bank(imm)); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | ((imm & 0x7) << 4) | (0xE << 0)); - asm_output("ldc_bank R%d, %d", Rx, imm); - } - - inline void Assembler::SH4_ldcl_incRx_SR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x7 << 0)); - asm_output("ldcl_incRx_SR R%d", Rx); - } - - inline void Assembler::SH4_ldcl_incRx_GBR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0x7 << 0)); - asm_output("ldcl_incRx_GBR R%d", Rx); - } - - inline void Assembler::SH4_ldcl_incRx_VBR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x7 << 0)); - asm_output("ldcl_incRx_VBR R%d", Rx); - } - - inline void Assembler::SH4_ldcl_incRx_SGR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x3 << 4) | (0x6 << 0)); - asm_output("ldcl_incRx_SGR R%d", Rx); - } - - inline void Assembler::SH4_ldcl_incRx_SSR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x3 << 4) | (0x7 << 0)); - asm_output("ldcl_incRx_SSR R%d", Rx); - } - - inline void Assembler::SH4_ldcl_incRx_SPC(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x4 << 4) | (0x7 << 0)); - asm_output("ldcl_incRx_SPC R%d", Rx); - } - - inline void Assembler::SH4_ldcl_incRx_DBR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0xF << 4) | (0x6 << 0)); - asm_output("ldcl_incRx_DBR R%d", Rx); - } - -#define SH4_CHECK_RANGE_ldcl_incRx_bank(imm) ((imm) >= 0 && (imm) <= 7) - -#define FITS_SH4_ldcl_incRx_bank(imm) (SH4_CHECK_RANGE_ldcl_incRx_bank(imm)) - - inline void Assembler::SH4_ldcl_incRx_bank(Register Rx, int imm) { - NanoAssert(1 && Rx <= 15 && SH4_CHECK_RANGE_ldcl_incRx_bank(imm)); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | ((imm & 0x7) << 4) | (0x7 << 0)); - asm_output("ldcl_incRx_bank R%d, %d", Rx, imm); - } - - inline void Assembler::SH4_lds_MACH(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0xA << 0)); - asm_output("lds_MACH R%d", Rx); - } - - inline void Assembler::SH4_lds_MACL(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0xA << 0)); - asm_output("lds_MACL R%d", Rx); - } - - inline void Assembler::SH4_lds_PR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0xA << 0)); - asm_output("lds_PR R%d", Rx); - } - - inline void Assembler::SH4_lds_FPUL(Register Ry) { - NanoAssert(1 && Ry <= 15); - SH4_emit16((0x4 << 12) | ((Ry & 0xF) << 8) | (0x5 << 4) | (0xA << 0)); - asm_output("lds_FPUL R%d", Ry); - } - - inline void Assembler::SH4_lds_FPSCR(Register Ry) { - NanoAssert(1 && Ry <= 15); - SH4_emit16((0x4 << 12) | ((Ry & 0xF) << 8) | (0x6 << 4) | (0xA << 0)); - asm_output("lds_FPSCR R%d", Ry); - } - - inline void Assembler::SH4_ldsl_incRx_MACH(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x6 << 0)); - asm_output("ldsl_incRx_MACH R%d", Rx); - } 
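/* Editorial note -- illustrative sketch, not part of this patch.  Each
 * immediate or displacement form is paired with a FITS_SH4_<insn> predicate
 * built from its SH4_CHECK_RANGE_* / SH4_CHECK_ALIGN_* macros, because the
 * emitters simply mask the operand field (e.g. imm & 0x7, imm & 0xFF) and
 * would silently truncate an out-of-range value; the NanoAssert only catches
 * that in debug builds.  A caller is therefore expected to test the predicate
 * before choosing the short form.  The helper and fallback below are
 * assumptions for illustration, not the backend's actual lowering. */
static inline bool sh4_can_use_add_imm_sketch(int value) {
    /* mirrors SH4_CHECK_RANGE_add_imm: the instruction carries only a signed
       8-bit immediate field */
    return value >= -128 && value <= 127;
}
/* Hypothetical usage inside the assembler:
 *     if (sh4_can_use_add_imm_sketch(c))  SH4_add_imm(c, rd);
 *     else  { materialize c in a scratch register; SH4_add(scratch, rd); }
 */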
- - inline void Assembler::SH4_ldsl_incRx_MACL(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0x6 << 0)); - asm_output("ldsl_incRx_MACL R%d", Rx); - } - - inline void Assembler::SH4_ldsl_incRx_PR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x6 << 0)); - asm_output("ldsl_incRx_PR R%d", Rx); - } - - inline void Assembler::SH4_ldsl_incRy_FPUL(Register Ry) { - NanoAssert(1 && Ry <= 15); - SH4_emit16((0x4 << 12) | ((Ry & 0xF) << 8) | (0x5 << 4) | (0x6 << 0)); - asm_output("ldsl_incRy_FPUL R%d", Ry); - } - - inline void Assembler::SH4_ldsl_incRy_FPSCR(Register Ry) { - NanoAssert(1 && Ry <= 15); - SH4_emit16((0x4 << 12) | ((Ry & 0xF) << 8) | (0x6 << 4) | (0x6 << 0)); - asm_output("ldsl_incRy_FPSCR R%d", Ry); - } - - inline void Assembler::SH4_ldtlb() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x3 << 4) | (0x8 << 0)); - asm_output("ldtlb"); - } - - inline void Assembler::SH4_macw_incRy_incRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xF << 0)); - asm_output("macw_incRy_incRx R%d, R%d", Ry, Rx); - } - -#define SH4_CHECK_RANGE_mov_imm(imm) ((imm) >= -128 && (imm) <= 127) - -#define FITS_SH4_mov_imm(imm) (SH4_CHECK_RANGE_mov_imm(imm)) - - inline void Assembler::SH4_mov_imm(int imm, Register Rx) { - NanoAssert(1 && Rx <= 15 && SH4_CHECK_RANGE_mov_imm(imm)); - SH4_emit16((0xE << 12) | ((Rx & 0xF) << 8) | ((imm & 0xFF) << 0)); - asm_output("mov_imm %d, R%d", imm, Rx); - } - - inline void Assembler::SH4_mov(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x3 << 0)); - asm_output("mov R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movb_dispR0Rx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x4 << 0)); - asm_output("movb_dispR0Rx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movb_decRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x4 << 0)); - asm_output("movb_decRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movb_indRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x0 << 0)); - asm_output("movb_indRx R%d, R%d", Ry, Rx); - } - -#define SH4_CHECK_RANGE_movb_dispRy_R0(imm) ((imm) >= 0 && (imm) <= 15) - -#define FITS_SH4_movb_dispRy_R0(imm) (SH4_CHECK_RANGE_movb_dispRy_R0(imm)) - - inline void Assembler::SH4_movb_dispRy_R0(int imm, Register Ry) { - NanoAssert(1 && Ry <= 15 && SH4_CHECK_RANGE_movb_dispRy_R0(imm)); - SH4_emit16((0x8 << 12) | (0x4 << 8) | ((Ry & 0xF) << 4) | ((imm & 0xF) << 0)); - asm_output("movb_dispRy_R0 %d, R%d", imm, Ry); - } - -#define SH4_CHECK_RANGE_movb_dispGBR_R0(imm) ((imm) >= 0 && (imm) <= 255) - -#define FITS_SH4_movb_dispGBR_R0(imm) (SH4_CHECK_RANGE_movb_dispGBR_R0(imm)) - - inline void Assembler::SH4_movb_dispGBR_R0(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_movb_dispGBR_R0(imm)); - SH4_emit16((0xC << 12) | (0x4 << 8) | ((imm & 0xFF) << 0)); - asm_output("movb_dispGBR_R0 %d", imm); - } - - inline void Assembler::SH4_movb_dispR0Ry(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xC 
<< 0)); - asm_output("movb_dispR0Ry R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movb_incRy(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x4 << 0)); - asm_output("movb_incRy R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movb_indRy(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x0 << 0)); - asm_output("movb_indRy R%d, R%d", Ry, Rx); - } - -#define SH4_CHECK_RANGE_movb_R0_dispRx(imm) ((imm) >= 0 && (imm) <= 15) - -#define FITS_SH4_movb_R0_dispRx(imm) (SH4_CHECK_RANGE_movb_R0_dispRx(imm)) - - inline void Assembler::SH4_movb_R0_dispRx(int imm, Register Rx) { - NanoAssert(1 && Rx <= 15 && SH4_CHECK_RANGE_movb_R0_dispRx(imm)); - SH4_emit16((0x8 << 12) | (0x0 << 8) | ((Rx & 0xF) << 4) | ((imm & 0xF) << 0)); - asm_output("movb_R0_dispRx %d, R%d", imm, Rx); - } - -#define SH4_CHECK_RANGE_movb_R0_dispGBR(imm) ((imm) >= 0 && (imm) <= 255) - -#define FITS_SH4_movb_R0_dispGBR(imm) (SH4_CHECK_RANGE_movb_R0_dispGBR(imm)) - - inline void Assembler::SH4_movb_R0_dispGBR(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_movb_R0_dispGBR(imm)); - SH4_emit16((0xC << 12) | (0x0 << 8) | ((imm & 0xFF) << 0)); - asm_output("movb_R0_dispGBR %d", imm); - } - -#define SH4_CHECK_RANGE_movl_dispRx(imm) ((imm) >= 0 && (imm) <= 60) - -#define SH4_CHECK_ALIGN_movl_dispRx(imm) (((imm) & 0x3) == 0) - -#define FITS_SH4_movl_dispRx(imm) (SH4_CHECK_RANGE_movl_dispRx(imm) && SH4_CHECK_ALIGN_movl_dispRx(imm)) - - inline void Assembler::SH4_movl_dispRx(Register Ry, int imm, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15 && SH4_CHECK_RANGE_movl_dispRx(imm) && SH4_CHECK_ALIGN_movl_dispRx(imm)); - SH4_emit16((0x1 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (((imm & 0x3C) >> 2) << 0)); - asm_output("movl_dispRx R%d, %d, R%d", Ry, imm, Rx); - } - - inline void Assembler::SH4_movl_dispR0Rx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x6 << 0)); - asm_output("movl_dispR0Rx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movl_decRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x6 << 0)); - asm_output("movl_decRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movl_indRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x2 << 0)); - asm_output("movl_indRx R%d, R%d", Ry, Rx); - } - -#define SH4_CHECK_RANGE_movl_dispRy(imm) ((imm) >= 0 && (imm) <= 60) - -#define SH4_CHECK_ALIGN_movl_dispRy(imm) (((imm) & 0x3) == 0) - -#define FITS_SH4_movl_dispRy(imm) (SH4_CHECK_RANGE_movl_dispRy(imm) && SH4_CHECK_ALIGN_movl_dispRy(imm)) - - inline void Assembler::SH4_movl_dispRy(int imm, Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15 && SH4_CHECK_RANGE_movl_dispRy(imm) && SH4_CHECK_ALIGN_movl_dispRy(imm)); - SH4_emit16((0x5 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (((imm & 0x3C) >> 2) << 0)); - asm_output("movl_dispRy %d, R%d, R%d", imm, Ry, Rx); - } - -#define SH4_CHECK_RANGE_movl_dispGBR_R0(imm) ((imm) >= 0 && (imm) <= 1020) - -#define SH4_CHECK_ALIGN_movl_dispGBR_R0(imm) (((imm) & 0x3) == 0) - -#define FITS_SH4_movl_dispGBR_R0(imm) (SH4_CHECK_RANGE_movl_dispGBR_R0(imm) && SH4_CHECK_ALIGN_movl_dispGBR_R0(imm)) - - inline void 
Assembler::SH4_movl_dispGBR_R0(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_movl_dispGBR_R0(imm) && SH4_CHECK_ALIGN_movl_dispGBR_R0(imm)); - SH4_emit16((0xC << 12) | (0x6 << 8) | (((imm & 0x3FC) >> 2) << 0)); - asm_output("movl_dispGBR_R0 %d", imm); - } - -#define SH4_CHECK_RANGE_movl_dispPC(imm) ((imm) >= 0 && (imm) <= 1020) - -#define SH4_CHECK_ALIGN_movl_dispPC(imm) (((imm) & 0x3) == 0) - -#define FITS_SH4_movl_dispPC(imm) (SH4_CHECK_RANGE_movl_dispPC(imm) && SH4_CHECK_ALIGN_movl_dispPC(imm)) - - inline void Assembler::SH4_movl_dispPC(int imm, Register Rx) { - NanoAssert(1 && Rx <= 15 && SH4_CHECK_RANGE_movl_dispPC(imm) && SH4_CHECK_ALIGN_movl_dispPC(imm)); - SH4_emit16((0xD << 12) | ((Rx & 0xF) << 8) | (((imm & 0x3FC) >> 2) << 0)); - asm_output("movl_dispPC %d, R%d", imm, Rx); - } - - inline void Assembler::SH4_movl_dispR0Ry(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xE << 0)); - asm_output("movl_dispR0Ry R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movl_incRy(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x6 << 0)); - asm_output("movl_incRy R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movl_indRy(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x2 << 0)); - asm_output("movl_indRy R%d, R%d", Ry, Rx); - } - -#define SH4_CHECK_RANGE_movl_R0_dispGBR(imm) ((imm) >= 0 && (imm) <= 1020) - -#define SH4_CHECK_ALIGN_movl_R0_dispGBR(imm) (((imm) & 0x3) == 0) - -#define FITS_SH4_movl_R0_dispGBR(imm) (SH4_CHECK_RANGE_movl_R0_dispGBR(imm) && SH4_CHECK_ALIGN_movl_R0_dispGBR(imm)) - - inline void Assembler::SH4_movl_R0_dispGBR(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_movl_R0_dispGBR(imm) && SH4_CHECK_ALIGN_movl_R0_dispGBR(imm)); - SH4_emit16((0xC << 12) | (0x2 << 8) | (((imm & 0x3FC) >> 2) << 0)); - asm_output("movl_R0_dispGBR %d", imm); - } - - inline void Assembler::SH4_movw_dispR0Rx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x5 << 0)); - asm_output("movw_dispR0Rx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movw_decRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x5 << 0)); - asm_output("movw_decRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movw_indRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x1 << 0)); - asm_output("movw_indRx R%d, R%d", Ry, Rx); - } - -#define SH4_CHECK_RANGE_movw_dispRy_R0(imm) ((imm) >= 0 && (imm) <= 30) - -#define SH4_CHECK_ALIGN_movw_dispRy_R0(imm) (((imm) & 0x1) == 0) - -#define FITS_SH4_movw_dispRy_R0(imm) (SH4_CHECK_RANGE_movw_dispRy_R0(imm) && SH4_CHECK_ALIGN_movw_dispRy_R0(imm)) - - inline void Assembler::SH4_movw_dispRy_R0(int imm, Register Ry) { - NanoAssert(1 && Ry <= 15 && SH4_CHECK_RANGE_movw_dispRy_R0(imm) && SH4_CHECK_ALIGN_movw_dispRy_R0(imm)); - SH4_emit16((0x8 << 12) | (0x5 << 8) | ((Ry & 0xF) << 4) | (((imm & 0x1E) >> 1) << 0)); - asm_output("movw_dispRy_R0 %d, R%d", imm, Ry); - } - -#define SH4_CHECK_RANGE_movw_dispGBR_R0(imm) ((imm) >= 0 && (imm) <= 510) - -#define SH4_CHECK_ALIGN_movw_dispGBR_R0(imm) (((imm) & 0x1) == 0) - -#define FITS_SH4_movw_dispGBR_R0(imm) 
(SH4_CHECK_RANGE_movw_dispGBR_R0(imm) && SH4_CHECK_ALIGN_movw_dispGBR_R0(imm)) - - inline void Assembler::SH4_movw_dispGBR_R0(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_movw_dispGBR_R0(imm) && SH4_CHECK_ALIGN_movw_dispGBR_R0(imm)); - SH4_emit16((0xC << 12) | (0x5 << 8) | (((imm & 0x1FE) >> 1) << 0)); - asm_output("movw_dispGBR_R0 %d", imm); - } - -#define SH4_CHECK_RANGE_movw_dispPC(imm) ((imm) >= 0 && (imm) <= 510) - -#define SH4_CHECK_ALIGN_movw_dispPC(imm) (((imm) & 0x1) == 0) - -#define FITS_SH4_movw_dispPC(imm) (SH4_CHECK_RANGE_movw_dispPC(imm) && SH4_CHECK_ALIGN_movw_dispPC(imm)) - - inline void Assembler::SH4_movw_dispPC(int imm, Register Rx) { - NanoAssert(1 && Rx <= 15 && SH4_CHECK_RANGE_movw_dispPC(imm) && SH4_CHECK_ALIGN_movw_dispPC(imm)); - SH4_emit16((0x9 << 12) | ((Rx & 0xF) << 8) | (((imm & 0x1FE) >> 1) << 0)); - asm_output("movw_dispPC %d, R%d", imm, Rx); - } - - inline void Assembler::SH4_movw_dispR0Ry(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xD << 0)); - asm_output("movw_dispR0Ry R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movw_incRy(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x5 << 0)); - asm_output("movw_incRy R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_movw_indRy(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x1 << 0)); - asm_output("movw_indRy R%d, R%d", Ry, Rx); - } - -#define SH4_CHECK_RANGE_movw_R0_dispRx(imm) ((imm) >= 0 && (imm) <= 30) - -#define SH4_CHECK_ALIGN_movw_R0_dispRx(imm) (((imm) & 0x1) == 0) - -#define FITS_SH4_movw_R0_dispRx(imm) (SH4_CHECK_RANGE_movw_R0_dispRx(imm) && SH4_CHECK_ALIGN_movw_R0_dispRx(imm)) - - inline void Assembler::SH4_movw_R0_dispRx(int imm, Register Rx) { - NanoAssert(1 && Rx <= 15 && SH4_CHECK_RANGE_movw_R0_dispRx(imm) && SH4_CHECK_ALIGN_movw_R0_dispRx(imm)); - SH4_emit16((0x8 << 12) | (0x1 << 8) | ((Rx & 0xF) << 4) | (((imm & 0x1E) >> 1) << 0)); - asm_output("movw_R0_dispRx %d, R%d", imm, Rx); - } - -#define SH4_CHECK_RANGE_movw_R0_dispGBR(imm) ((imm) >= 0 && (imm) <= 510) - -#define SH4_CHECK_ALIGN_movw_R0_dispGBR(imm) (((imm) & 0x1) == 0) - -#define FITS_SH4_movw_R0_dispGBR(imm) (SH4_CHECK_RANGE_movw_R0_dispGBR(imm) && SH4_CHECK_ALIGN_movw_R0_dispGBR(imm)) - - inline void Assembler::SH4_movw_R0_dispGBR(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_movw_R0_dispGBR(imm) && SH4_CHECK_ALIGN_movw_R0_dispGBR(imm)); - SH4_emit16((0xC << 12) | (0x1 << 8) | (((imm & 0x1FE) >> 1) << 0)); - asm_output("movw_R0_dispGBR %d", imm); - } - -#define SH4_CHECK_RANGE_mova_dispPC_R0(imm) ((imm) >= 0 && (imm) <= 1020) - -#define SH4_CHECK_ALIGN_mova_dispPC_R0(imm) (((imm) & 0x3) == 0) - -#define FITS_SH4_mova_dispPC_R0(imm) (SH4_CHECK_RANGE_mova_dispPC_R0(imm) && SH4_CHECK_ALIGN_mova_dispPC_R0(imm)) - - inline void Assembler::SH4_mova_dispPC_R0(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_mova_dispPC_R0(imm) && SH4_CHECK_ALIGN_mova_dispPC_R0(imm)); - SH4_emit16((0xC << 12) | (0x7 << 8) | (((imm & 0x3FC) >> 2) << 0)); - asm_output("mova_dispPC_R0 %d", imm); - } - - inline void Assembler::SH4_movcal_R0_indRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0xC << 4) | (0x3 << 0)); - asm_output("movcal_R0_indRx R%d", Rx); - } - - inline void Assembler::SH4_movcol_R0_indRx(Register Rx) { - NanoAssert(1 && Rx <= 
15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x7 << 4) | (0x3 << 0)); - asm_output("movcol_R0_indRx R%d", Rx); - } - - inline void Assembler::SH4_movlil_indRy_R0(Register Ry) { - NanoAssert(1 && Ry <= 15); - SH4_emit16((0x0 << 12) | ((Ry & 0xF) << 8) | (0x6 << 4) | (0x3 << 0)); - asm_output("movlil_indRy_R0 R%d", Ry); - } - - inline void Assembler::SH4_movt(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x9 << 0)); - asm_output("movt R%d", Rx); - } - - inline void Assembler::SH4_movual_indRy_R0(Register Ry) { - NanoAssert(1 && Ry <= 15); - SH4_emit16((0x4 << 12) | ((Ry & 0xF) << 8) | (0xA << 4) | (0x9 << 0)); - asm_output("movual_indRy_R0 R%d", Ry); - } - - inline void Assembler::SH4_movual_incRy_R0(Register Ry) { - NanoAssert(1 && Ry <= 15); - SH4_emit16((0x4 << 12) | ((Ry & 0xF) << 8) | (0xE << 4) | (0x9 << 0)); - asm_output("movual_incRy_R0 R%d", Ry); - } - - inline void Assembler::SH4_mulsw(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xF << 0)); - asm_output("mulsw R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_muls(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xF << 0)); - asm_output("muls R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_mull(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x7 << 0)); - asm_output("mull R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_muluw(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xE << 0)); - asm_output("muluw R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_mulu(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xE << 0)); - asm_output("mulu R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_neg(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xB << 0)); - asm_output("neg R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_negc(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xA << 0)); - asm_output("negc R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_nop() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x0 << 4) | (0x9 << 0)); - asm_output("nop"); - } - - inline void Assembler::SH4_not(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x7 << 0)); - asm_output("not R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_ocbi_indRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x9 << 4) | (0x3 << 0)); - asm_output("ocbi_indRx R%d", Rx); - } - - inline void Assembler::SH4_ocbp_indRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0xA << 4) | (0x3 << 0)); - asm_output("ocbp_indRx R%d", Rx); - } - - inline void Assembler::SH4_ocbwb_indRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0xB << 4) | (0x3 << 0)); - asm_output("ocbwb_indRx R%d", Rx); - } - -#define SH4_CHECK_RANGE_or_imm_R0(imm) 
((imm) >= 0 && (imm) <= 255) - -#define FITS_SH4_or_imm_R0(imm) (SH4_CHECK_RANGE_or_imm_R0(imm)) - - inline void Assembler::SH4_or_imm_R0(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_or_imm_R0(imm)); - SH4_emit16((0xC << 12) | (0xB << 8) | ((imm & 0xFF) << 0)); - asm_output("or_imm_R0 %d", imm); - } - - inline void Assembler::SH4_or(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xB << 0)); - asm_output("or R%d, R%d", Ry, Rx); - } - -#define SH4_CHECK_RANGE_orb_imm_dispR0GBR(imm) ((imm) >= 0 && (imm) <= 255) - -#define FITS_SH4_orb_imm_dispR0GBR(imm) (SH4_CHECK_RANGE_orb_imm_dispR0GBR(imm)) - - inline void Assembler::SH4_orb_imm_dispR0GBR(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_orb_imm_dispR0GBR(imm)); - SH4_emit16((0xC << 12) | (0xF << 8) | ((imm & 0xFF) << 0)); - asm_output("orb_imm_dispR0GBR %d", imm); - } - - inline void Assembler::SH4_pref_indRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x8 << 4) | (0x3 << 0)); - asm_output("pref_indRx R%d", Rx); - } - - inline void Assembler::SH4_prefi_indRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0xD << 4) | (0x3 << 0)); - asm_output("prefi_indRx R%d", Rx); - } - - inline void Assembler::SH4_rotcl(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x4 << 0)); - asm_output("rotcl R%d", Rx); - } - - inline void Assembler::SH4_rotcr(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x5 << 0)); - asm_output("rotcr R%d", Rx); - } - - inline void Assembler::SH4_rotl(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x4 << 0)); - asm_output("rotl R%d", Rx); - } - - inline void Assembler::SH4_rotr(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x5 << 0)); - asm_output("rotr R%d", Rx); - } - - inline void Assembler::SH4_rte() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x2 << 4) | (0xB << 0)); - asm_output("rte"); - } - - inline void Assembler::SH4_rts() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x0 << 4) | (0xB << 0)); - asm_output("rts"); - } - - inline void Assembler::SH4_sets() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x5 << 4) | (0x8 << 0)); - asm_output("sets"); - } - - inline void Assembler::SH4_sett() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x1 << 4) | (0x8 << 0)); - asm_output("sett"); - } - - inline void Assembler::SH4_shad(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xC << 0)); - asm_output("shad R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_shld(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xD << 0)); - asm_output("shld R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_shal(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x0 << 0)); - asm_output("shal R%d", Rx); - } - - inline void Assembler::SH4_shar(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x1 << 0)); - asm_output("shar R%d", Rx); - } - - inline void Assembler::SH4_shll(Register Rx) { - 
NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x0 << 0)); - asm_output("shll R%d", Rx); - } - - inline void Assembler::SH4_shll16(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x8 << 0)); - asm_output("shll16 R%d", Rx); - } - - inline void Assembler::SH4_shll2(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x8 << 0)); - asm_output("shll2 R%d", Rx); - } - - inline void Assembler::SH4_shll8(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0x8 << 0)); - asm_output("shll8 R%d", Rx); - } - - inline void Assembler::SH4_shlr(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x1 << 0)); - asm_output("shlr R%d", Rx); - } - - inline void Assembler::SH4_shlr16(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x9 << 0)); - asm_output("shlr16 R%d", Rx); - } - - inline void Assembler::SH4_shlr2(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x9 << 0)); - asm_output("shlr2 R%d", Rx); - } - - inline void Assembler::SH4_shlr8(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0x9 << 0)); - asm_output("shlr8 R%d", Rx); - } - - inline void Assembler::SH4_sleep() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0x1 << 4) | (0xB << 0)); - asm_output("sleep"); - } - - inline void Assembler::SH4_stc_SR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x2 << 0)); - asm_output("stc_SR R%d", Rx); - } - - inline void Assembler::SH4_stc_GBR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0x2 << 0)); - asm_output("stc_GBR R%d", Rx); - } - - inline void Assembler::SH4_stc_VBR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x2 << 0)); - asm_output("stc_VBR R%d", Rx); - } - - inline void Assembler::SH4_stc_SSR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x3 << 4) | (0x2 << 0)); - asm_output("stc_SSR R%d", Rx); - } - - inline void Assembler::SH4_stc_SPC(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x4 << 4) | (0x2 << 0)); - asm_output("stc_SPC R%d", Rx); - } - - inline void Assembler::SH4_stc_SGR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x3 << 4) | (0xA << 0)); - asm_output("stc_SGR R%d", Rx); - } - - inline void Assembler::SH4_stc_DBR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0xF << 4) | (0xA << 0)); - asm_output("stc_DBR R%d", Rx); - } - -#define SH4_CHECK_RANGE_stc_bank(imm) ((imm) >= 0 && (imm) <= 7) - -#define FITS_SH4_stc_bank(imm) (SH4_CHECK_RANGE_stc_bank(imm)) - - inline void Assembler::SH4_stc_bank(int imm, Register Rx) { - NanoAssert(1 && Rx <= 15 && SH4_CHECK_RANGE_stc_bank(imm)); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | ((imm & 0x7) << 4) | (0x2 << 0)); - asm_output("stc_bank %d, R%d", imm, Rx); - } - - inline void Assembler::SH4_stcl_SR_decRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x3 << 0)); - asm_output("stcl_SR_decRx R%d", Rx); - } - 
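/* Editorial note -- illustrative sketch, not part of this patch.  SH4 has no
 * shift-by-arbitrary-immediate instruction: only the fixed shll/shll2/shll8/
 * shll16 (and the shlr counterparts) above, plus the register-counted
 * shld/shad.  One hypothetical way to plan a constant left shift out of the
 * fixed steps -- the backend may equally well load the count into a register
 * and use SH4_shld instead -- is a greedy decomposition: */
static inline unsigned sh4_plan_shll_sketch(unsigned amount, unsigned steps[6]) {
    /* 'amount' in [0, 31]; at most 6 fixed steps are needed (31 = 16+8+2+2+2+1) */
    static const unsigned fixed[4] = { 16, 8, 2, 1 };
    unsigned n = 0;
    for (unsigned i = 0; i < 4; ++i)
        while (amount >= fixed[i]) { steps[n++] = fixed[i]; amount -= fixed[i]; }
    return n;  /* emit one SH4_shll16/shll8/shll2/shll per entry in 'steps' */
}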
- inline void Assembler::SH4_stcl_VBR_decRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x3 << 0)); - asm_output("stcl_VBR_decRx R%d", Rx); - } - - inline void Assembler::SH4_stcl_SSR_decRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x3 << 4) | (0x3 << 0)); - asm_output("stcl_SSR_decRx R%d", Rx); - } - - inline void Assembler::SH4_stcl_SPC_decRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x4 << 4) | (0x3 << 0)); - asm_output("stcl_SPC_decRx R%d", Rx); - } - - inline void Assembler::SH4_stcl_GBR_decRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0x3 << 0)); - asm_output("stcl_GBR_decRx R%d", Rx); - } - - inline void Assembler::SH4_stcl_SGR_decRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x3 << 4) | (0x2 << 0)); - asm_output("stcl_SGR_decRx R%d", Rx); - } - - inline void Assembler::SH4_stcl_DBR_decRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0xF << 4) | (0x2 << 0)); - asm_output("stcl_DBR_decRx R%d", Rx); - } - -#define SH4_CHECK_RANGE_stcl_bank_decRx(imm) ((imm) >= 0 && (imm) <= 7) - -#define FITS_SH4_stcl_bank_decRx(imm) (SH4_CHECK_RANGE_stcl_bank_decRx(imm)) - - inline void Assembler::SH4_stcl_bank_decRx(int imm, Register Rx) { - NanoAssert(1 && Rx <= 15 && SH4_CHECK_RANGE_stcl_bank_decRx(imm)); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | ((imm & 0x7) << 4) | (0x3 << 0)); - asm_output("stcl_bank_decRx %d, R%d", imm, Rx); - } - - inline void Assembler::SH4_sts_MACH(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0xA << 0)); - asm_output("sts_MACH R%d", Rx); - } - - inline void Assembler::SH4_sts_MACL(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0xA << 0)); - asm_output("sts_MACL R%d", Rx); - } - - inline void Assembler::SH4_sts_PR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0xA << 0)); - asm_output("sts_PR R%d", Rx); - } - - inline void Assembler::SH4_sts_FPUL(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x5 << 4) | (0xA << 0)); - asm_output("sts_FPUL R%d", Rx); - } - - inline void Assembler::SH4_sts_FPSCR(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x6 << 4) | (0xA << 0)); - asm_output("sts_FPSCR R%d", Rx); - } - - inline void Assembler::SH4_stsl_MACH_decRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x2 << 0)); - asm_output("stsl_MACH_decRx R%d", Rx); - } - - inline void Assembler::SH4_stsl_MACL_decRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0x2 << 0)); - asm_output("stsl_MACL_decRx R%d", Rx); - } - - inline void Assembler::SH4_stsl_PR_decRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x2 << 0)); - asm_output("stsl_PR_decRx R%d", Rx); - } - - inline void Assembler::SH4_stsl_FPUL_decRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x5 << 4) | (0x2 << 0)); - asm_output("stsl_FPUL_decRx R%d", Rx); - } - - inline void Assembler::SH4_stsl_FPSCR_decRx(Register Rx) { - NanoAssert(1 
&& Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x6 << 4) | (0x2 << 0)); - asm_output("stsl_FPSCR_decRx R%d", Rx); - } - - inline void Assembler::SH4_sub(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x8 << 0)); - asm_output("sub R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_subc(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xA << 0)); - asm_output("subc R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_subv(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xB << 0)); - asm_output("subv R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_swapb(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x8 << 0)); - asm_output("swapb R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_swapw(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x6 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x9 << 0)); - asm_output("swapw R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_synco() { - NanoAssert(1); - SH4_emit16((0x0 << 12) | (0x0 << 8) | (0xA << 4) | (0xB << 0)); - asm_output("synco"); - } - - inline void Assembler::SH4_tasb_indRx(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0xB << 0)); - asm_output("tasb_indRx R%d", Rx); - } - -#define SH4_CHECK_RANGE_trapa_imm(imm) ((imm) >= 0 && (imm) <= 255) - -#define FITS_SH4_trapa_imm(imm) (SH4_CHECK_RANGE_trapa_imm(imm)) - - inline void Assembler::SH4_trapa_imm(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_trapa_imm(imm)); - SH4_emit16((0xC << 12) | (0x3 << 8) | ((imm & 0xFF) << 0)); - asm_output("trapa_imm %d", imm); - } - -#define SH4_CHECK_RANGE_tst_imm_R0(imm) ((imm) >= 0 && (imm) <= 255) - -#define FITS_SH4_tst_imm_R0(imm) (SH4_CHECK_RANGE_tst_imm_R0(imm)) - - inline void Assembler::SH4_tst_imm_R0(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_tst_imm_R0(imm)); - SH4_emit16((0xC << 12) | (0x8 << 8) | ((imm & 0xFF) << 0)); - asm_output("tst_imm_R0 %d", imm); - } - - inline void Assembler::SH4_tst(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x8 << 0)); - asm_output("tst R%d, R%d", Ry, Rx); - } - -#define SH4_CHECK_RANGE_tstb_imm_dispR0GBR(imm) ((imm) >= 0 && (imm) <= 255) - -#define FITS_SH4_tstb_imm_dispR0GBR(imm) (SH4_CHECK_RANGE_tstb_imm_dispR0GBR(imm)) - - inline void Assembler::SH4_tstb_imm_dispR0GBR(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_tstb_imm_dispR0GBR(imm)); - SH4_emit16((0xC << 12) | (0xC << 8) | ((imm & 0xFF) << 0)); - asm_output("tstb_imm_dispR0GBR %d", imm); - } - -#define SH4_CHECK_RANGE_xor_imm_R0(imm) ((imm) >= 0 && (imm) <= 255) - -#define FITS_SH4_xor_imm_R0(imm) (SH4_CHECK_RANGE_xor_imm_R0(imm)) - - inline void Assembler::SH4_xor_imm_R0(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_xor_imm_R0(imm)); - SH4_emit16((0xC << 12) | (0xA << 8) | ((imm & 0xFF) << 0)); - asm_output("xor_imm_R0 %d", imm); - } - - inline void Assembler::SH4_xor(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xA << 0)); - asm_output("xor R%d, R%d", Ry, Rx); - } - -#define 
SH4_CHECK_RANGE_xorb_imm_dispR0GBR(imm) ((imm) >= 0 && (imm) <= 255) - -#define FITS_SH4_xorb_imm_dispR0GBR(imm) (SH4_CHECK_RANGE_xorb_imm_dispR0GBR(imm)) - - inline void Assembler::SH4_xorb_imm_dispR0GBR(int imm) { - NanoAssert(1 && SH4_CHECK_RANGE_xorb_imm_dispR0GBR(imm)); - SH4_emit16((0xC << 12) | (0xE << 8) | ((imm & 0xFF) << 0)); - asm_output("xorb_imm_dispR0GBR %d", imm); - } - - inline void Assembler::SH4_xtrct(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x2 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xD << 0)); - asm_output("xtrct R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_dt(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x4 << 12) | ((Rx & 0xF) << 8) | (0x1 << 4) | (0x0 << 0)); - asm_output("dt R%d", Rx); - } - - inline void Assembler::SH4_dmulsl(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xD << 0)); - asm_output("dmulsl R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_dmulul(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x3 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x5 << 0)); - asm_output("dmulul R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_macl_incRy_incRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | ((Ry & 0xF) << 4) | (0xF << 0)); - asm_output("macl_incRy_incRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_braf(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x2 << 4) | (0x3 << 0)); - asm_output("braf R%d", Rx); - } - - inline void Assembler::SH4_bsrf(Register Rx) { - NanoAssert(1 && Rx <= 15); - SH4_emit16((0x0 << 12) | ((Rx & 0xF) << 8) | (0x0 << 4) | (0x3 << 0)); - asm_output("bsrf R%d", Rx); - } - - inline void Assembler::SH4_fabs(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x5 << 4) | (0xD << 0)); - asm_output("fabs R%d", Rx); - } - - inline void Assembler::SH4_fabs_double(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && !((Rx - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x5 << 4) | (0xD << 0)); - asm_output("fabs_double R%d", Rx); - } - - inline void Assembler::SH4_fadd(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x0 << 0)); - asm_output("fadd R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fadd_double(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && !((Rx - 16) & 0x1) && Ry >= 16 && (Ry - 16) <= 15 && !((Ry - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x0 << 0)); - asm_output("fadd_double R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fcmpeq(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x4 << 0)); - asm_output("fcmpeq R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fcmpeq_double(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && !((Rx - 16) & 0x1) && Ry >= 16 && (Ry - 16) <= 15 && !((Ry - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x4 << 0)); - 
asm_output("fcmpeq_double R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fcmpgt(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x5 << 0)); - asm_output("fcmpgt R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fcmpgt_double(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && !((Rx - 16) & 0x1) && Ry >= 16 && (Ry - 16) <= 15 && !((Ry - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x5 << 0)); - asm_output("fcmpgt_double R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fcnvds_double_FPUL(Register Rx) { - NanoAssert(1 && !((Rx - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0xB << 4) | (0xD << 0)); - asm_output("fcnvds_double_FPUL R%d", Rx); - } - - inline void Assembler::SH4_fcnvsd_FPUL_double(Register Rx) { - NanoAssert(1 && !((Rx - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0xA << 4) | (0xD << 0)); - asm_output("fcnvsd_FPUL_double R%d", Rx); - } - - inline void Assembler::SH4_fdiv(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x3 << 0)); - asm_output("fdiv R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fdiv_double(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && !((Rx - 16) & 0x1) && Ry >= 16 && (Ry - 16) <= 15 && !((Ry - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x3 << 0)); - asm_output("fdiv_double R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fipr(Register Ry, Register Rx) { - NanoAssert(1 && !(((Rx - 16) & 0x3) || ((Ry - 16) & 0x3))); - SH4_emit16((0xF << 12) | (((((Rx - 16) & 0xF) << 2) | (((Ry - 16) & 0xF) >> 2)) << 8) | (0xE << 4) | (0xD << 0)); - asm_output("fipr R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fldi0(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x8 << 4) | (0xD << 0)); - asm_output("fldi0 R%d", Rx); - } - - inline void Assembler::SH4_fldi1(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x9 << 4) | (0xD << 0)); - asm_output("fldi1 R%d", Rx); - } - - inline void Assembler::SH4_flds_FPUL(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x1 << 4) | (0xD << 0)); - asm_output("flds_FPUL R%d", Rx); - } - - inline void Assembler::SH4_float_FPUL(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x2 << 4) | (0xD << 0)); - asm_output("float_FPUL R%d", Rx); - } - - inline void Assembler::SH4_float_FPUL_double(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && !((Rx - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x2 << 4) | (0xD << 0)); - asm_output("float_FPUL_double R%d", Rx); - } - - inline void Assembler::SH4_fmac(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0xE << 0)); - asm_output("fmac R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 
&& (Rx - 16) <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0xC << 0)); - asm_output("fmov R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_Xdouble_Xdouble(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0xC << 0)); - asm_output("fmov_Xdouble_Xdouble R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_indRy(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x8 << 0)); - asm_output("fmov_indRy R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_indRy_Xdouble(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x8 << 0)); - asm_output("fmov_indRy_Xdouble R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_indRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0xA << 0)); - asm_output("fmov_indRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_Xdouble_indRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0xA << 0)); - asm_output("fmov_Xdouble_indRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_incRy(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x9 << 0)); - asm_output("fmov_incRy R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_incRy_Xdouble(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x9 << 0)); - asm_output("fmov_incRy_Xdouble R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_decRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0xB << 0)); - asm_output("fmov_decRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_Xdouble_decRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0xB << 0)); - asm_output("fmov_Xdouble_decRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_dispR0Ry(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x6 << 0)); - asm_output("fmov_dispR0Ry R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_dispR0Ry_Xdouble(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x6 << 0)); - asm_output("fmov_dispR0Ry_Xdouble R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmov_dispR0Rx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x7 << 0)); - asm_output("fmov_dispR0Rx R%d, R%d", Ry, Rx); - } - - inline void 
Assembler::SH4_fmov_Xdouble_dispR0Rx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x7 << 0)); - asm_output("fmov_Xdouble_dispR0Rx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovd_indRy_Xdouble(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x8 << 0)); - asm_output("fmovd_indRy_Xdouble R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovd_Xdouble_indRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0xA << 0)); - asm_output("fmovd_Xdouble_indRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovd_incRy_Xdouble(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x9 << 0)); - asm_output("fmovd_incRy_Xdouble R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovd_Xdouble_decRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0xB << 0)); - asm_output("fmovd_Xdouble_decRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovd_dispR0Ry_Xdouble(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x6 << 0)); - asm_output("fmovd_dispR0Ry_Xdouble R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovd_Xdouble_dispR0Rx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x7 << 0)); - asm_output("fmovd_Xdouble_dispR0Rx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovs_indRy(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x8 << 0)); - asm_output("fmovs_indRy R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovs_indRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0xA << 0)); - asm_output("fmovs_indRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovs_incRy(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x9 << 0)); - asm_output("fmovs_incRy R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovs_decRx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0xB << 0)); - asm_output("fmovs_decRx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovs_dispR0Ry(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | ((Ry & 0xF) << 4) | (0x6 << 0)); - asm_output("fmovs_dispR0Ry R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmovs_dispR0Rx(Register Ry, Register Rx) { - NanoAssert(1 && Rx <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | ((Rx & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x7 << 0)); 
- asm_output("fmovs_dispR0Rx R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmul(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x2 << 0)); - asm_output("fmul R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fmul_double(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && !((Rx - 16) & 0x1) && Ry >= 16 && (Ry - 16) <= 15 && !((Ry - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x2 << 0)); - asm_output("fmul_double R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fneg(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x4 << 4) | (0xD << 0)); - asm_output("fneg R%d", Rx); - } - - inline void Assembler::SH4_fneg_double(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && !((Rx - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x4 << 4) | (0xD << 0)); - asm_output("fneg_double R%d", Rx); - } - - inline void Assembler::SH4_fpchg() { - NanoAssert(1); - SH4_emit16((0xF << 12) | (0x7 << 8) | (0xF << 4) | (0xD << 0)); - asm_output("fpchg"); - } - - inline void Assembler::SH4_frchg() { - NanoAssert(1); - SH4_emit16((0xF << 12) | (0xB << 8) | (0xF << 4) | (0xD << 0)); - asm_output("frchg"); - } - - inline void Assembler::SH4_fsca_FPUL_double(Register Rx) { - NanoAssert(1 && !((Rx - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0xF << 4) | (0xD << 0)); - asm_output("fsca_FPUL_double R%d", Rx); - } - - inline void Assembler::SH4_fschg() { - NanoAssert(1); - SH4_emit16((0xF << 12) | (0x3 << 8) | (0xF << 4) | (0xD << 0)); - asm_output("fschg"); - } - - inline void Assembler::SH4_fsqrt(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x6 << 4) | (0xD << 0)); - asm_output("fsqrt R%d", Rx); - } - - inline void Assembler::SH4_fsqrt_double(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && !((Rx - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x6 << 4) | (0xD << 0)); - asm_output("fsqrt_double R%d", Rx); - } - - inline void Assembler::SH4_fsrra(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x7 << 4) | (0xD << 0)); - asm_output("fsrra R%d", Rx); - } - - inline void Assembler::SH4_fsts_FPUL(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x0 << 4) | (0xD << 0)); - asm_output("fsts_FPUL R%d", Rx); - } - - inline void Assembler::SH4_fsub(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && Ry >= 16 && (Ry - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x1 << 0)); - asm_output("fsub R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_fsub_double(Register Ry, Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && !((Rx - 16) & 0x1) && Ry >= 16 && (Ry - 16) <= 15 && !((Ry - 16) & 0x1)); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (((Ry - 16) & 0xF) << 4) | (0x1 << 0)); - asm_output("fsub_double R%d, R%d", Ry, Rx); - } - - inline void Assembler::SH4_ftrc_FPUL(Register Rx) { - NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15); - SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x3 << 4) | (0xD << 0)); - asm_output("ftrc_FPUL R%d", Rx); 
-    }
-
-    inline void Assembler::SH4_ftrc_double_FPUL(Register Rx) {
-        NanoAssert(1 && Rx >= 16 && (Rx - 16) <= 15 && !((Rx - 16) & 0x1));
-        SH4_emit16((0xF << 12) | (((Rx - 16) & 0xF) << 8) | (0x3 << 4) | (0xD << 0));
-        asm_output("ftrc_double_FPUL R%d", Rx);
-    }
-
-    inline void Assembler::SH4_ftrv(Register Rx) {
-        NanoAssert(1 && !((Rx - 16) & 0x3));
-        SH4_emit16((0xF << 12) | (((((Rx - 16) & 0xF) << 2) | 0x1) << 8) | (0xF << 4) | (0xD << 0));
-        asm_output("ftrv R%d", Rx);
-    }
-
-    /* End of the auto-generated part. */
-    /***********************************/
+#include "NativeSH4-auto-generated.h"
 
 #define SH4_movl_PCrel(address, reg) \
     SH4_movl_dispPC(((uint32_t)(address) - (((uint32_t)_nIns) & ~0x3)), reg)
@@ -1970,7 +278,7 @@ namespace nanojit
             // just have to copy it to the right register.
             underrunProtect(2 * sizeof(NIns));
             SH4_fmov(arg->getReg(), reg);
-            SH4_fmov((Register)(arg->getReg() + 1), (Register)(reg + 1));
+            SH4_fmov(arg->getReg() + 1, reg + 1);
         }
         else {
             // This argument is not assigned to a register, thus
@@ -2160,7 +468,7 @@ namespace nanojit
         NIns *after_mov_true = _nIns;
         if (inst->isop(LIR_cmovd)) {
             SH4_fmov(src_true_reg, dest_reg);
-            SH4_fmov(Register(src_true_reg + 1), Register(dest_reg + 1));
+            SH4_fmov(src_true_reg + 1, dest_reg + 1);
         }
         else {
             SH4_mov(src_true_reg, dest_reg);
@@ -2175,7 +483,7 @@ namespace nanojit
 
         if (inst->isop(LIR_cmovd)) {
             SH4_fmov(src_false_reg, dest_reg);
-            SH4_fmov(Register(src_false_reg + 1), Register(dest_reg + 1));
+            SH4_fmov(src_false_reg + 1, dest_reg + 1);
         }
         else {
             SH4_mov(src_false_reg, dest_reg);
@@ -2237,7 +545,7 @@ namespace nanojit
         if (result_reg != operand_reg) {
             underrunProtect(2 * sizeof(NIns));
             SH4_fmov(operand_reg, result_reg);
-            SH4_fmov(Register(operand_reg + 1), Register(result_reg + 1));
+            SH4_fmov(operand_reg + 1, result_reg + 1);
         }
 
         freeResourcesOf(inst);
@@ -2275,7 +583,7 @@ namespace nanojit
         // 1. operand1 -> result
         if (result_reg != operand1_reg) {
             SH4_fmov(operand1_reg, result_reg);
-            SH4_fmov(Register(operand1_reg + 1), Register(result_reg + 1));
+            SH4_fmov(operand1_reg + 1, result_reg + 1);
         }
 
         freeResourcesOf(inst);
@@ -2357,7 +665,7 @@ namespace nanojit
 
         // 2. Load the "double" from @Rtemp.
         SH4_fmovs_indRy(Rtemp, result_reg);
-        SH4_fmovs_incRy(Rtemp, Register(result_reg + 1));
+        SH4_fmovs_incRy(Rtemp, result_reg + 1);
 
         // 1. base + offset -> Rtemp.
         asm_base_offset(offset, base_reg);
@@ -2521,7 +829,7 @@ namespace nanojit
         underrunProtect(2 * sizeof(NIns));
 
         SH4_fmov(src_reg, dest_reg);
-        SH4_fmov(Register(src_reg + 1), Register(dest_reg + 1));
+        SH4_fmov(src_reg + 1, dest_reg + 1);
     }
 
     void Assembler::asm_param(LIns *inst) {
@@ -2561,7 +869,7 @@ namespace nanojit
 
         // 8. Load the "double" constant from @Rtemp.
         SH4_fmovs_indRy(Rtemp, result_reg);
-        SH4_fmovs_incRy(Rtemp, Register(result_reg + 1));
+        SH4_fmovs_incRy(Rtemp, result_reg + 1);
 
         // 7. Restore R0 from the stack.
         SH4_movl_incRy(SP, R0);
@@ -2853,7 +1161,7 @@ namespace nanojit
         underrunProtect(2 * sizeof(NIns));
 
         // 2. Store the "double" to @Rtemp.
-        SH4_fmovs_decRx(Register(value_reg + 1), Rtemp);
+        SH4_fmovs_decRx(value_reg + 1, Rtemp);
         SH4_fmovs_decRx(value_reg, Rtemp);
 
         // Adjust the offset since we are using a post decrement (by 4) indirect loads.
@@ -3184,17 +1492,17 @@ namespace nanojit
     }
 
     Register Assembler::nRegisterAllocFromSet(RegisterMask set) {
-        int reg = 0;
+        Register reg;
 
         // Find the first register in this set.
         reg = lsReg(set);
 
-        _allocator.free &= ~rmask((Register)reg);
+        _allocator.free &= ~rmask(reg);
 
         // Sanity check.
-        NanoAssert((rmask((Register)reg) & set) == rmask((Register)reg));
+        NanoAssert((rmask(reg) & set) == rmask(reg));
 
-        return (Register)reg;
+        return reg;
     }
 
     void Assembler::nRegisterResetAll(RegAlloc& regs) {
diff --git a/js/src/nanojit/NativeSH4.h b/js/src/nanojit/NativeSH4.h
index 35533a091759..ed51c3612c3f 100644
--- a/js/src/nanojit/NativeSH4.h
+++ b/js/src/nanojit/NativeSH4.h
@@ -40,6 +40,8 @@
 #ifndef __nanojit_NativeSH4__
 #define __nanojit_NativeSH4__
 
+#include "NativeCommon.h"
+
 namespace nanojit
 {
 /***********************************************************************
@@ -47,64 +49,63 @@ namespace nanojit
     */
 
     // General purpose and ABI registers.
-    typedef uint32_t Register;
-    static const Register
-        // Scratch registers (a.k.a caller-saved, a.k.a local).
-        R0 = { 0 },
-        R1 = { 1 },
-        R2 = { 2 },
-        R3 = { 3 }, // Excluded from the regalloc because of its use as a hyper-scratch.
-        R4 = { 4 },
-        R5 = { 5 },
-        R6 = { 6 },
-        R7 = { 7 },
-        // Saved registers (a.k.a callee-saved, a.k.a global).
-        R8 = { 8 },
-        R9 = { 9 },
-        R10 = { 10 },
-        R11 = { 11 },
-        R12 = { 12 },
-        R13 = { 13 },
+    // Scratch registers (a.k.a caller-saved, a.k.a local).
+    static const Register R0 = { 0 };
+    static const Register R1 = { 1 };
+    static const Register R2 = { 2 };
+    static const Register R3 = { 3 }; // Excluded from the regalloc because of its use as a hyper-scratch.
+    static const Register R4 = { 4 };
+    static const Register R5 = { 5 };
+    static const Register R6 = { 6 };
+    static const Register R7 = { 7 };
 
-        // ABI registers, excluded from the register allocation.
-        FP = { 14 },
-        SP = { 15 },
+    // Saved registers (a.k.a callee-saved, a.k.a global).
+    static const Register R8 = { 8 };
+    static const Register R9 = { 9 };
+    static const Register R10 = { 10 };
+    static const Register R11 = { 11 };
+    static const Register R12 = { 12 };
+    static const Register R13 = { 13 };
 
-        // Floatting-point registers.
-        _D0 = { 16 },
-        _F0 = _D0,
-        _F1 = { 17 },
-        _D1 = { 18 },
-        _F2 = _D1,
-        _F3 = { 19 },
-        _D2 = { 20 },
-        _F4 = _D2,
-        _F5 = { 21 },
-        _D3 = { 22 },
-        _F6 = _D3,
-        _F7 = { 23 },
-        _D4 = { 24 },
-        _F8 = _D4,
-        _F9 = { 25 },
-        _D5 = { 26 },
-        _F10 = _D5,
-        _F11 = { 27 },
-        _D6 = { 28 },
-        _F12 = _D6,
-        _F13 = { 29 },
-        _D7 = { 30 },
-        _F14 = _D7, // Excluded from the regalloc because of its use as a hyper-scratch.
-        _F15 = { 31 },
+    // ABI registers, excluded from the register allocation.
+    static const Register FP = { 14 };
+    static const Register SP = { 15 };
 
-        // Helpers.
-        deprecated_UnknownReg = { 32 },
-        UnspecifiedReg = { 32 },
-        Rtemp = R3,
-        Dtemp = _D7;
+    // Floatting-point registers.
+    static const Register _D0 = { 16 };
+    static const Register _F0 = _D0;
+    static const Register _F1 = { 17 };
+    static const Register _D1 = { 18 };
+    static const Register _F2 = _D1;
+    static const Register _F3 = { 19 };
+    static const Register _D2 = { 20 };
+    static const Register _F4 = _D2;
+    static const Register _F5 = { 21 };
+    static const Register _D3 = { 22 };
+    static const Register _F6 = _D3;
+    static const Register _F7 = { 23 };
+    static const Register _D4 = { 24 };
+    static const Register _F8 = _D4;
+    static const Register _F9 = { 25 };
+    static const Register _D5 = { 26 };
+    static const Register _F10 = _D5;
+    static const Register _F11 = { 27 };
+    static const Register _D6 = { 28 };
+    static const Register _F12 = _D6;
+    static const Register _F13 = { 29 };
+    static const Register _D7 = { 30 };
+    static const Register _F14 = _D7; // Excluded from the regalloc because of its use as a hyper-scratch.
+    static const Register _F15 = { 31 };
 
-    static const uint32_t FirstRegNum = R0;
-    static const uint32_t LastRegNum = _D7;
+    // Helpers.
+    static const Register deprecated_UnknownReg = { 32 };
+    static const Register UnspecifiedReg = { 32 };
+    static const Register Rtemp = R3;
+    static const Register Dtemp = _D7;
+
+    static const uint32_t FirstRegNum = 0;
+    static const uint32_t LastRegNum = 30;
 }
 
 #define NJ_USE_UINT32_REGISTER 1
@@ -116,14 +117,14 @@ namespace nanojit
     typedef uint32_t RegisterMask;
 
     static const int NumSavedRegs = 6;
-    static const RegisterMask SavedRegs = ((1<
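
The hand-written hunks above drop casts such as SH4_fmov(Register(src_reg + 1), Register(dest_reg + 1)) in favour of plain "reg + 1" arithmetic, and NativeSH4.h now defines each register as a brace-initialised constant instead of a uint32_t typedef; both rely on the struct-based Register type pulled in through the new #include "NativeCommon.h" and flagged by NJ_USE_UINT32_REGISTER. NativeCommon.h itself is not part of this excerpt, so the sketch below is only an assumed, minimal model of the interface this back-end appears to use: the member name "n", the REGNUM() accessor, and the exact operator signatures are guesses inferred from how registers are used in this patch, not a copy of the real header.

// Sketch only: an assumed, simplified model of the register type used by the
// rewritten SH4 back-end.  Member and helper names are hypothetical.
#include <stdint.h>

struct Register {
    uint32_t n;                       // raw register number (assumed member name)
};

// Accessor assumed to return the raw number, since Register no longer
// converts implicitly to an integer.
static inline uint32_t REGNUM(Register r) { return r.n; }

// "reg + 1" selects the other half of an even/odd pair (e.g. the second
// single-precision register backing a double); it must yield another
// Register so calls like SH4_fmov(src_reg + 1, dest_reg + 1) still type-check.
static inline Register operator+(Register r, int offset) {
    Register next = { r.n + (uint32_t)offset };
    return next;
}

// Register masks stay plain bit sets indexed by the raw number, which is what
// lets nRegisterAllocFromSet() clear a bit with "free &= ~rmask(reg)".
typedef uint32_t RegisterMask;
static inline RegisterMask rmask(Register r) { return RegisterMask(1) << REGNUM(r); }

// The auto-generated emitters pack each instruction into one 16-bit word made
// of 4-bit fields; for example "sub Ry, Rx" is 0011 Rx Ry 1000, matching the
// deleted Assembler::SH4_sub() above.
static inline uint16_t encode_sub(Register Ry, Register Rx) {
    return (uint16_t)((0x3 << 12) | ((REGNUM(Rx) & 0xF) << 8) |
                      ((REGNUM(Ry) & 0xF) << 4) | (0x8 << 0));
}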