# PPC
|
|
# r0 = volatile scratch register
# r1 = stack pointer
# r2 = rtoc (table of contents pointer; like the 68k a5 register, global to a function as long as it makes no calls)
# r3-r10 = argument registers; r3 also holds the return value
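# Illustrative sketch of the convention above (assumes the standard ELF ABI; 'callee' is a placeholder label):
#   li   r3, 5        # first argument in r3
#   li   r4, 7        # second argument in r4
#   bl   callee       # call; the result comes back in r3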
|
|
add.=Add
|
|
add=Add
|
|
addc.=Add Carrying
|
|
addc=Add Carrying
|
|
addco.=Add Carrying
|
|
addco=Add Carrying
|
|
adde.=Add Extended
|
|
adde=Add Extended
|
|
addeo.=Add Extended
|
|
addeo=Add Extended
|
|
addex=Add Extended using alternate carry
|
|
addg6s=Add & Generate Sixes
|
|
addi=Add Immediate
|
|
addic.=Add Immediate Carrying & record
|
|
addic=Add Immediate Carrying
|
|
addis=Add Immediate Shifted
|
|
addme.=Add to Minus One Extended
|
|
addme=Add to Minus One Extended
|
|
addmeo.=Add to Minus One Extended
|
|
addmeo=Add to Minus One Extended
|
|
addo.=Add
|
|
addo=Add
|
|
addpcis=Add PC Immediate Shifted
|
|
addze.=Add to Zero Extended
|
|
addze=Add to Zero Extended
|
|
addzeo.=Add to Zero Extended
|
|
addzeo=Add to Zero Extended
|
|
and.=AND
|
|
and=AND
|
|
andc.=AND with Complement
|
|
andc=AND with Complement
|
|
andi.=AND Immediate & record
|
|
andis.=AND Immediate Shifted & record
|
|
b=Branch [& Link] [Absolute]
|
|
ba=Branch [& Link] [Absolute]
|
|
bc=Branch Conditional [& Link] [Absolute]
|
|
bca=Branch Conditional [& Link] [Absolute]
|
|
bcctr=Branch Conditional to CTR [& Link]
|
|
bcctrl=Branch Conditional to CTR [& Link]
|
|
bcdadd.=Decimal Add Modulo & record
|
|
bcdcfn.=Decimal Convert From National & record
|
|
bcdcfsq.=Decimal Convert From Signed Quadword & record
|
|
bcdcfz.=Decimal Convert From Zoned & record
|
|
bcdcpsgn.=Decimal CopySign & record
|
|
bcdctn.=Decimal Convert To National & record
|
|
bcdctsq.=Decimal Convert To Signed Quadword & record
|
|
bcdctz.=Decimal Convert To Zoned & record
|
|
bcds.=Decimal Shift & record
|
|
bcdsetsgn.=Decimal Set Sign & record
|
|
bcdsr.=Decimal Shift & Round & record
|
|
bcdsub.=Decimal Subtract Modulo & record
|
|
bcdtrunc.=Decimal Truncate & record
|
|
bcdus.=Decimal Unsigned Shift & record
|
|
bcdutrunc.=Decimal Unsigned Truncate & record
|
|
bcl=Branch Conditional [& Link] [Absolute]
|
|
bcla=Branch Conditional [& Link] [Absolute]
|
|
bclr=Branch Conditional to LR [& Link]
|
|
bclrl=Branch Conditional to LR [& Link]
|
|
bctar=Branch Conditional to BTAR [& Link]
|
|
bctarl=Branch Conditional to BTAR [& Link]
|
|
bl=Branch [& Link] [Absolute]
|
|
bla=Branch [& Link] [Absolute]
|
|
bpermd=Bit Permute Doubleword
|
|
cbcdtd=Convert Binary Coded Decimal To Declets
|
|
cdtbcd=Convert Declets To Binary Coded Decimal
|
|
clrbhrb=Clear BHRB
|
|
cmp=Compare
|
|
cmpb=Compare Byte
|
|
cmpeqb=Compare Equal Byte
|
|
cmpi=Compare Immediate
|
|
cmpl=Compare Logical
|
|
cmpli=Compare Logical Immediate
|
|
cmprb=Compare Ranged Byte
|
|
cntlzd.=Count Leading Zeros Doubleword
|
|
cntlzd=Count Leading Zeros Doubleword
|
|
cntlzw.=Count Leading Zeros Word
|
|
cntlzw=Count Leading Zeros Word
|
|
cnttzd.=Count Trailing Zeros Doubleword
|
|
cnttzd=Count Trailing Zeros Doubleword
|
|
cnttzw.=Count Trailing Zeros Word
|
|
cnttzw=Count Trailing Zeros Word
|
|
copy=Copy
|
|
cp_abort=CP_Abort
|
|
crand=CR AND
|
|
crandc=CR AND with Complement
|
|
creqv=CR Equivalent
|
|
crnand=CR NAND
|
|
crnor=CR NOR
|
|
cror=CR OR
|
|
crorc=CR OR with Complement
|
|
crxor=CR XOR
|
|
dadd.=DFP Add
|
|
dadd=DFP Add
|
|
daddq.=DFP Add Quad
|
|
daddq=DFP Add Quad
|
|
darn=Deliver A Random Number
|
|
dcbf=Data Cache Block Flush
|
|
dcbst=Data Cache Block Store
|
|
dcbt=Data Cache Block Touch
|
|
dcbtst=Data Cache Block Touch for Store
|
|
dcbz=Data Cache Block Zero
|
|
dcffix.=DFP Convert From Fixed
|
|
dcffix=DFP Convert From Fixed
|
|
dcffixq.=DFP Convert From Fixed Quad
|
|
dcffixq=DFP Convert From Fixed Quad
|
|
dcmpo=DFP Compare Ordered
|
|
dcmpoq=DFP Compare Ordered Quad
|
|
dcmpu=DFP Compare Unordered
|
|
dcmpuq=DFP Compare Unordered Quad
|
|
dctdp.=DFP Convert To DFP Long
|
|
dctdp=DFP Convert To DFP Long
|
|
dctfix.=DFP Convert To Fixed
|
|
dctfix=DFP Convert To Fixed
|
|
dctfixq.=DFP Convert To Fixed Quad
|
|
dctfixq=DFP Convert To Fixed Quad
|
|
dctqpq.=DFP Convert To DFP Extended
|
|
dctqpq=DFP Convert To DFP Extended
|
|
ddedpd.=DFP Decode DPD To BCD
|
|
ddedpd=DFP Decode DPD To BCD
|
|
ddedpdq.=DFP Decode DPD To BCD Quad
|
|
ddedpdq=DFP Decode DPD To BCD Quad
|
|
ddiv.=DFP Divide
|
|
ddiv=DFP Divide
|
|
ddivq.=DFP Divide Quad
|
|
ddivq=DFP Divide Quad
|
|
denbcd.=DFP Encode BCD To DPD
|
|
denbcd=DFP Encode BCD To DPD
|
|
denbcdq.=DFP Encode BCD To DPD Quad
|
|
denbcdq=DFP Encode BCD To DPD Quad
|
|
diex.=DFP Insert Exponent
|
|
diex=DFP Insert Exponent
|
|
diexq.=DFP Insert Exponent Quad
|
|
diexq=DFP Insert Exponent Quad
|
|
divd.=Divide Doubleword
|
|
divd=Divide Doubleword
|
|
divde.=Divide Doubleword Extended
|
|
divde=Divide Doubleword Extended
|
|
divdeo.=Divide Doubleword Extended
|
|
divdeo=Divide Doubleword Extended
|
|
divdeu.=Divide Doubleword Extended Unsigned
|
|
divdeu=Divide Doubleword Extended Unsigned
|
|
divdeuo.=Divide Doubleword Extended Unsigned
|
|
divdeuo=Divide Doubleword Extended Unsigned
|
|
divdo.=Divide Doubleword
|
|
divdo=Divide Doubleword
|
|
divdu.=Divide Doubleword Unsigned
|
|
divdu=Divide Doubleword Unsigned
|
|
divduo.=Divide Doubleword Unsigned
|
|
divduo=Divide Doubleword Unsigned
|
|
divw.=Divide Word
|
|
divw=Divide Word
|
|
divwe.=Divide Word Extended
|
|
divwe=Divide Word Extended
|
|
divweo.=Divide Word Extended
|
|
divweo=Divide Word Extended
|
|
divweu.=Divide Word Extended Unsigned
|
|
divweu=Divide Word Extended Unsigned
|
|
divweuo.=Divide Word Extended Unsigned
|
|
divweuo=Divide Word Extended Unsigned
|
|
divwo.=Divide Word
|
|
divwo=Divide Word
|
|
divwu.=Divide Word Unsigned
|
|
divwu=Divide Word Unsigned
|
|
divwuo.=Divide Word Unsigned
|
|
divwuo=Divide Word Unsigned
|
|
dmul.=DFP Multiply
|
|
dmul=DFP Multiply
|
|
dmulq.=DFP Multiply Quad
|
|
dmulq=DFP Multiply Quad
|
|
dqua.=DFP Quantize
|
|
dqua=DFP Quantize
|
|
dquai.=DFP Quantize Immediate
|
|
dquai=DFP Quantize Immediate
|
|
dquaiq.=DFP Quantize Immediate Quad
|
|
dquaiq=DFP Quantize Immediate Quad
|
|
dquaq.=DFP Quantize Quad
|
|
dquaq=DFP Quantize Quad
|
|
drdpq.=DFP Round To DFP Long
|
|
drdpq=DFP Round To DFP Long
|
|
drintn.=DFP Round To FP Integer Without Inexact
|
|
drintn=DFP Round To FP Integer Without Inexact
|
|
drintnq.=DFP Round To FP Integer Without Inexact Quad
|
|
drintnq=DFP Round To FP Integer Without Inexact Quad
|
|
drintx.=DFP Round To FP Integer With Inexact
|
|
drintx=DFP Round To FP Integer With Inexact
|
|
drintxq.=DFP Round To FP Integer With Inexact Quad
|
|
drintxq=DFP Round To FP Integer With Inexact Quad
|
|
drrnd.=DFP Reround
|
|
drrnd=DFP Reround
|
|
drrndq.=DFP Reround Quad
|
|
drrndq=DFP Reround Quad
|
|
drsp.=DFP Round To DFP Short
|
|
drsp=DFP Round To DFP Short
|
|
dscli.=DFP Shift Significand Left Immediate
|
|
dscli=DFP Shift Significand Left Immediate
|
|
dscliq.=DFP Shift Significand Left Immediate Quad
|
|
dscliq=DFP Shift Significand Left Immediate Quad
|
|
dscri.=DFP Shift Significand Right Immediate
|
|
dscri=DFP Shift Significand Right Immediate
|
|
dscriq.=DFP Shift Significand Right Immediate Quad
|
|
dscriq=DFP Shift Significand Right Immediate Quad
|
|
dsub.=DFP Subtract
|
|
dsub=DFP Subtract
|
|
dsubq.=DFP Subtract Quad
|
|
dsubq=DFP Subtract Quad
|
|
dtstdc=DFP Test Data Class
|
|
dtstdcq=DFP Test Data Class Quad
|
|
dtstdg=DFP Test Data Group
|
|
dtstdgq=DFP Test Data Group Quad
|
|
dtstex=DFP Test Exponent
|
|
dtstexq=DFP Test Exponent Quad
|
|
dtstsf=DFP Test Significance
|
|
dtstsfi=DFP Test Significance Immediate
|
|
dtstsfiq=DFP Test Significance Immediate Quad
|
|
dtstsfq=DFP Test Significance Quad
|
|
dxex.=DFP Extract Exponent
|
|
dxex=DFP Extract Exponent
|
|
dxexq.=DFP Extract Exponent Quad
|
|
dxexq=DFP Extract Exponent Quad
|
|
eieio=Enforce In-order Execution of I/O
|
|
eqv.=Equivalent
|
|
eqv=Equivalent
|
|
extsb.=Extend Sign Byte
|
|
extsb=Extend Sign Byte
|
|
extsh.=Extend Sign Halfword
|
|
extsh=Extend Sign Halfword
|
|
extsw.=Extend Sign Word
|
|
extsw=Extend Sign Word
|
|
extswsli.=Extend Sign Word & Shift Left Immediate
|
|
extswsli=Extend Sign Word & Shift Left Immediate
|
|
fabs.=Floating Absolute
|
|
fabs=Floating Absolute
|
|
fadd.=Floating Add
|
|
fadd=Floating Add
|
|
fadds.=Floating Add Single
|
|
fadds=Floating Add Single
|
|
fcfid.=Floating Convert with round Signed Doubleword to Double-Precision format
|
|
fcfid=Floating Convert with round Signed Doubleword to Double-Precision format
|
|
fcfids.=Floating Convert with round Signed Doubleword to Single-Precision format
|
|
fcfids=Floating Convert with round Signed Doubleword to Single-Precision format
|
|
fcfidu.=Floating Convert with round Unsigned Doubleword to Double-Precision format
|
|
fcfidu=Floating Convert with round Unsigned Doubleword to Double-Precision format
|
|
fcfidus.=Floating Convert with round Unsigned Doubleword to Single-Precision format
|
|
fcfidus=Floating Convert with round Unsigned Doubleword to Single-Precision format
|
|
fcmpo=Floating Compare Ordered
|
|
fcmpu=Floating Compare Unordered
|
|
fcpsgn.=Floating Copy Sign
|
|
fcpsgn=Floating Copy Sign
|
|
fctid.=Floating Convert with round Double-Precision To Signed Doubleword format
|
|
fctid=Floating Convert with round Double-Precision To Signed Doubleword format
|
|
fctidu.=Floating Convert with round Double-Precision To Unsigned Doubleword format
|
|
fctidu=Floating Convert with round Double-Precision To Unsigned Doubleword format
|
|
fctiduz.=Floating Convert with round to Zero Double-Precision To Unsigned Doubleword format
|
|
fctiduz=Floating Convert with round to Zero Double-Precision To Unsigned Doubleword format
|
|
fctidz.=Floating Convert with round to Zero Double-Precision To Signed Doubleword format
|
|
fctidz=Floating Convert with round to Zero Double-Precision To Signed Doubleword format
|
|
fctiw.=Floating Convert with round Double-Precision To Signed Word format
|
|
fctiw=Floating Convert with round Double-Precision To Signed Word format
|
|
fctiwu.=Floating Convert with round Double-Precision To Unsigned Word format
|
|
fctiwu=Floating Convert with round Double-Precision To Unsigned Word format
|
|
fctiwuz.=Floating Convert with round to Zero Double-Precision To Unsigned Word format
|
|
fctiwuz=Floating Convert with round to Zero Double-Precision To Unsigned Word format
|
|
fctiwz.=Floating Convert with round to Zero Double-Precision To Signed Word format
|
|
fctiwz=Floating Convert with round to Zero Double-Precision To Signed Word format
|
|
fdiv.=Floating Divide
|
|
fdiv=Floating Divide
|
|
fdivs.=Floating Divide Single
|
|
fdivs=Floating Divide Single
|
|
fmadd.=Floating Multiply-Add
|
|
fmadd=Floating Multiply-Add
|
|
fmadds.=Floating Multiply-Add Single
|
|
fmadds=Floating Multiply-Add Single
|
|
fmr.=Floating Move Register
|
|
fmr=Floating Move Register
|
|
fmrgew=Floating Merge Even Word
|
|
fmrgow=Floating Merge Odd Word
|
|
fmsub.=Floating Multiply-Subtract
|
|
fmsub=Floating Multiply-Subtract
|
|
fmsubs.=Floating Multiply-Subtract Single
|
|
fmsubs=Floating Multiply-Subtract Single
|
|
fmul.=Floating Multiply
|
|
fmul=Floating Multiply
|
|
fmuls.=Floating Multiply Single
|
|
fmuls=Floating Multiply Single
|
|
fnabs.=Floating Negative Absolute Value
|
|
fnabs=Floating Negative Absolute Value
|
|
fneg.=Floating Negate
|
|
fneg=Floating Negate
|
|
fnmadd.=Floating Negative Multiply-Add
|
|
fnmadd=Floating Negative Multiply-Add
|
|
fnmadds.=Floating Negative Multiply-Add Single
|
|
fnmadds=Floating Negative Multiply-Add Single
|
|
fnmsub.=Floating Negative Multiply-Subtract
|
|
fnmsub=Floating Negative Multiply-Subtract
|
|
fnmsubs.=Floating Negative Multiply-Subtract Single
|
|
fnmsubs=Floating Negative Multiply-Subtract Single
|
|
fre.=Floating Reciprocal Estimate
|
|
fre=Floating Reciprocal Estimate
|
|
fres.=Floating Reciprocal Estimate Single
|
|
fres=Floating Reciprocal Estimate Single
|
|
frim.=Floating Round To Integer Minus
|
|
frim=Floating Round To Integer Minus
|
|
frin.=Floating Round To Integer Nearest
|
|
frin=Floating Round To Integer Nearest
|
|
frip.=Floating Round To Integer Plus
|
|
frip=Floating Round To Integer Plus
|
|
friz.=Floating Round To Integer Zero
|
|
friz=Floating Round To Integer Zero
|
|
frsp.=Floating Round to Single-Precision
|
|
frsp=Floating Round to Single-Precision
|
|
frsqrte.=Floating Reciprocal Square Root Estimate
|
|
frsqrte=Floating Reciprocal Square Root Estimate
|
|
frsqrtes.=Floating Reciprocal Square Root Estimate Single
|
|
frsqrtes=Floating Reciprocal Square Root Estimate Single
|
|
fsel.=Floating Select
|
|
fsel=Floating Select
|
|
fsqrt.=Floating Square Root
|
|
fsqrt=Floating Square Root
|
|
fsqrts.=Floating Square Root Single
|
|
fsqrts=Floating Square Root Single
|
|
fsub.=Floating Subtract
|
|
fsub=Floating Subtract
|
|
fsubs.=Floating Subtract Single
|
|
fsubs=Floating Subtract Single
|
|
ftdiv=Floating Test for software Divide
|
|
ftsqrt=Floating Test for software Square Root
|
|
hrfid=Return From Interrupt Doubleword Hypervisor
|
|
icbi=Instruction Cache Block Invalidate
|
|
icbt=Instruction Cache Block Touch
|
|
isel=Integer Select
|
|
isync=Instruction Synchronize
|
|
lbarx=Load Byte And Reserve Indexed
|
|
lbz=Load Byte & Zero
|
|
lbzcix=Load Byte & Zero Caching Inhibited Indexed
|
|
lbzu=Load Byte & Zero with Update
|
|
lbzux=Load Byte & Zero with Update Indexed
|
|
lbzx=Load Byte & Zero Indexed
|
|
ld=Load Doubleword
|
|
ldarx=Load Doubleword And Reserve Indexed
|
|
ldat=Load Doubleword ATomic
|
|
ldbrx=Load Doubleword Byte-Reverse Indexed
|
|
ldcix=Load Doubleword Caching Inhibited Indexed
|
|
ldu=Load Doubleword with Update
|
|
ldux=Load Doubleword with Update Indexed
|
|
ldx=Load Doubleword Indexed
|
|
lfd=Load Floating Double
|
|
lfdp=Load Floating Double Pair
|
|
lfdpx=Load Floating Double Pair Indexed
|
|
lfdu=Load Floating Double with Update
|
|
lfdux=Load Floating Double with Update Indexed
|
|
lfdx=Load Floating Double Indexed
|
|
lfiwax=Load Floating as Integer Word Algebraic Indexed
|
|
lfiwzx=Load Floating as Integer Word & Zero Indexed
|
|
lfs=Load Floating Single
|
|
lfsu=Load Floating Single with Update
|
|
lfsux=Load Floating Single with Update Indexed
|
|
lfsx=Load Floating Single Indexed
|
|
lha=Load Halfword Algebraic
|
|
lharx=Load Halfword And Reserve Indexed
|
|
lhau=Load Halfword Algebraic with Update
|
|
lhaux=Load Halfword Algebraic with Update Indexed
|
|
lhax=Load Halfword Algebraic Indexed
|
|
lhbrx=Load Halfword Byte-Reverse Indexed
|
|
lhz=Load Halfword & Zero
|
|
lhzcix=Load Halfword & Zero Caching Inhibited Indexed
|
|
lhzu=Load Halfword & Zero with Update
|
|
lhzux=Load Halfword & Zero with Update Indexed
|
|
lhzx=Load Halfword & Zero Indexed
|
|
lmw=Load Multiple Word
|
|
lq=Load Quadword
|
|
lqarx=Load Quadword And Reserve Indexed
|
|
lswi=Load String Word Immediate
|
|
lswx=Load String Word Indexed
|
|
lvebx=Load Vector Element Byte Indexed
|
|
lvehx=Load Vector Element Halfword Indexed
|
|
lvewx=Load Vector Element Word Indexed
|
|
lvsl=Load Vector for Shift Left
|
|
lvsr=Load Vector for Shift Right
|
|
lvx=Load Vector Indexed
|
|
lvxl=Load Vector Indexed Last
|
|
lwa=Load Word Algebraic
|
|
lwarx=Load Word & Reserve Indexed
|
|
lwat=Load Word ATomic
|
|
lwaux=Load Word Algebraic with Update Indexed
|
|
lwax=Load Word Algebraic Indexed
|
|
lwbrx=Load Word Byte-Reverse Indexed
|
|
lwz=Load Word & Zero
|
|
lwzcix=Load Word & Zero Caching Inhibited Indexed
|
|
lwzu=Load Word & Zero with Update
|
|
lwzux=Load Word & Zero with Update Indexed
|
|
lwzx=Load Word & Zero Indexed
|
|
lxsd=Load VSX Scalar Doubleword
|
|
lxsdx=Load VSX Scalar Doubleword Indexed
|
|
lxsibzx=Load VSX Scalar as Integer Byte & Zero Indexed
|
|
lxsihzx=Load VSX Scalar as Integer Halfword & Zero Indexed
|
|
lxsiwax=Load VSX Scalar as Integer Word Algebraic Indexed
|
|
lxsiwzx=Load VSX Scalar as Integer Word & Zero Indexed
|
|
lxssp=Load VSX Scalar Single
|
|
lxsspx=Load VSX Scalar Single-Precision Indexed
|
|
lxv=Load VSX Vector
|
|
lxvb16x=Load VSX Vector Byte*16 Indexed
|
|
lxvd2x=Load VSX Vector Doubleword*2 Indexed
|
|
lxvdsx=Load VSX Vector Doubleword & Splat Indexed
|
|
lxvh8x=Load VSX Vector Halfword*8 Indexed
|
|
lxvl=Load VSX Vector with Length
|
|
lxvll=Load VSX Vector Left-justified with Length
|
|
lxvw4x=Load VSX Vector Word*4 Indexed
|
|
lxvwsx=Load VSX Vector Word & Splat Indexed
|
|
lxvx=Load VSX Vector Indexed
|
|
maddhd=Multiply-Add High Doubleword
|
|
maddhdu=Multiply-Add High Doubleword Unsigned
|
|
maddld=Multiply-Add Low Doubleword
|
|
mcrf=Move CR Field
|
|
mcrfs=Move To CR from FPSCR
|
|
mcrxrx=Move XER to CR Extended
|
|
mfbhrbe=Move From BHRB
|
|
mfcr=Move From CR
|
|
mffs.=Move From FPSCR
|
|
mffs=Move From FPSCR
|
|
mffscdrn=Move From FPSCR Control & set DRN
|
|
mffscdrni=Move From FPSCR Control & set DRN Immediate
|
|
mffsce=Move From FPSCR & Clear Enables
|
|
mffscrn=Move From FPSCR Control & set RN
|
|
mffscrni=Move From FPSCR Control & set RN Immediate
|
|
mffsl=Move From FPSCR Lightweight
|
|
mfmsr=Move From MSR
|
|
mfocrf=Move From One CR Field
|
|
mfspr=Move From SPR
|
|
mftb=Move From Time Base
|
|
mfvscr=Move From VSCR
|
|
mfvsrd=Move From VSR Doubleword
|
|
mfvsrld=Move From VSR Lower Doubleword
|
|
mfvsrwz=Move From VSR Word & Zero
|
|
modsd=Modulo Signed Doubleword
|
|
modsw=Modulo Signed Word
|
|
modud=Modulo Unsigned Doubleword
|
|
moduw=Modulo Unsigned Word
|
|
msgclr=Message Clear
|
|
msgclrp=Message Clear Privileged
|
|
msgsnd=Message Send
|
|
msgsndp=Message Send Privileged
|
|
msgsync=Message Synchronize
|
|
mtcrf=Move To CR Fields
|
|
mtfsb0.=Move To FPSCR Bit 0
|
|
mtfsb0=Move To FPSCR Bit 0
|
|
mtfsb1.=Move To FPSCR Bit 1
|
|
mtfsb1=Move To FPSCR Bit 1
|
|
mtfsf.=Move To FPSCR Fields
|
|
mtfsf=Move To FPSCR Fields
|
|
mtfsfi.=Move To FPSCR Field Immediate
|
|
mtfsfi=Move To FPSCR Field Immediate
|
|
mtmsr=Move To MSR
|
|
mtmsrd=Move To MSR Doubleword
|
|
mtocrf=Move To One CR Field
|
|
mtspr=Move To SPR
|
|
mtvscr=Move To VSCR
|
|
mtvsrd=Move To VSR Doubleword
|
|
mtvsrdd=Move To VSR Double Doubleword
|
|
mtvsrwa=Move To VSR Word Algebraic
|
|
mtvsrws=Move To VSR Word & Splat
|
|
mtvsrwz=Move To VSR Word & Zero
|
|
mulhd.=Multiply High Doubleword
|
|
mulhd=Multiply High Doubleword
|
|
mulhdu.=Multiply High Doubleword Unsigned
|
|
mulhdu=Multiply High Doubleword Unsigned
|
|
mulhw.=Multiply High Word
|
|
mulhw=Multiply High Word
|
|
mulhwu.=Multiply High Word Unsigned
|
|
mulhwu=Multiply High Word Unsigned
|
|
mulld.=Multiply Low Doubleword
|
|
mulld=Multiply Low Doubleword
|
|
mulldo.=Multiply Low Doubleword
|
|
mulldo=Multiply Low Doubleword
|
|
mulli=Multiply Low Immediate
|
|
mullw.=Multiply Low Word
|
|
mullw=Multiply Low Word
|
|
mullwo.=Multiply Low Word
|
|
mullwo=Multiply Low Word
|
|
nand.=NAND
|
|
nand=NAND
|
|
neg.=Negate
|
|
neg=Negate
|
|
nego.=Negate
|
|
nego=Negate
|
|
nor.=NOR
|
|
nor=NOR
|
|
or.=OR
|
|
or=OR
|
|
orc.=OR with Complement
|
|
orc=OR with Complement
|
|
ori=OR Immediate
|
|
oris=OR Immediate Shifted
|
|
paste.=Paste
|
|
paste=Paste
|
|
popcntb=Population Count Byte
|
|
popcntd=Population Count Doubleword
|
|
popcntw=Population Count Words
|
|
prtyd=Parity Doubleword
|
|
prtyw=Parity Word
|
|
rfebb=Return from Event Based Branch
|
|
rfid=Return from Interrupt Doubleword
|
|
rfscv=Return From System Call Vectored
|
|
rldcl.=Rotate Left Doubleword then Clear Left
|
|
rldcl=Rotate Left Doubleword then Clear Left
|
|
rldcr.=Rotate Left Doubleword then Clear Right
|
|
rldcr=Rotate Left Doubleword then Clear Right
|
|
rldic.=Rotate Left Doubleword Immediate then Clear
|
|
rldic=Rotate Left Doubleword Immediate then Clear
|
|
rldicl.=Rotate Left Doubleword Immediate then Clear Left
|
|
rldicl=Rotate Left Doubleword Immediate then Clear Left
|
|
rldicr.=Rotate Left Doubleword Immediate then Clear Right
|
|
rldicr=Rotate Left Doubleword Immediate then Clear Right
|
|
rldimi.=Rotate Left Doubleword Immediate then Mask Insert
|
|
rldimi=Rotate Left Doubleword Immediate then Mask Insert
|
|
rlwimi.=Rotate Left Word Immediate then Mask Insert
|
|
rlwimi=Rotate Left Word Immediate then Mask Insert
|
|
rlwinm.=Rotate Left Word Immediate then AND with Mask
|
|
rlwinm=Rotate Left Word Immediate then AND with Mask
|
|
rlwnm.=Rotate Left Word then AND with Mask
|
|
rlwnm=Rotate Left Word then AND with Mask
|
|
sc=System Call
|
|
scv=System Call Vectored
|
|
setb=Set Boolean
|
|
slbfee.=SLB Find Entry ESID & record
|
|
slbia=SLB Invalidate All
|
|
slbiag=SLB Invalidate All Global
|
|
slbie=SLB Invalidate Entry
|
|
slbieg=SLB Invalidate Entry Global
|
|
slbmfee=SLB Move From Entry ESID
|
|
slbmfev=SLB Move From Entry VSID
|
|
slbmte=SLB Move To Entry
|
|
slbsync=SLB Synchronize
|
|
sld.=Shift Left Doubleword
|
|
sld=Shift Left Doubleword
|
|
slw.=Shift Left Word
|
|
slw=Shift Left Word
|
|
srad.=Shift Right Algebraic Doubleword
|
|
srad=Shift Right Algebraic Doubleword
|
|
sradi.=Shift Right Algebraic Doubleword Immediate
|
|
sradi=Shift Right Algebraic Doubleword Immediate
|
|
sraw.=Shift Right Algebraic Word
|
|
sraw=Shift Right Algebraic Word
|
|
srawi.=Shift Right Algebraic Word Immediate
|
|
srawi=Shift Right Algebraic Word Immediate
|
|
srd.=Shift Right Doubleword
|
|
srd=Shift Right Doubleword
|
|
srw.=Shift Right Word
|
|
srw=Shift Right Word
|
|
stb=Store Byte
|
|
stbcix=Store Byte Caching Inhibited Indexed
|
|
stbcx.=Store Byte Conditional Indexed & record
|
|
stbu=Store Byte with Update
|
|
stbux=Store Byte with Update Indexed
|
|
stbx=Store Byte Indexed
|
|
std=Store Doubleword
|
|
stdat=Store Doubleword ATomic
|
|
stdbrx=Store Doubleword Byte-Reverse Indexed
|
|
stdcix=Store Doubleword Caching Inhibited Indexed
|
|
stdcx.=Store Doubleword Conditional Indexed & record
|
|
stdu=Store Doubleword with Update
|
|
stdux=Store Doubleword with Update Indexed
|
|
stdx=Store Doubleword Indexed
|
|
stfd=Store Floating Double
|
|
stfdp=Store Floating Double Pair
|
|
stfdpx=Store Floating Double Pair Indexed
|
|
stfdu=Store Floating Double with Update
|
|
stfdux=Store Floating Double with Update Indexed
|
|
stfdx=Store Floating Double Indexed
|
|
stfiwx=Store Floating as Integer Word Indexed
|
|
stfs=Store Floating Single
|
|
stfsu=Store Floating Single with Update
|
|
stfsux=Store Floating Single with Update Indexed
|
|
stfsx=Store Floating Single Indexed
|
|
sth=Store Halfword
|
|
sthbrx=Store Halfword Byte-Reverse Indexed
|
|
sthcix=Store Halfword Caching Inhibited Indexed
|
|
sthcx.=Store Halfword Conditional Indexed & record
|
|
sthu=Store Halfword with Update
|
|
sthux=Store Halfword with Update Indexed
|
|
sthx=Store Halfword Indexed
|
|
stmw=Store Multiple Word
|
|
stop=Stop
|
|
stq=Store Quadword
|
|
stqcx.=Store Quadword Conditional Indexed & record
|
|
stswi=Store String Word Immediate
|
|
stswx=Store String Word Indexed
|
|
stvebx=Store Vector Element Byte Indexed
|
|
stvehx=Store Vector Element Halfword Indexed
|
|
stvewx=Store Vector Element Word Indexed
|
|
stvx=Store Vector Indexed
|
|
stvxl=Store Vector Indexed Last
|
|
stw=Store Word
|
|
stwat=Store Word ATomic
|
|
stwbrx=Store Word Byte-Reverse Indexed
|
|
stwcix=Store Word Caching Inhibited Indexed
|
|
stwcx.=Store Word Conditional Indexed & record
|
|
stwu=Store Word with Update
|
|
stwux=Store Word with Update Indexed
|
|
stwx=Store Word Indexed
|
|
stxsd=Store VSX Scalar Doubleword
|
|
stxsdx=Store VSX Scalar Doubleword Indexed
|
|
stxsibx=Store VSX Scalar as Integer Byte Indexed
|
|
stxsihx=Store VSX Scalar as Integer Halfword Indexed
|
|
stxsiwx=Store VSX Scalar as Integer Word Indexed
|
|
stxssp=Store VSX Scalar Single-Precision
|
|
stxsspx=Store VSX Scalar Single-Precision Indexed
|
|
stxv=Store VSX Vector
|
|
stxvb16x=Store VSX Vector Byte*16 Indexed
|
|
stxvd2x=Store VSX Vector Doubleword*2 Indexed
|
|
stxvh8x=Store VSX Vector Halfword*8 Indexed
|
|
stxvl=Store VSX Vector with Length
|
|
stxvll=Store VSX Vector Left-justified with Length
|
|
stxvw4x=Store VSX Vector Word*4 Indexed
|
|
stxvx=Store VSX Vector Indexed
|
|
subf.=Subtract From
|
|
subf=Subtract From
|
|
subfc.=Subtract From Carrying
|
|
subfc=Subtract From Carrying
|
|
subfco.=Subtract From Carrying
|
|
subfco=Subtract From Carrying
|
|
subfe.=Subtract From Extended
|
|
subfe=Subtract From Extended
|
|
subfeo.=Subtract From Extended
|
|
subfeo=Subtract From Extended
|
|
subfic=Subtract From Immediate Carrying
|
|
subfme.=Subtract From Minus One Extended
|
|
subfme=Subtract From Minus One Extended
|
|
subfmeo.=Subtract From Minus One Extended
|
|
subfmeo=Subtract From Minus One Extended
|
|
subfo.=Subtract From
|
|
subfo=Subtract From
|
|
subfze.=Subtract From Zero Extended
|
|
subfze=Subtract From Zero Extended
|
|
subfzeo.=Subtract From Zero Extended
|
|
subfzeo=Subtract From Zero Extended
|
|
sync=Synchronize
|
|
tabort.=Transaction Abort & record
|
|
tabortdc.=Transaction Abort Doubleword Conditional & record
|
|
tabortdci.=Transaction Abort Doubleword Conditional Immediate & record
|
|
tabortwc.=Transaction Abort Word Conditional & record
|
|
tabortwci.=Transaction Abort Word Conditional Immediate & record
|
|
tbegin.=Transaction Begin & record
|
|
tcheck=Transaction Check & record
|
|
td=Trap Doubleword
|
|
tdi=Trap Doubleword Immediate
|
|
tend.=Transaction End & record
|
|
tlbie=TLB Invalidate Entry
|
|
tlbiel=TLB Invalidate Entry Local
|
|
tlbsync=TLB Synchronize
|
|
trechkpt.=Transaction Recheckpoint & record
|
|
treclaim.=Transaction Reclaim & record
|
|
tsr.=Transaction Suspend or Resume & record
|
|
tw=Trap Word
|
|
twi=Trap Word Immediate
|
|
vabsdub=Vector Absolute Difference Unsigned Byte
|
|
vabsduh=Vector Absolute Difference Unsigned Halfword
|
|
vabsduw=Vector Absolute Difference Unsigned Word
|
|
vaddcuq=Vector Add & write Carry Unsigned Quadword
|
|
vaddcuw=Vector Add & Write Carry-Out Unsigned Word
|
|
vaddecuq=Vector Add Extended & write Carry Unsigned Quadword
|
|
vaddeuqm=Vector Add Extended Unsigned Quadword Modulo
|
|
vaddfp=Vector Add Floating-Point
|
|
vaddsbs=Vector Add Signed Byte Saturate
|
|
vaddshs=Vector Add Signed Halfword Saturate
|
|
vaddsws=Vector Add Signed Word Saturate
|
|
vaddubm=Vector Add Unsigned Byte Modulo
|
|
vaddubs=Vector Add Unsigned Byte Saturate
|
|
vaddudm=Vector Add Unsigned Doubleword Modulo
|
|
vadduhm=Vector Add Unsigned Halfword Modulo
|
|
vadduhs=Vector Add Unsigned Halfword Saturate
|
|
vadduqm=Vector Add Unsigned Quadword Modulo
|
|
vadduwm=Vector Add Unsigned Word Modulo
|
|
vadduws=Vector Add Unsigned Word Saturate
|
|
vand=Vector Logical AND
|
|
vandc=Vector Logical AND with Complement
|
|
vavgsb=Vector Average Signed Byte
|
|
vavgsh=Vector Average Signed Halfword
|
|
vavgsw=Vector Average Signed Word
|
|
vavgub=Vector Average Unsigned Byte
|
|
vavguh=Vector Average Unsigned Halfword
|
|
vavguw=Vector Average Unsigned Word
|
|
vbpermd=Vector Bit Permute Doubleword
|
|
vbpermq=Vector Bit Permute Quadword
|
|
vcfsx=Vector Convert with round to nearest Signed Word format to FP
|
|
vcfux=Vector Convert with round to nearest Unsigned Word format to FP
|
|
vcipher=Vector AES Cipher
|
|
vcipherlast=Vector AES Cipher Last
|
|
vclzb=Vector Count Leading Zeros Byte
|
|
vclzd=Vector Count Leading Zeros Doubleword
|
|
vclzh=Vector Count Leading Zeros Halfword
|
|
vclzlsbb=Vector Count Leading Zero Least-Significant Bits Byte
|
|
vclzw=Vector Count Leading Zeros Word
|
|
vcmpbfp.=Vector Compare Bounds Floating-Point
|
|
vcmpbfp=Vector Compare Bounds Floating-Point
|
|
vcmpeqfp.=Vector Compare Equal To Floating-Point
|
|
vcmpeqfp=Vector Compare Equal To Floating-Point
|
|
vcmpequb.=Vector Compare Equal To Unsigned Byte
|
|
vcmpequb=Vector Compare Equal To Unsigned Byte
|
|
vcmpequd.=Vector Compare Equal To Unsigned Doubleword
|
|
vcmpequd=Vector Compare Equal To Unsigned Doubleword
|
|
vcmpequh.=Vector Compare Equal To Unsigned Halfword
|
|
vcmpequh=Vector Compare Equal To Unsigned Halfword
|
|
vcmpequw.=Vector Compare Equal To Unsigned Word
|
|
vcmpequw=Vector Compare Equal To Unsigned Word
|
|
vcmpgefp.=Vector Compare Greater Than or Equal To Floating-Point
|
|
vcmpgefp=Vector Compare Greater Than or Equal To Floating-Point
|
|
vcmpgtfp.=Vector Compare Greater Than Floating-Point
|
|
vcmpgtfp=Vector Compare Greater Than Floating-Point
|
|
vcmpgtsb.=Vector Compare Greater Than Signed Byte
|
|
vcmpgtsb=Vector Compare Greater Than Signed Byte
|
|
vcmpgtsd.=Vector Compare Greater Than Signed Doubleword
|
|
vcmpgtsd=Vector Compare Greater Than Signed Doubleword
|
|
vcmpgtsh.=Vector Compare Greater Than Signed Halfword
|
|
vcmpgtsh=Vector Compare Greater Than Signed Halfword
|
|
vcmpgtsw.=Vector Compare Greater Than Signed Word
|
|
vcmpgtsw=Vector Compare Greater Than Signed Word
|
|
vcmpgtub.=Vector Compare Greater Than Unsigned Byte
|
|
vcmpgtub=Vector Compare Greater Than Unsigned Byte
|
|
vcmpgtud.=Vector Compare Greater Than Unsigned Doubleword
|
|
vcmpgtud=Vector Compare Greater Than Unsigned Doubleword
|
|
vcmpgtuh.=Vector Compare Greater Than Unsigned Halfword
|
|
vcmpgtuh=Vector Compare Greater Than Unsigned Halfword
|
|
vcmpgtuw.=Vector Compare Greater Than Unsigned Word
|
|
vcmpgtuw=Vector Compare Greater Than Unsigned Word
|
|
vcmpneb.=Vector Compare Not Equal Byte
|
|
vcmpneb=Vector Compare Not Equal Byte
|
|
vcmpneh.=Vector Compare Not Equal Halfword
|
|
vcmpneh=Vector Compare Not Equal Halfword
|
|
vcmpnew.=Vector Compare Not Equal Word
|
|
vcmpnew=Vector Compare Not Equal Word
|
|
vcmpnezb.=Vector Compare Not Equal or Zero Byte
|
|
vcmpnezb=Vector Compare Not Equal or Zero Byte
|
|
vcmpnezh.=Vector Compare Not Equal or Zero Halfword
|
|
vcmpnezh=Vector Compare Not Equal or Zero Halfword
|
|
vcmpnezw.=Vector Compare Not Equal or Zero Word
|
|
vcmpnezw=Vector Compare Not Equal or Zero Word
|
|
vctsxs=Vector Convert with round to zero FP To Signed Word format Saturate
|
|
vctuxs=Vector Convert with round to zero FP To Unsigned Word format Saturate
|
|
vctzb=Vector Count Trailing Zeros Byte
|
|
vctzd=Vector Count Trailing Zeros Doubleword
|
|
vctzh=Vector Count Trailing Zeros Halfword
|
|
vctzlsbb=Vector Count Trailing Zero Least-Significant Bits Byte
|
|
vctzw=Vector Count Trailing Zeros Word
|
|
veqv=Vector Equivalence
|
|
vexptefp=Vector 2 Raised to the Exponent Estimate Floating-Point
|
|
vextractd=Vector Extract Doubleword
|
|
vextractub=Vector Extract Unsigned Byte
|
|
vextractuh=Vector Extract Unsigned Halfword
|
|
vextractuw=Vector Extract Unsigned Word
|
|
vextsb2d=Vector Extend Sign Byte to Doubleword
|
|
vextsb2w=Vector Extend Sign Byte to Word
|
|
vextsh2d=Vector Extend Sign Halfword to Doubleword
|
|
vextsh2w=Vector Extend Sign Halfword to Word
|
|
vextsw2d=Vector Extend Sign Word to Doubleword
|
|
vextublx=Vector Extract Unsigned Byte Left-Indexed
|
|
vextubrx=Vector Extract Unsigned Byte Right-Indexed
|
|
vextuhlx=Vector Extract Unsigned Halfword Left-Indexed
|
|
vextuhrx=Vector Extract Unsigned Halfword Right-Indexed
|
|
vextuwlx=Vector Extract Unsigned Word Left-Indexed
|
|
vextuwrx=Vector Extract Unsigned Word Right-Indexed
|
|
vgbbd=Vector Gather Bits by Byte by Doubleword
|
|
vinsertb=Vector Insert Byte
|
|
vinsertd=Vector Insert Doubleword
|
|
vinserth=Vector Insert Halfword
|
|
vinsertw=Vector Insert Word
|
|
vlogefp=Vector Log Base 2 Estimate Floating-Point
|
|
vmaddfp=Vector Multiply-Add Floating-Point
|
|
vmaxfp=Vector Maximum Floating-Point
|
|
vmaxsb=Vector Maximum Signed Byte
|
|
vmaxsd=Vector Maximum Signed Doubleword
|
|
vmaxsh=Vector Maximum Signed Halfword
|
|
vmaxsw=Vector Maximum Signed Word
|
|
vmaxub=Vector Maximum Unsigned Byte
|
|
vmaxud=Vector Maximum Unsigned Doubleword
|
|
vmaxuh=Vector Maximum Unsigned Halfword
|
|
vmaxuw=Vector Maximum Unsigned Word
|
|
vmhaddshs=Vector Multiply-High-Add Signed Halfword Saturate
|
|
vmhraddshs=Vector Multiply-High-Round-Add Signed Halfword Saturate
|
|
vminfp=Vector Minimum Floating-Point
|
|
vminsb=Vector Minimum Signed Byte
|
|
vminsd=Vector Minimum Signed Doubleword
|
|
vminsh=Vector Minimum Signed Halfword
|
|
vminsw=Vector Minimum Signed Word
|
|
vminub=Vector Minimum Unsigned Byte
|
|
vminud=Vector Minimum Unsigned Doubleword
|
|
vminuh=Vector Minimum Unsigned Halfword
|
|
vminuw=Vector Minimum Unsigned Word
|
|
vmladduhm=Vector Multiply-Low-Add Unsigned Halfword Modulo
|
|
vmrgew=Vector Merge Even Word
|
|
vmrghb=Vector Merge High Byte
|
|
vmrghh=Vector Merge High Halfword
|
|
vmrghw=Vector Merge High Word
|
|
vmrglb=Vector Merge Low Byte
|
|
vmrglh=Vector Merge Low Halfword
|
|
vmrglw=Vector Merge Low Word
|
|
vmrgow=Vector Merge Odd Word
|
|
vmsummbm=Vector Multiply-Sum Mixed Byte Modulo
|
|
vmsumshm=Vector Multiply-Sum Signed Halfword Modulo
|
|
vmsumshs=Vector Multiply-Sum Signed Halfword Saturate
|
|
vmsumubm=Vector Multiply-Sum Unsigned Byte Modulo
|
|
vmsumudm=Vector Multiply-Sum Unsigned Doubleword Modulo
|
|
vmsumuhm=Vector Multiply-Sum Unsigned Halfword Modulo
|
|
vmsumuhs=Vector Multiply-Sum Unsigned Halfword Saturate
|
|
vmul10cuq=Vector Multiply-by-10 & write Carry Unsigned Quadword
|
|
vmul10ecuq=Vector Multiply-by-10 Extended & write Carry Unsigned Quadword
|
|
vmul10euq=Vector Multiply-by-10 Extended Unsigned Quadword
|
|
vmul10uq=Vector Multiply-by-10 Unsigned Quadword
|
|
vmulesb=Vector Multiply Even Signed Byte
|
|
vmulesh=Vector Multiply Even Signed Halfword
|
|
vmulesw=Vector Multiply Even Signed Word
|
|
vmuleub=Vector Multiply Even Unsigned Byte
|
|
vmuleuh=Vector Multiply Even Unsigned Halfword
|
|
vmuleuw=Vector Multiply Even Unsigned Word
|
|
vmulosb=Vector Multiply Odd Signed Byte
|
|
vmulosh=Vector Multiply Odd Signed Halfword
|
|
vmulosw=Vector Multiply Odd Signed Word
|
|
vmuloub=Vector Multiply Odd Unsigned Byte
|
|
vmulouh=Vector Multiply Odd Unsigned Halfword
|
|
vmulouw=Vector Multiply Odd Unsigned Word
|
|
vmuluwm=Vector Multiply Unsigned Word Modulo
|
|
vnand=Vector NAND
|
|
vncipher=Vector AES Inverse Cipher
|
|
vncipherlast=Vector AES Inverse Cipher Last
|
|
vnegd=Vector Negate Doubleword
|
|
vnegw=Vector Negate Word
|
|
vnmsubfp=Vector Negative Multiply-Subtract Floating-Point
|
|
vnor=Vector Logical NOR
|
|
vor=Vector Logical OR
|
|
vorc=Vector OR with Complement
|
|
vperm=Vector Permute
|
|
vpermr=Vector Permute Right-indexed
|
|
vpermxor=Vector Permute & Exclusive-OR
|
|
vpkpx=Vector Pack Pixel
|
|
vpksdss=Vector Pack Signed Doubleword Signed Saturate
|
|
vpksdus=Vector Pack Signed Doubleword Unsigned Saturate
|
|
vpkshss=Vector Pack Signed Halfword Signed Saturate
|
|
vpkshus=Vector Pack Signed Halfword Unsigned Saturate
|
|
vpkswss=Vector Pack Signed Word Signed Saturate
|
|
vpkswus=Vector Pack Signed Word Unsigned Saturate
|
|
vpkudum=Vector Pack Unsigned Doubleword Unsigned Modulo
|
|
vpkudus=Vector Pack Unsigned Doubleword Unsigned Saturate
|
|
vpkuhum=Vector Pack Unsigned Halfword Unsigned Modulo
|
|
vpkuhus=Vector Pack Unsigned Halfword Unsigned Saturate
|
|
vpkuwum=Vector Pack Unsigned Word Unsigned Modulo
|
|
vpkuwus=Vector Pack Unsigned Word Unsigned Saturate
|
|
vpmsumb=Vector Polynomial Multiply-Sum Byte
|
|
vpmsumd=Vector Polynomial Multiply-Sum Doubleword
|
|
vpmsumh=Vector Polynomial Multiply-Sum Halfword
|
|
vpmsumw=Vector Polynomial Multiply-Sum Word
|
|
vpopcntb=Vector Population Count Byte
|
|
vpopcntd=Vector Population Count Doubleword
|
|
vpopcnth=Vector Population Count Halfword
|
|
vpopcntw=Vector Population Count Word
|
|
vprtybd=Vector Parity Byte Doubleword
|
|
vprtybq=Vector Parity Byte Quadword
|
|
vprtybw=Vector Parity Byte Word
|
|
vrefp=Vector Reciprocal Estimate Floating-Point
|
|
vrfim=Vector Round to Floating-Point Integral toward -Infinity
|
|
vrfin=Vector Round to Floating-Point Integral Nearest
|
|
vrfip=Vector Round to Floating-Point Integral toward +Infinity
|
|
vrfiz=Vector Round to Floating-Point Integral toward Zero
|
|
vrlb=Vector Rotate Left Byte
|
|
vrld=Vector Rotate Left Doubleword
|
|
vrldmi=Vector Rotate Left Doubleword then Mask Insert
|
|
vrldnm=Vector Rotate Left Doubleword then AND with Mask
|
|
vrlh=Vector Rotate Left Halfword
|
|
vrlw=Vector Rotate Left Word
|
|
vrlwmi=Vector Rotate Left Word then Mask Insert
|
|
vrlwnm=Vector Rotate Left Word then AND with Mask
|
|
vrsqrtefp=Vector Reciprocal Square Root Estimate Floating-Point
|
|
vsbox=Vector AES S-Box
|
|
vsel=Vector Select
|
|
vshasigmad=Vector SHA-512 Sigma Doubleword
|
|
vshasigmaw=Vector SHA-256 Sigma Word
|
|
vsl=Vector Shift Left
|
|
vslb=Vector Shift Left Byte
|
|
vsld=Vector Shift Left Doubleword
|
|
vsldoi=Vector Shift Left Double by Octet Immediate
|
|
vslh=Vector Shift Left Halfword
|
|
vslo=Vector Shift Left by Octet
|
|
vslv=Vector Shift Left Variable
|
|
vslw=Vector Shift Left Word
|
|
vspltb=Vector Splat Byte
|
|
vsplth=Vector Splat Halfword
|
|
vspltisb=Vector Splat Immediate Signed Byte
|
|
vspltish=Vector Splat Immediate Signed Halfword
|
|
vspltisw=Vector Splat Immediate Signed Word
|
|
vspltw=Vector Splat Word
|
|
vsr=Vector Shift Right
|
|
vsrab=Vector Shift Right Algebraic Byte
|
|
vsrad=Vector Shift Right Algebraic Doubleword
|
|
vsrah=Vector Shift Right Algebraic Halfword
|
|
vsraw=Vector Shift Right Algebraic Word
|
|
vsrb=Vector Shift Right Byte
|
|
vsrd=Vector Shift Right Doubleword
|
|
vsrh=Vector Shift Right Halfword
|
|
vsro=Vector Shift Right by Octet
|
|
vsrv=Vector Shift Right Variable
|
|
vsrw=Vector Shift Right Word
|
|
vsubcuq=Vector Subtract & write Carry Unsigned Quadword
|
|
vsubcuw=Vector Subtract & Write Carry-Out Unsigned Word
|
|
vsubecuq=Vector Subtract Extended & write Carry Unsigned Quadword
|
|
vsubeuqm=Vector Subtract Extended Unsigned Quadword Modulo
|
|
vsubfp=Vector Subtract Floating-Point
|
|
vsubsbs=Vector Subtract Signed Byte Saturate
|
|
vsubshs=Vector Subtract Signed Halfword Saturate
|
|
vsubsws=Vector Subtract Signed Word Saturate
|
|
vsububm=Vector Subtract Unsigned Byte Modulo
|
|
vsububs=Vector Subtract Unsigned Byte Saturate
|
|
vsubudm=Vector Subtract Unsigned Doubleword Modulo
|
|
vsubuhm=Vector Subtract Unsigned Halfword Modulo
|
|
vsubuhs=Vector Subtract Unsigned Halfword Saturate
|
|
vsubuqm=Vector Subtract Unsigned Quadword Modulo
|
|
vsubuwm=Vector Subtract Unsigned Word Modulo
|
|
vsubuws=Vector Subtract Unsigned Word Saturate
|
|
vsum2sws=Vector Sum across Half Signed Word Saturate
|
|
vsum4sbs=Vector Sum across Quarter Signed Byte Saturate
|
|
vsum4shs=Vector Sum across Quarter Signed Halfword Saturate
|
|
vsum4ubs=Vector Sum across Quarter Unsigned Byte Saturate
|
|
vsumsws=Vector Sum across Signed Word Saturate
|
|
vupkhpx=Vector Unpack High Pixel
|
|
vupkhsb=Vector Unpack High Signed Byte
|
|
vupkhsh=Vector Unpack High Signed Halfword
|
|
vupkhsw=Vector Unpack High Signed Word
|
|
vupklpx=Vector Unpack Low Pixel
|
|
vupklsb=Vector Unpack Low Signed Byte
|
|
vupklsh=Vector Unpack Low Signed Halfword
|
|
vupklsw=Vector Unpack Low Signed Word
|
|
vxor=Vector Logical XOR
|
|
wait=Wait for Interrupt
|
|
xnop=Executed No Operation
|
|
xor.=XOR
|
|
xor=XOR
|
|
xori=XOR Immediate
|
|
xoris=XOR Immediate Shifted
|
|
xsabsdp=VSX Scalar Absolute Double-Precision
|
|
xsabsqp=VSX Scalar Absolute Quad-Precision
|
|
xsadddp=VSX Scalar Add Double-Precision
|
|
xsaddqp=VSX Scalar Add Quad-Precision [with round to Odd]
|
|
xsaddqpo=VSX Scalar Add Quad-Precision [with round to Odd]
|
|
xsaddsp=VSX Scalar Add Single-Precision
|
|
xscmpeqdp=VSX Scalar Compare Equal Double-Precision
|
|
xscmpexpdp=VSX Scalar Compare Exponents Double-Precision
|
|
xscmpexpqp=VSX Scalar Compare Exponents Quad-Precision
|
|
xscmpgedp=VSX Scalar Compare Greater Than or Equal Double-Precision
|
|
xscmpgtdp=VSX Scalar Compare Greater Than Double-Precision
|
|
xscmpodp=VSX Scalar Compare Ordered Double-Precision
|
|
xscmpoqp=VSX Scalar Compare Ordered Quad-Precision
|
|
xscmpudp=VSX Scalar Compare Unordered Double-Precision
|
|
xscmpuqp=VSX Scalar Compare Unordered Quad-Precision
|
|
xscpsgndp=VSX Scalar Copy Sign Double-Precision
|
|
xscpsgnqp=VSX Scalar Copy Sign Quad-Precision
|
|
xscvdphp=VSX Scalar Convert with round Double-Precision to Half-Precision format
|
|
xscvdpqp=VSX Scalar Convert Double-Precision to Quad-Precision format
|
|
xscvdpsp=VSX Scalar Convert with round Double-Precision to Single-Precision format
|
|
xscvdpspn=VSX Scalar Convert Double-Precision to Single-Precision Non-signalling format
|
|
xscvdpsxds=VSX Scalar Convert with round to zero Double-Precision to Signed Doubleword format
|
|
xscvdpsxws=VSX Scalar Convert with round to zero Double-Precision to Signed Word format
|
|
xscvdpuxds=VSX Scalar Convert with round to zero Double-Precision to Unsigned Doubleword format
|
|
xscvdpuxws=VSX Scalar Convert with round to zero Double-Precision to Unsigned Word format
|
|
xscvhpdp=VSX Scalar Convert Half-Precision to Double-Precision format
|
|
xscvqpdp=VSX Scalar Convert with round Quad-Precision to Double-Precision format [with round to Odd]
|
|
xscvqpdpo=VSX Scalar Convert with round Quad-Precision to Double-Precision format [with round to Odd]
|
|
xscvqpsdz=VSX Scalar Convert with round to zero Quad-Precision to Signed Doubleword format
|
|
xscvqpswz=VSX Scalar Convert with round to zero Quad-Precision to Signed Word format
|
|
xscvqpudz=VSX Scalar Convert with round to zero Quad-Precision to Unsigned Doubleword format
|
|
xscvqpuwz=VSX Scalar Convert with round to zero Quad-Precision to Unsigned Word format
|
|
xscvsdqp=VSX Scalar Convert Signed Doubleword to Quad-Precision format
|
|
xscvspdp=VSX Scalar Convert Single-Precision to Double-Precision format
|
|
xscvspdpn=VSX Scalar Convert Single-Precision to Double-Precision Non-signalling format
|
|
xscvsxddp=VSX Scalar Convert with round Signed Doubleword to Double-Precision format
|
|
xscvsxdsp=VSX Scalar Convert with round Signed Doubleword to Single-Precision format
|
|
xscvudqp=VSX Scalar Convert Unsigned Doubleword to Quad-Precision format
|
|
xscvuxddp=VSX Scalar Convert with round Unsigned Doubleword to Double-Precision format
|
|
xscvuxdsp=VSX Scalar Convert with round Unsigned Doubleword to Single-Precision format
|
|
xsdivdp=VSX Scalar Divide Double-Precision
|
|
xsdivqp=VSX Scalar Divide Quad-Precision [with round to Odd]
|
|
xsdivqpo=VSX Scalar Divide Quad-Precision [with round to Odd]
|
|
xsdivsp=VSX Scalar Divide Single-Precision
|
|
xsiexpdp=VSX Scalar Insert Exponent Double-Precision
|
|
xsiexpqp=VSX Scalar Insert Exponent Quad-Precision
|
|
xsmaddadp=VSX Scalar Multiply-Add Type-A Double-Precision
|
|
xsmaddasp=VSX Scalar Multiply-Add Type-A Single-Precision
|
|
xsmaddmdp=VSX Scalar Multiply-Add Type-M Double-Precision
|
|
xsmaddmsp=VSX Scalar Multiply-Add Type-M Single-Precision
|
|
xsmaddqp=VSX Scalar Multiply-Add Quad-Precision [with round to Odd]
|
|
xsmaddqpo=VSX Scalar Multiply-Add Quad-Precision [with round to Odd]
|
|
xsmaxcdp=VSX Scalar Maximum Type-C Double-Precision
|
|
xsmaxdp=VSX Scalar Maximum Double-Precision
|
|
xsmaxjdp=VSX Scalar Maximum Type-J Double-Precision
|
|
xsmincdp=VSX Scalar Minimum Type-C Double-Precision
|
|
xsmindp=VSX Scalar Minimum Double-Precision
|
|
xsminjdp=VSX Scalar Minimum Type-J Double-Precision
|
|
xsmsubadp=VSX Scalar Multiply-Subtract Type-A Double-Precision
|
|
xsmsubasp=VSX Scalar Multiply-Subtract Type-A Single-Precision
|
|
xsmsubmdp=VSX Scalar Multiply-Subtract Type-M Double-Precision
|
|
xsmsubmsp=VSX Scalar Multiply-Subtract Type-M Single-Precision
|
|
xsmsubqp=VSX Scalar Multiply-Subtract Quad-Precision [with round to Odd]
|
|
xsmsubqpo=VSX Scalar Multiply-Subtract Quad-Precision [with round to Odd]
|
|
xsmuldp=VSX Scalar Multiply Double-Precision
|
|
xsmulqp=VSX Scalar Multiply Quad-Precision [with round to Odd]
|
|
xsmulqpo=VSX Scalar Multiply Quad-Precision [with round to Odd]
|
|
xsmulsp=VSX Scalar Multiply Single-Precision
|
|
xsnabsdp=VSX Scalar Negative Absolute Double-Precision
|
|
xsnabsqp=VSX Scalar Negative Absolute Quad-Precision
|
|
xsnegdp=VSX Scalar Negate Double-Precision
|
|
xsnegqp=VSX Scalar Negate Quad-Precision
|
|
xsnmaddadp=VSX Scalar Negative Multiply-Add Type-A Double-Precision
|
|
xsnmaddasp=VSX Scalar Negative Multiply-Add Type-A Single-Precision
|
|
xsnmaddmdp=VSX Scalar Negative Multiply-Add Type-M Double-Precision
|
|
xsnmaddmsp=VSX Scalar Negative Multiply-Add Type-M Single-Precision
|
|
xsnmaddqp=VSX Scalar Negative Multiply-Add Quad-Precision [with round to Odd]
|
|
xsnmaddqpo=VSX Scalar Negative Multiply-Add Quad-Precision [with round to Odd]
|
|
xsnmsubadp=VSX Scalar Negative Multiply-Subtract Type-A Double-Precision
|
|
xsnmsubasp=VSX Scalar Negative Multiply-Subtract Type-A Single-Precision
|
|
xsnmsubmdp=VSX Scalar Negative Multiply-Subtract Type-M Double-Precision
|
|
xsnmsubmsp=VSX Scalar Negative Multiply-Subtract Type-M Single-Precision
|
|
xsnmsubqp=VSX Scalar Negative Multiply-Subtract Quad-Precision [with round to Odd]
|
|
xsnmsubqpo=VSX Scalar Negative Multiply-Subtract Quad-Precision [with round to Odd]
|
|
xsrdpi=VSX Scalar Round Double-Precision to Integral
|
|
xsrdpic=VSX Scalar Round Double-Precision to Integral using Current rounding mode
|
|
xsrdpim=VSX Scalar Round Double-Precision to Integral toward -Infinity
|
|
xsrdpip=VSX Scalar Round Double-Precision to Integral toward +Infinity
|
|
xsrdpiz=VSX Scalar Round Double-Precision to Integral toward Zero
|
|
xsredp=VSX Scalar Reciprocal Estimate Double-Precision
|
|
xsresp=VSX Scalar Reciprocal Estimate Single-Precision
|
|
xsrqpi=VSX Scalar Round Quad-Precision to Integral [Exact]
|
|
xsrqpix=VSX Scalar Round Quad-Precision to Integral [Exact]
|
|
xsrqpxp=VSX Scalar Round Quad-Precision to XP
|
|
xsrsp=VSX Scalar Round Double-Precision to Single-Precision
|
|
xsrsqrtedp=VSX Scalar Reciprocal Square Root Estimate Double-Precision
|
|
xsrsqrtesp=VSX Scalar Reciprocal Square Root Estimate Single-Precision
|
|
xssqrtdp=VSX Scalar Square Root Double-Precision
|
|
xssqrtqp=VSX Scalar Square Root Quad-Precision [with round to Odd]
|
|
xssqrtqpo=VSX Scalar Square Root Quad-Precision [with round to Odd]
|
|
xssqrtsp=VSX Scalar Square Root Single-Precision
|
|
xssubdp=VSX Scalar Subtract Double-Precision
|
|
xssubqp=VSX Scalar Subtract Quad-Precision [with round to Odd]
|
|
xssubqpo=VSX Scalar Subtract Quad-Precision [with round to Odd]
|
|
xssubsp=VSX Scalar Subtract Single-Precision
|
|
xstdivdp=VSX Scalar Test for software Divide Double-Precision
|
|
xstsqrtdp=VSX Scalar Test for software Square Root Double-Precision
|
|
xststdcdp=VSX Scalar Test Data Class Double-Precision
|
|
xststdcqp=VSX Scalar Test Data Class Quad-Precision
|
|
xststdcsp=VSX Scalar Test Data Class Single-Precision
|
|
xsxexpdp=VSX Scalar Extract Exponent Double-Precision
|
|
xsxexpqp=VSX Scalar Extract Exponent Quad-Precision
|
|
xsxsigdp=VSX Scalar Extract Significand Double-Precision
|
|
xsxsigqp=VSX Scalar Extract Significand Quad-Precision
|
|
xvabsdp=VSX Vector Absolute Double-Precision
|
|
xvabssp=VSX Vector Absolute Single-Precision
|
|
xvadddp=VSX Vector Add Double-Precision
|
|
xvaddsp=VSX Vector Add Single-Precision
|
|
xvcmpeqdp.=VSX Vector Compare Equal Double-Precision
|
|
xvcmpeqdp=VSX Vector Compare Equal Double-Precision
|
|
xvcmpeqsp.=VSX Vector Compare Equal Single-Precision
|
|
xvcmpeqsp=VSX Vector Compare Equal Single-Precision
|
|
xvcmpgedp.=VSX Vector Compare Greater Than or Equal Double-Precision
|
|
xvcmpgedp=VSX Vector Compare Greater Than or Equal Double-Precision
|
|
xvcmpgesp.=VSX Vector Compare Greater Than or Equal Single-Precision
|
|
xvcmpgesp=VSX Vector Compare Greater Than or Equal Single-Precision
|
|
xvcmpgtdp.=VSX Vector Compare Greater Than Double-Precision
|
|
xvcmpgtdp=VSX Vector Compare Greater Than Double-Precision
|
|
xvcmpgtsp.=VSX Vector Compare Greater Than Single-Precision
|
|
xvcmpgtsp=VSX Vector Compare Greater Than Single-Precision
|
|
xvcpsgndp=VSX Vector Copy Sign Double-Precision
|
|
xvcpsgnsp=VSX Vector Copy Sign Single-Precision
|
|
xvcvdpsp=VSX Vector Convert with round Double-Precision to Single-Precision format
|
|
xvcvdpsxds=VSX Vector Convert with round to zero Double-Precision to Signed Doubleword format
|
|
xvcvdpsxws=VSX Vector Convert with round to zero Double-Precision to Signed Word format
|
|
xvcvdpuxds=VSX Vector Convert with round to zero Double-Precision to Unsigned Doubleword format
|
|
xvcvdpuxws=VSX Vector Convert with round to zero Double-Precision to Unsigned Word format
|
|
xvcvhpsp=VSX Vector Convert Half-Precision to Single-Precision format
|
|
xvcvspdp=VSX Vector Convert Single-Precision to Double-Precision format
|
|
xvcvsphp=VSX Vector Convert with round Single-Precision to Half-Precision format
|
|
xvcvspsxds=VSX Vector Convert with round to zero Single-Precision to Signed Doubleword format
|
|
xvcvspsxws=VSX Vector Convert with round to zero Single-Precision to Signed Word format
|
|
xvcvspuxds=VSX Vector Convert with round to zero Single-Precision to Unsigned Doubleword format
|
|
xvcvspuxws=VSX Vector Convert with round to zero Single-Precision to Unsigned Word format
|
|
xvcvsxddp=VSX Vector Convert with round Signed Doubleword to Double-Precision format
|
|
xvcvsxdsp=VSX Vector Convert with round Signed Doubleword to Single-Precision format
|
|
xvcvsxwdp=VSX Vector Convert Signed Word to Double-Precision format
|
|
xvcvsxwsp=VSX Vector Convert with round Signed Word to Single-Precision format
|
|
xvcvuxddp=VSX Vector Convert with round Unsigned Doubleword to Double-Precision format
|
|
xvcvuxdsp=VSX Vector Convert with round Unsigned Doubleword to Single-Precision format
|
|
xvcvuxwdp=VSX Vector Convert Unsigned Word to Double-Precision format
|
|
xvcvuxwsp=VSX Vector Convert with round Unsigned Word to Single-Precision format
|
|
xvdivdp=VSX Vector Divide Double-Precision
|
|
xvdivsp=VSX Vector Divide Single-Precision
|
|
xviexpdp=VSX Vector Insert Exponent Double-Precision
|
|
xviexpsp=VSX Vector Insert Exponent Single-Precision
|
|
xvmaddadp=VSX Vector Multiply-Add Type-A Double-Precision
|
|
xvmaddasp=VSX Vector Multiply-Add Type-A Single-Precision
|
|
xvmaddmdp=VSX Vector Multiply-Add Type-M Double-Precision
|
|
xvmaddmsp=VSX Vector Multiply-Add Type-M Single-Precision
|
|
xvmaxdp=VSX Vector Maximum Double-Precision
|
|
xvmaxsp=VSX Vector Maximum Single-Precision
|
|
xvmindp=VSX Vector Minimum Double-Precision
|
|
xvminsp=VSX Vector Minimum Single-Precision
|
|
xvmsubadp=VSX Vector Multiply-Subtract Type-A Double-Precision
|
|
xvmsubasp=VSX Vector Multiply-Subtract Type-A Single-Precision
|
|
xvmsubmdp=VSX Vector Multiply-Subtract Type-M Double-Precision
|
|
xvmsubmsp=VSX Vector Multiply-Subtract Type-M Single-Precision
|
|
xvmuldp=VSX Vector Multiply Double-Precision
|
|
xvmulsp=VSX Vector Multiply Single-Precision
|
|
xvnabsdp=VSX Vector Negative Absolute Double-Precision
|
|
xvnabssp=VSX Vector Negative Absolute Single-Precision
|
|
xvnegdp=VSX Vector Negate Double-Precision
|
|
xvnegsp=VSX Vector Negate Single-Precision
|
|
xvnmaddadp=VSX Vector Negative Multiply-Add Type-A Double-Precision
|
|
xvnmaddasp=VSX Vector Negative Multiply-Add Type-A Single-Precision
|
|
xvnmaddmdp=VSX Vector Negative Multiply-Add Type-M Double-Precision
|
|
xvnmaddmsp=VSX Vector Negative Multiply-Add Type-M Single-Precision
|
|
xvnmsubadp=VSX Vector Negative Multiply-Subtract Type-A Double-Precision
|
|
xvnmsubasp=VSX Vector Negative Multiply-Subtract Type-A Single-Precision
|
|
xvnmsubmdp=VSX Vector Negative Multiply-Subtract Type-M Double-Precision
|
|
xvnmsubmsp=VSX Vector Negative Multiply-Subtract Type-M Single-Precision
|
|
xvrdpi=VSX Vector Round Double-Precision to Integral
|
|
xvrdpic=VSX Vector Round Double-Precision to Integral using Current rounding mode
|
|
xvrdpim=VSX Vector Round Double-Precision to Integral toward -Infinity
|
|
xvrdpip=VSX Vector Round Double-Precision to Integral toward +Infinity
|
|
xvrdpiz=VSX Vector Round Double-Precision to Integral toward Zero
|
|
xvredp=VSX Vector Reciprocal Estimate Double-Precision
|
|
xvresp=VSX Vector Reciprocal Estimate Single-Precision
|
|
xvrspi=VSX Vector Round Single-Precision to Integral
|
|
xvrspic=VSX Vector Round Single-Precision to Integral using Current rounding mode
|
|
xvrspim=VSX Vector Round Single-Precision to Integral toward -Infinity
|
|
xvrspip=VSX Vector Round Single-Precision to Integral toward +Infinity
|
|
xvrspiz=VSX Vector Round Single-Precision to Integral toward Zero
|
|
xvrsqrtedp=VSX Vector Reciprocal Square Root Estimate Double-Precision
|
|
xvrsqrtesp=VSX Vector Reciprocal Square Root Estimate Single-Precision
|
|
xvsqrtdp=VSX Vector Square Root Double-Precision
|
|
xvsqrtsp=VSX Vector Square Root Single-Precision
|
|
xvsubdp=VSX Vector Subtract Double-Precision
|
|
xvsubsp=VSX Vector Subtract Single-Precision
|
|
xvtdivdp=VSX Vector Test for software Divide Double-Precision
|
|
xvtdivsp=VSX Vector Test for software Divide Single-Precision
|
|
xvtsqrtdp=VSX Vector Test for software Square Root Double-Precision
|
|
xvtsqrtsp=VSX Vector Test for software Square Root Single-Precision
|
|
xvtstdcdp=VSX Vector Test Data Class Double-Precision
|
|
xvtstdcsp=VSX Vector Test Data Class Single-Precision
|
|
xvxexpdp=VSX Vector Extract Exponent Double-Precision
|
|
xvxexpsp=VSX Vector Extract Exponent Single-Precision
|
|
xvxsigdp=VSX Vector Extract Significand Double-Precision
|
|
xvxsigsp=VSX Vector Extract Significand Single-Precision
|
|
xxbrd=VSX Vector Byte-Reverse Doubleword
|
|
xxbrh=VSX Vector Byte-Reverse Halfword
|
|
xxbrq=VSX Vector Byte-Reverse Quadword
|
|
xxbrw=VSX Vector Byte-Reverse Word
|
|
xxextractuw=VSX Vector Extract Unsigned Word
|
|
xxinsertw=VSX Vector Insert Word
|
|
xxland=VSX Vector Logical AND
|
|
xxlandc=VSX Vector Logical AND with Complement
|
|
xxleqv=VSX Vector Logical Equivalence
|
|
xxlnand=VSX Vector Logical NAND
|
|
xxlnor=VSX Vector Logical NOR
|
|
xxlor=VSX Vector Logical OR
|
|
xxlorc=VSX Vector Logical OR with Complement
|
|
xxlxor=VSX Vector Logical XOR
|
|
xxmrghw=VSX Vector Merge Word High
|
|
xxmrglw=VSX Vector Merge Word Low
|
|
xxperm=VSX Vector Permute
|
|
xxpermdi=VSX Vector Doubleword Permute Immediate
|
|
xxpermr=VSX Vector Permute Right-indexed
|
|
xxsel=VSX Vector Select
|
|
xxsldwi=VSX Vector Shift Left Double by Word Immediate
|
|
xxspltib=VSX Vector Splat Immediate Byte
|
|
xxspltw=VSX Vector Splat Word
|
|
# PPC VLE
|
|
e_add16i=Add Immediate
|
|
e_add2i.=Add (2 operand) Immediate and Record
|
|
e_add2is=Add (2 operand) Immediate Shifted
|
|
e_addi=Add Scaled Immediate
|
|
e_addi.=Add Scaled Immediate
|
|
e_addic=Add Scaled Immediate Carrying
|
|
e_addic.=Add Scaled Immediate Carrying
|
|
e_and2i=AND (two operand) Immediate
|
|
e_and2is=AND (2 operand) Immediate Shifted
|
|
e_and2i.=AND (two operand) Immediate
|
|
e_and2is.=AND (2 operand) Immediate Shifted
|
|
e_andi=AND Scaled Immediate
|
|
e_andi.=AND Scaled Immediate
|
|
e_b=Branch
|
|
e_bl=Branch and Link
|
|
e_bc=Branch Conditional
|
|
e_bcl=Branch Conditional and Link
|
|
e_cmp16i=Compare Immediate Word
|
|
e_cmph=Compare Halfword
|
|
e_cmph16i=Compare Halfword Immediate
|
|
e_cmphl=Compare Halfword Logical
|
|
e_cmphl16i=Compare Halfword Logical Immediate
|
|
e_cmpi=Compare Scaled Immediate Word
|
|
e_cmpl16i=Compare Logical Immediate Word
|
|
e_cmpli=Compare Logical Scaled Immediate Word
|
|
e_crand=Condition Register AND
|
|
e_crandc=Condition Register AND with Complement
|
|
e_creqv=Condition Register Equivalent
|
|
e_crnand=Condition Register NAND
|
|
e_crnor=Condition Register NOR
|
|
e_cror=Condition Register OR
|
|
e_crorc=Condition Register OR with Complement
|
|
e_crxor=Condition Register XOR
|
|
e_lbz=Load Byte and Zero
|
|
e_lbzu=Load Byte and Zero with Update
|
|
e_lha=Load Halfword Algebraic
|
|
e_lhau=Load Halfword Algebraic with Update
|
|
e_lhz=Load Halfword and Zero
|
|
e_lhzu=Load Halfword and Zero with Update
|
|
e_li=Load Immediate; e_li r3, 1; r3 = 1
|
|
e_lis=Load Immediate Shifted; e_lis r3, 0x7; r3 = 0x70000; r3 = (0x7 << 16)
|
|
e_lmw=Load Multiple Word
|
|
e_lwz=Load Word and Zero
|
|
e_lwzu=Load Word and Zero with Update
|
|
e_mcrf=Move CR Field
|
|
e_mull2i=Multiply (2 operand) Low Immediate
|
|
e_mulli=Multiply Low Scaled Immediate
|
|
e_or2i=OR (two operand) Immediate
|
|
e_or2is=OR (2 operand) Immediate Shifted
|
|
e_ori=OR Scaled Immediate
|
|
e_ori.=OR Scaled Immediate
|
|
e_rlw=Rotate Left Word
|
|
e_rlw.=Rotate Left Word
|
|
e_rlwi=Rotate Left Word Immediate
|
|
e_rlwi.=Rotate Left Word Immediate
|
|
e_rlwimi=Rotate Left Word Immediate then Mask Insert
|
|
e_rlwinm=Rotate Left Word Immediate then AND with Mask
|
|
e_sc=System Call
|
|
e_slwi=Shift Left Word Immediate
|
|
e_srwi=Shift Right Word Immediate
|
|
e_slwi.=Shift Left Word Immediate
|
|
e_srwi.=Shift Right Word Immediate
|
|
e_stb=Store Byte
|
|
e_stbu=Store Byte with Update
|
|
e_sth=Store Halfword
|
|
e_sthu=Store Halfword with Update
|
|
e_stmw=Store Multiple Word
|
|
e_stw=Store Word
|
|
e_stwu=Store Word with Update
|
|
e_subfic=Subtract From Scaled Immediate Carrying
|
|
e_subfic.=Subtract From Scaled Immediate Carrying
|
|
e_xori=XOR Scaled Immediate
|
|
e_xori.=XOR Scaled Immediate
|
|
se_add=Add Short Form
|
|
se_addi=Add Immediate Short Form
|
|
se_and=AND Short Form
|
|
se_and.=AND Short Form
|
|
se_andc=AND with Complement Short Form
|
|
se_andi=AND Immediate Short Form
|
|
se_b=Branch
|
|
se_bl=Branch and Link
|
|
se_bc=Branch Conditional Short Form
|
|
se_bclri=Bit Clear Immediate
|
|
se_bctr=Branch to Count Register
|
|
se_bctrl=Branch to Count Register and Link
|
|
se_bgeni=Bit Generate Immediate
|
|
se_blr=Branch to Link Register
|
|
se_blrl=Branch to Link Register and Link
|
|
se_bmaski=Bit Mask Generate Immediate
|
|
se_bseti=Bit Set Immediate
|
|
se_btsti=Bit Test Immediate
|
|
se_cmp=Compare Word
|
|
se_cmph=Compare Halfword Short Form
|
|
se_cmphl=Compare Halfword Logical Short Form
|
|
se_cmpi=Compare Immediate Word Short Form
|
|
se_cmpl=Compare Logical Word
|
|
se_cmpli=Compare Logical Immediate Word
|
|
se_extsb=Extend Sign Byte Short Form
|
|
se_extsh=Extend Sign Halfword Short Form
|
|
se_extzb=Extend Zero Byte
|
|
se_extzh=Extend Zero Halfword
|
|
se_illegal=Illegal Instruction
|
|
se_isync=Instruction Synchronize
|
|
se_lbz=Load Byte and Zero Short Form
|
|
se_lhz=Load Halfword and Zero Short Form
|
|
se_li=Load Immediate Short Form; se_li r3, 1; r3 = 1
|
|
se_lwz=Load Word and Zero Short Form
|
|
se_mfar=Move from Alternate Register
|
|
se_mfctr=Move From Count Register
|
|
se_mflr=Move From Link Register
|
|
se_mr=Move Register
|
|
se_mtar=Move To Alternate Register
|
|
se_mtctr=Move To Count Register
|
|
se_mtlr=Move To Link Register
|
|
se_mullw=Multiply Low Word Short Form
|
|
se_neg=Negate Short Form
|
|
se_not=NOT Short Form
|
|
se_or=OR Short Form
|
|
se_rfci=Return From Critical Interrupt
|
|
se_rfdi=Return From Debug Interrupt
|
|
se_rfgi=Return From Guest Interrupt
|
|
se_rfi=Return From Interrupt
|
|
se_rfmci=Return From Machine Check Interrupt
|
|
se_sc=System Call
|
|
se_slw=Shift Left Word
|
|
se_slwi=Shift Left Word Immediate Short Form
|
|
se_sraw=Shift Right Algebraic Word
|
|
se_srawi=Shift Right Algebraic Immediate
|
|
se_srw=Shift Right Word
|
|
se_srwi=Shift Right Word Immediate Short Form
|
|
se_stb=Store Byte Short Form
|
|
se_sth=Store Halfword Short Form
|
|
se_stw=Store Word Short Form
|
|
se_sub=Subtract
|
|
se_subf=Subtract From Short Form
|
|
se_subi=Subtract Immediate
|
|
se_subi.=Subtract Immediate
|
|
# Pseudo instructions (incomplete)
|
|
addic=Add Immediate Carrying; addic r3, r3, 1
|
|
bctrl=Branch to Count Register and Link
|
|
bge=Branch if Greater Or Equal
|
|
blr=Branch to Link Register
|
|
blt=Branch if Less Than
|
|
beq=Branch if Equal
bne=Branch if Not Equal
|
|
bnelr=Branch if Not Equal to Link Register
|
|
bt=Branch if condition true
|
|
cmplw=Compare Logical Word; cmplw CR0, r0, r1 (unsigned)
|
|
cmplwi=Compare Logical Word Immediate; cmplwi CR0, r0, 33 (unsigned)
|
|
cmpw=Compare Word; cmpw CR0, r0, r1 (signed)
|
|
cmpwi=Compare Word Immediate
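# Illustrative idiom using the entries above ('done' is a placeholder label):
#   cmpwi cr0, r3, 0   # signed compare of r3 against 0
#   beq   done         # branch if equal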
|
|
li=Load Immediate; li r3, 1; r3 = 1
|
|
lis=Load Immediate Shifted; lis r3, 0x7; r3 = 0x70000; r3 = (0x7 << 16)
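# Illustrative idiom: lis + ori builds a full 32-bit constant (register choice is arbitrary)
#   lis r4, 0x1234       # r4 = 0x12340000
#   ori r4, r4, 0x5678   # r4 = 0x12345678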
|
|
lwz=Load Word And Zero
|
|
mflr=Move From Link Register
|
|
mr=Move Register
|
|
mtctr=Move To Count Register
|
|
mtctrl=Move To Count Register and Link
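# Illustrative idiom (register choice is arbitrary): indirect call through the count register
#   mtctr r12   # CTR = target address held in r12
#   bctrl       # branch to CTR and link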
|
|
mtlr=Move To Link Register
|
|
nop=No Operation
|
|
slwi=Shift Left Word Immediate
|
|
srwi=Shift Right Word Immediate
|
|
bdz=Decrement CTR and Branch if it is Zero
|
|
bdzt=Decrement CTR and Branch if it is Zero and the condition is True
|
|
bdzf=Decrement CTR and Branch if it is Zero and the condition is False
|
|
bgt=Branch if Greater Than
|
|
bnl=Branch if Not Less
|
|
ble=Branch if Less or Equal
|
|
crclr=CR Clear (XOR a CR bit with itself)
|
|
crnot=CR NOT
|
|
crset=CR Set
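# The CR simplified mnemonics above expand to (standard PowerPC equivalences):
#   crclr bx      ->  crxor bx, bx, bx
#   crset bx      ->  creqv bx, bx, bx
#   crnot bx, by  ->  crnor bx, by, by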
|
|
clrlwi=Clear Left Immediate
|
|
extlwi=Extract and Left Justify Immediate
|
|
extrwi=Extract and Right Justify Immediate
|
|
inslwi=Insert From Left Immediate
|
|
insrwi=Insert From Right Immediate
|
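# The rotate-and-mask simplified mnemonics above expand to (standard PowerPC equivalences; n = bit count, b = start bit):
#   slwi   rA, rS, n     ->  rlwinm rA, rS, n, 0, 31-n
#   srwi   rA, rS, n     ->  rlwinm rA, rS, 32-n, n, 31
#   clrlwi rA, rS, n     ->  rlwinm rA, rS, 0, n, 31
#   extlwi rA, rS, n, b  ->  rlwinm rA, rS, b, 0, n-1
#   extrwi rA, rS, n, b  ->  rlwinm rA, rS, b+n, 32-n, 31
#   inslwi rA, rS, n, b  ->  rlwimi rA, rS, 32-b, b, b+n-1
#   insrwi rA, rS, n, b  ->  rlwimi rA, rS, 32-(b+n), b, b+n-1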