target-arm queue:
* i.MX: move i.MX31 CCM object to register array
* xilinx_axidma: remove dead code
* disas/libvixl: Update to upstream VIXL 1.12
* virt: Support legacy -nic command line syntax

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABCAAGBQJWk9LmAAoJEDwlJe0UNgzeTNoP/ilct1jgZH845KiO8mU0MgS3
ab6g0qRVqmWpsuVsXNTahKvJDDubpnwGBTP84tlfIkn271nLq24ihN/bHX6smQAr
fHXaIyvOx/DyjdnIY0KknYlwMrY2Ov8jAPu7i23yJCdz+rr9Ps8Jn5/9Sxc00aGR
OQOVQ7o87e9eEtyiktINpCK7e0yETVeOl1XFvWw43Qq6cDldolplqdHs4S7cL67b
GS6QC9zrPCcJlyfW576DDkeknorfn+H01x/3uJVK9zn6N8XIVsA2Yy/xQ6+VMtYX
fnvSjFlpRUJO/FZlbihdoKkZX3VKR5+h+v/ZttHnjMT1pDs1EZkcQ9COAx5/dP80
4WkilF58fPft83dEBgFaOXA12lg/OQi6vZ0IDh9dAWJv4OAQLLHnPh0NHIvqoLLH
Rs50hHPQGWR/7A16PkdeveEEGC2ROkacRJrXOo1OMhLDEbf7eerzQBmkI3Fi1x65
rUw4SbyXLOzNgyGvW7+53qfM1Em4kDKkNJPvy2FB0yiqeavKoU+OfEUArDdGUAY2
A6Gyl5UTW4VhXc5RsfbURmf6UinjW4vRWvX8S9ISBiy1YehZdmDYlBhuo64SYSR7
f3THyPNASFAmzUozAevESiCKYOrHix+4B7C+6wA+a/oHy6g7+7pdDctuTJLwpxmJ
vvVWUiCSuh95fsA265mZ
=h+v5
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20160111-1' into staging

target-arm queue:
* i.MX: move i.MX31 CCM object to register array
* xilinx_axidma: remove dead code
* disas/libvixl: Update to upstream VIXL 1.12
* virt: Support legacy -nic command line syntax

# gpg: Signature made Mon 11 Jan 2016 16:05:58 GMT using RSA key ID 14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"

* remotes/pmaydell/tags/pull-target-arm-20160111-1:
  hw/arm/virt: Support legacy -nic command line syntax
  disas/libvixl: Update to upstream VIXL 1.12
  hw/dma/xilinx_axidma: remove dead code
  i.MX: move i.MX31 CCM object to register array

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 7b8a354d47
@@ -17,7 +17,7 @@
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "a64/disasm-a64.h"
#include "vixl/a64/disasm-a64.h"

extern "C" {
#include "disas/bfd.h"
@@ -1,7 +1,8 @@
libvixl_OBJS = utils.o \
               a64/instructions-a64.o \
               a64/decoder-a64.o \
               a64/disasm-a64.o
libvixl_OBJS = vixl/utils.o \
               vixl/compiler-intrinsics.o \
               vixl/a64/instructions-a64.o \
               vixl/a64/decoder-a64.o \
               vixl/a64/disasm-a64.o

$(addprefix $(obj)/,$(libvixl_OBJS)): QEMU_CFLAGS := -I$(SRC_PATH)/disas/libvixl $(QEMU_CFLAGS)
@@ -2,11 +2,10 @@
The code in this directory is a subset of libvixl:
 https://github.com/armvixl/vixl
(specifically, it is the set of files needed for disassembly only,
taken from libvixl 1.7).
taken from libvixl 1.12).
Bugfixes should preferably be sent upstream initially.

The disassembler does not currently support the entire A64 instruction
set. Notably:
 * No Advanced SIMD support.
 * Limited support for system instructions.
 * A few miscellaneous integer and floating point instructions are missing.
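
For orientation, here is a minimal sketch of how a caller typically drives this disassembler. It is illustrative only and not part of this commit; it assumes the Decoder and PrintDisassembler interfaces visible in the header diffs further down:

    #include <cstddef>
    #include <cstdio>
    #include "vixl/a64/decoder-a64.h"
    #include "vixl/a64/disasm-a64.h"

    // Decode and print a buffer of A64 instruction words.
    void disas_a64(const uint32_t* code, size_t count) {
      vixl::Decoder decoder;
      vixl::PrintDisassembler disasm(stdout);  // PrintDisassembler(FILE*)
      decoder.AppendVisitor(&disasm);          // register the output visitor
      for (size_t i = 0; i < count; i++) {
        decoder.Decode(vixl::Instruction::CastConst(code + i));
      }
    }
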
(Two file diffs suppressed because they are too large.)
@@ -1,314 +0,0 @@
// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "a64/instructions-a64.h"
#include "a64/assembler-a64.h"

namespace vixl {


// Floating-point infinity values.
const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
const double kFP64PositiveInfinity =
    rawbits_to_double(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
    rawbits_to_double(UINT64_C(0xfff0000000000000));


// The default NaN values (for FPCR.DN=1).
const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);


static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  VIXL_ASSERT(width <= 64);
  rotate &= 63;
  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
          (width - rotate)) | (value >> rotate);
}


static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
              (width == 32));
  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((UINT64_C(1) << width) - 1);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_s:
      case LDR_d: return true;
      default: return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_s:
      case STR_d: return true;
      default: return false;
    }
  }
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() const {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //  (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //

  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}


float Instruction::ImmFP32() const {
  //  ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


double Instruction::ImmFP64() const {
  //  ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  switch (op) {
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return LSDoubleWord;
    default: return LSWord;
  }
}


const Instruction* Instruction::ImmPCOffsetTarget() const {
  const Instruction * base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = ImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(BranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  }
  return base + offset;
}


inline int Instruction::ImmBranch() const {
  switch (BranchType()) {
    case CondBranchType: return ImmCondBranch();
    case UncondBranchType: return ImmUncondBranch();
    case CompareBranchType: return ImmCmpBranch();
    case TestBranchType: return ImmTestBranch();
    default: VIXL_UNREACHABLE();
  }
  return 0;
}


void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else {
    SetBranchImmTarget(target);
  }
}


void Instruction::SetPCRelImmTarget(const Instruction* target) {
  int32_t imm21;
  if ((Mask(PCRelAddressingMask) == ADR)) {
    imm21 = target - this;
  } else {
    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
    imm21 = target_page - this_page;
  }
  Instr imm = Assembler::ImmPCRelAddress(imm21);

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}


void Instruction::SetBranchImmTarget(const Instruction* target) {
  VIXL_ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = (target - this) >> kInstructionSizeLog2;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: VIXL_UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetImmLLiteral(const Instruction* source) {
  VIXL_ASSERT(IsWordAligned(source));
  ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(offset);
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}
}  // namespace vixl
@@ -1,384 +0,0 @@
// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_A64_INSTRUCTIONS_A64_H_
#define VIXL_A64_INSTRUCTIONS_A64_H_

#include "globals.h"
#include "utils.h"
#include "a64/constants-a64.h"

namespace vixl {
// ISA constants. --------------------------------------------------------------

typedef uint32_t Instr;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MBytes;

// This is the nominal page size (as used by the adrp instruction); the actual
// size of the memory pages allocated by the kernel is likely to differ.
const unsigned kPageSize = 4 * KBytes;
const unsigned kPageSizeLog2 = 12;

const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const uint64_t kWRegMask = UINT64_C(0xffffffff);
const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSRegMask = UINT64_C(0xffffffff);
const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSSignMask = UINT64_C(0x80000000);
const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
const uint64_t kWSignMask = UINT64_C(0x80000000);
const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
const uint64_t kByteMask = UINT64_C(0xff);
const uint64_t kHalfWordMask = UINT64_C(0xffff);
const uint64_t kWordMask = UINT64_C(0xffffffff);
const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
const int64_t kXMinInt = INT64_C(0x8000000000000000);
const int32_t kWMaxInt = INT32_C(0x7fffffff);
const int32_t kWMinInt = INT32_C(0x80000000);
const unsigned kLinkRegCode = 30;
const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;

const unsigned kAddressTagOffset = 56;
const unsigned kAddressTagWidth = 8;
const uint64_t kAddressTagMask =
    ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));

// AArch64 floating-point specifics. These match IEEE-754.
const unsigned kDoubleMantissaBits = 52;
const unsigned kDoubleExponentBits = 11;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;

// Floating-point infinity values.
extern const float kFP32PositiveInfinity;
extern const float kFP32NegativeInfinity;
extern const double kFP64PositiveInfinity;
extern const double kFP64NegativeInfinity;

// The default NaN values (for FPCR.DN=1).
extern const double kFP64DefaultNaN;
extern const float kFP32DefaultNaN;


enum LSDataSize {
  LSByte = 0,
  LSHalfword = 1,
  LSWord = 2,
  LSDoubleWord = 3
};

LSDataSize CalcLSPairDataSize(LoadStorePairOp op);

enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType = 1,
  UncondBranchType = 2,
  CompareBranchType = 3,
  TestBranchType = 4
};

enum AddrMode {
  Offset,
  PreIndex,
  PostIndex
};

enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding mode is only available when explicitly specified by the
  // instruction (such as with fcvta). It cannot be set in FPCR.
  FPTieAway
};

enum Reg31Mode {
  Reg31IsStackPointer,
  Reg31IsZeroRegister
};

// Instructions. ---------------------------------------------------------------

class Instruction {
 public:
  Instr InstructionBits() const {
    return *(reinterpret_cast<const Instr*>(this));
  }

  void SetInstructionBits(Instr new_instr) {
    *(reinterpret_cast<Instr*>(this)) = new_instr;
  }

  int Bit(int pos) const {
    return (InstructionBits() >> pos) & 1;
  }

  uint32_t Bits(int msb, int lsb) const {
    return unsigned_bitextract_32(msb, lsb, InstructionBits());
  }

  int32_t SignedBits(int msb, int lsb) const {
    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
    return signed_bitextract_32(msb, lsb, bits);
  }

  Instr Mask(uint32_t mask) const {
    return InstructionBits() & mask;
  }

  #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
  int64_t Name() const { return Func(HighBit, LowBit); }
  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
  #undef DEFINE_GETTER

  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
  // formed from ImmPCRelLo and ImmPCRelHi.
  int ImmPCRel() const {
    int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
    int const width = ImmPCRelLo_width + ImmPCRelHi_width;
    return signed_bitextract_32(width-1, 0, offset);
  }

  uint64_t ImmLogical() const;
  float ImmFP32() const;
  double ImmFP64() const;

  LSDataSize SizeLSPair() const {
    return CalcLSPairDataSize(
        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
  }

  // Helpers.
  bool IsCondBranchImm() const {
    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
  }

  bool IsUncondBranchImm() const {
    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
  }

  bool IsCompareBranch() const {
    return Mask(CompareBranchFMask) == CompareBranchFixed;
  }

  bool IsTestBranch() const {
    return Mask(TestBranchFMask) == TestBranchFixed;
  }

  bool IsPCRelAddressing() const {
    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
  }

  bool IsLogicalImmediate() const {
    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
  }

  bool IsAddSubImmediate() const {
    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
  }

  bool IsAddSubExtended() const {
    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
  }

  bool IsLoadOrStore() const {
    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
  }

  bool IsLoad() const;
  bool IsStore() const;

  bool IsLoadLiteral() const {
    // This includes PRFM_lit.
    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
  }

  bool IsMovn() const {
    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
           (Mask(MoveWideImmediateMask) == MOVN_w);
  }

  // Indicate whether Rd can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rd field.
  Reg31Mode RdMode() const {
    // The following instructions use sp or wsp as Rd:
    //  Add/sub (immediate) when not setting the flags.
    //  Add/sub (extended) when not setting the flags.
    //  Logical (immediate) when not setting the flags.
    // Otherwise, r31 is the zero register.
    if (IsAddSubImmediate() || IsAddSubExtended()) {
      if (Mask(AddSubSetFlagsBit)) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    if (IsLogicalImmediate()) {
      // Of the logical (immediate) instructions, only ANDS (and its aliases)
      // can set the flags. The others can all write into sp.
      // Note that some logical operations are not available to
      // immediate-operand instructions, so we have to combine two masks here.
      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    return Reg31IsZeroRegister;
  }

  // Indicate whether Rn can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rn field.
  Reg31Mode RnMode() const {
    // The following instructions use sp or wsp as Rn:
    //  All loads and stores.
    //  Add/sub (immediate).
    //  Add/sub (extended).
    // Otherwise, r31 is the zero register.
    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
      return Reg31IsStackPointer;
    }
    return Reg31IsZeroRegister;
  }

  ImmBranchType BranchType() const {
    if (IsCondBranchImm()) {
      return CondBranchType;
    } else if (IsUncondBranchImm()) {
      return UncondBranchType;
    } else if (IsCompareBranch()) {
      return CompareBranchType;
    } else if (IsTestBranch()) {
      return TestBranchType;
    } else {
      return UnknownBranchType;
    }
  }

  // Find the target of this instruction. 'this' may be a branch or a
  // PC-relative addressing instruction.
  const Instruction* ImmPCOffsetTarget() const;

  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
  // a PC-relative addressing instruction.
  void SetImmPCOffsetTarget(const Instruction* target);
  // Patch a literal load instruction to load from 'source'.
  void SetImmLLiteral(const Instruction* source);

  // Calculate the address of a literal referred to by a load-literal
  // instruction, and return it as the specified type.
  //
  // The literal itself is safely mutable only if the backing buffer is safely
  // mutable.
  template <typename T>
  T LiteralAddress() const {
    uint64_t base_raw = reinterpret_cast<uintptr_t>(this);
    ptrdiff_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
    uint64_t address_raw = base_raw + offset;

    // Cast the address using a C-style cast. A reinterpret_cast would be
    // appropriate, but it can't cast one integral type to another.
    T address = (T)(address_raw);

    // Assert that the address can be represented by the specified type.
    VIXL_ASSERT((uint64_t)(address) == address_raw);

    return address;
  }

  uint32_t Literal32() const {
    uint32_t literal;
    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }

  uint64_t Literal64() const {
    uint64_t literal;
    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }

  float LiteralFP32() const {
    return rawbits_to_float(Literal32());
  }

  double LiteralFP64() const {
    return rawbits_to_double(Literal64());
  }

  const Instruction* NextInstruction() const {
    return this + kInstructionSize;
  }

  const Instruction* InstructionAtOffset(int64_t offset) const {
    VIXL_ASSERT(IsWordAligned(this + offset));
    return this + offset;
  }

  template<typename T> static Instruction* Cast(T src) {
    return reinterpret_cast<Instruction*>(src);
  }

  template<typename T> static const Instruction* CastConst(T src) {
    return reinterpret_cast<const Instruction*>(src);
  }

 private:
  int ImmBranch() const;

  void SetPCRelImmTarget(const Instruction* target);
  void SetBranchImmTarget(const Instruction* target);
};
}  // namespace vixl

#endif  // VIXL_A64_INSTRUCTIONS_A64_H_
disas/libvixl/vixl/a64/assembler-a64.h: new file, 4624 lines (diff suppressed because it is too large).
(Another file diff suppressed because it is too large.)
@@ -1,4 +1,4 @@
// Copyright 2013, ARM Limited
// Copyright 2014, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -27,8 +27,8 @@
#ifndef VIXL_CPU_A64_H
#define VIXL_CPU_A64_H

#include "globals.h"
#include "instructions-a64.h"
#include "vixl/globals.h"
#include "vixl/a64/instructions-a64.h"

namespace vixl {
@@ -1,4 +1,4 @@
// Copyright 2013, ARM Limited
// Copyright 2014, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -24,9 +24,9 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "globals.h"
#include "utils.h"
#include "a64/decoder-a64.h"
#include "vixl/globals.h"
#include "vixl/utils.h"
#include "vixl/a64/decoder-a64.h"

namespace vixl {
@@ -271,6 +271,11 @@ void Decoder::DecodeLoadStore(const Instruction* instr) {
              (instr->Bits(27, 24) == 0x9) ||
              (instr->Bits(27, 24) == 0xC) ||
              (instr->Bits(27, 24) == 0xD) );
  // TODO(all): rearrange the tree to integrate this branch.
  if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
    DecodeNEONLoadStore(instr);
    return;
  }

  if (instr->Bit(24) == 0) {
    if (instr->Bit(28) == 0) {
@@ -278,7 +283,7 @@ void Decoder::DecodeLoadStore(const Instruction* instr) {
      if (instr->Bit(26) == 0) {
        VisitLoadStoreExclusive(instr);
      } else {
        DecodeAdvSIMDLoadStore(instr);
        VIXL_UNREACHABLE();
      }
    } else {
      if ((instr->Bits(31, 30) == 0x3) ||
@@ -483,6 +488,7 @@ void Decoder::DecodeDataProcessing(const Instruction* instr) {
    case 6: {
      if (instr->Bit(29) == 0x1) {
        VisitUnallocated(instr);
        VIXL_FALLTHROUGH();
      } else {
        if (instr->Bit(30) == 0) {
          if ((instr->Bit(15) == 0x1) ||
@@ -556,18 +562,15 @@ void Decoder::DecodeDataProcessing(const Instruction* instr) {
void Decoder::DecodeFP(const Instruction* instr) {
  VIXL_ASSERT((instr->Bits(27, 24) == 0xE) ||
              (instr->Bits(27, 24) == 0xF));

  if (instr->Bit(28) == 0) {
    DecodeAdvSIMDDataProcessing(instr);
    DecodeNEONVectorDataProcessing(instr);
  } else {
    if (instr->Bit(29) == 1) {
      if (instr->Bits(31, 30) == 0x3) {
        VisitUnallocated(instr);
      } else if (instr->Bits(31, 30) == 0x1) {
        DecodeNEONScalarDataProcessing(instr);
      } else {
    if (instr->Bits(31, 30) == 0x3) {
      VisitUnallocated(instr);
    } else if (instr->Bits(31, 30) == 0x1) {
      DecodeAdvSIMDDataProcessing(instr);
    } else {
      if (instr->Bit(29) == 0) {
        if (instr->Bit(24) == 0) {
          if (instr->Bit(21) == 0) {
            if ((instr->Bit(23) == 1) ||
@@ -674,23 +677,190 @@ void Decoder::DecodeFP(const Instruction* instr) {
          VisitFPDataProcessing3Source(instr);
        }
      }
    } else {
      VisitUnallocated(instr);
    }
  }
}
}
}


void Decoder::DecodeAdvSIMDLoadStore(const Instruction* instr) {
  // TODO: Implement Advanced SIMD load/store instruction decode.
void Decoder::DecodeNEONLoadStore(const Instruction* instr) {
  VIXL_ASSERT(instr->Bits(29, 25) == 0x6);
  VisitUnimplemented(instr);
  if (instr->Bit(31) == 0) {
    if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
      VisitUnallocated(instr);
      return;
    }

    if (instr->Bit(23) == 0) {
      if (instr->Bits(20, 16) == 0) {
        if (instr->Bit(24) == 0) {
          VisitNEONLoadStoreMultiStruct(instr);
        } else {
          VisitNEONLoadStoreSingleStruct(instr);
        }
      } else {
        VisitUnallocated(instr);
      }
    } else {
      if (instr->Bit(24) == 0) {
        VisitNEONLoadStoreMultiStructPostIndex(instr);
      } else {
        VisitNEONLoadStoreSingleStructPostIndex(instr);
      }
    }
  } else {
    VisitUnallocated(instr);
  }
}


void Decoder::DecodeAdvSIMDDataProcessing(const Instruction* instr) {
  // TODO: Implement Advanced SIMD data processing instruction decode.
  VIXL_ASSERT(instr->Bits(27, 25) == 0x7);
  VisitUnimplemented(instr);
void Decoder::DecodeNEONVectorDataProcessing(const Instruction* instr) {
  VIXL_ASSERT(instr->Bits(28, 25) == 0x7);
  if (instr->Bit(31) == 0) {
    if (instr->Bit(24) == 0) {
      if (instr->Bit(21) == 0) {
        if (instr->Bit(15) == 0) {
          if (instr->Bit(10) == 0) {
            if (instr->Bit(29) == 0) {
              if (instr->Bit(11) == 0) {
                VisitNEONTable(instr);
              } else {
                VisitNEONPerm(instr);
              }
            } else {
              VisitNEONExtract(instr);
            }
          } else {
            if (instr->Bits(23, 22) == 0) {
              VisitNEONCopy(instr);
            } else {
              VisitUnallocated(instr);
            }
          }
        } else {
          VisitUnallocated(instr);
        }
      } else {
        if (instr->Bit(10) == 0) {
          if (instr->Bit(11) == 0) {
            VisitNEON3Different(instr);
          } else {
            if (instr->Bits(18, 17) == 0) {
              if (instr->Bit(20) == 0) {
                if (instr->Bit(19) == 0) {
                  VisitNEON2RegMisc(instr);
                } else {
                  if (instr->Bits(30, 29) == 0x2) {
                    VisitCryptoAES(instr);
                  } else {
                    VisitUnallocated(instr);
                  }
                }
              } else {
                if (instr->Bit(19) == 0) {
                  VisitNEONAcrossLanes(instr);
                } else {
                  VisitUnallocated(instr);
                }
              }
            } else {
              VisitUnallocated(instr);
            }
          }
        } else {
          VisitNEON3Same(instr);
        }
      }
    } else {
      if (instr->Bit(10) == 0) {
        VisitNEONByIndexedElement(instr);
      } else {
        if (instr->Bit(23) == 0) {
          if (instr->Bits(22, 19) == 0) {
            VisitNEONModifiedImmediate(instr);
          } else {
            VisitNEONShiftImmediate(instr);
          }
        } else {
          VisitUnallocated(instr);
        }
      }
    }
  } else {
    VisitUnallocated(instr);
  }
}


void Decoder::DecodeNEONScalarDataProcessing(const Instruction* instr) {
  VIXL_ASSERT(instr->Bits(28, 25) == 0xF);
  if (instr->Bit(24) == 0) {
    if (instr->Bit(21) == 0) {
      if (instr->Bit(15) == 0) {
        if (instr->Bit(10) == 0) {
          if (instr->Bit(29) == 0) {
            if (instr->Bit(11) == 0) {
              VisitCrypto3RegSHA(instr);
            } else {
              VisitUnallocated(instr);
            }
          } else {
            VisitUnallocated(instr);
          }
        } else {
          if (instr->Bits(23, 22) == 0) {
            VisitNEONScalarCopy(instr);
          } else {
            VisitUnallocated(instr);
          }
        }
      } else {
        VisitUnallocated(instr);
      }
    } else {
      if (instr->Bit(10) == 0) {
        if (instr->Bit(11) == 0) {
          VisitNEONScalar3Diff(instr);
        } else {
          if (instr->Bits(18, 17) == 0) {
            if (instr->Bit(20) == 0) {
              if (instr->Bit(19) == 0) {
                VisitNEONScalar2RegMisc(instr);
              } else {
                if (instr->Bit(29) == 0) {
                  VisitCrypto2RegSHA(instr);
                } else {
                  VisitUnallocated(instr);
                }
              }
            } else {
              if (instr->Bit(19) == 0) {
                VisitNEONScalarPairwise(instr);
              } else {
                VisitUnallocated(instr);
              }
            }
          } else {
            VisitUnallocated(instr);
          }
        }
      } else {
        VisitNEONScalar3Same(instr);
      }
    }
  } else {
    if (instr->Bit(10) == 0) {
      VisitNEONScalarByIndexedElement(instr);
    } else {
      if (instr->Bit(23) == 0) {
        VisitNEONScalarShiftImmediate(instr);
      } else {
        VisitUnallocated(instr);
      }
    }
  }
}
@@ -1,4 +1,4 @@
// Copyright 2013, ARM Limited
// Copyright 2014, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -29,13 +29,13 @@

#include <list>

#include "globals.h"
#include "a64/instructions-a64.h"
#include "vixl/globals.h"
#include "vixl/a64/instructions-a64.h"


// List macro containing all visitors needed by the decoder class.

#define VISITOR_LIST(V)             \
#define VISITOR_LIST_THAT_RETURN(V) \
  V(PCRelAddressing)                \
  V(AddSubImmediate)                \
  V(LogicalImmediate)               \
@@ -79,8 +79,39 @@
  V(FPDataProcessing3Source)        \
  V(FPIntegerConvert)               \
  V(FPFixedPointConvert)            \
  V(Unallocated)                    \
  V(Unimplemented)
  V(Crypto2RegSHA)                  \
  V(Crypto3RegSHA)                  \
  V(CryptoAES)                      \
  V(NEON2RegMisc)                   \
  V(NEON3Different)                 \
  V(NEON3Same)                      \
  V(NEONAcrossLanes)                \
  V(NEONByIndexedElement)           \
  V(NEONCopy)                       \
  V(NEONExtract)                    \
  V(NEONLoadStoreMultiStruct)       \
  V(NEONLoadStoreMultiStructPostIndex) \
  V(NEONLoadStoreSingleStruct)      \
  V(NEONLoadStoreSingleStructPostIndex) \
  V(NEONModifiedImmediate)          \
  V(NEONScalar2RegMisc)             \
  V(NEONScalar3Diff)                \
  V(NEONScalar3Same)                \
  V(NEONScalarByIndexedElement)     \
  V(NEONScalarCopy)                 \
  V(NEONScalarPairwise)             \
  V(NEONScalarShiftImmediate)       \
  V(NEONShiftImmediate)             \
  V(NEONTable)                      \
  V(NEONPerm)                       \

#define VISITOR_LIST_THAT_DONT_RETURN(V) \
  V(Unallocated)                    \
  V(Unimplemented)                  \

#define VISITOR_LIST(V)             \
  VISITOR_LIST_THAT_RETURN(V)       \
  VISITOR_LIST_THAT_DONT_RETURN(V)  \

namespace vixl {

@@ -222,12 +253,17 @@ class Decoder {
  // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
  // and call the corresponding visitors.
  // On entry, instruction bits 29:25 = 0x6.
  void DecodeAdvSIMDLoadStore(const Instruction* instr);
  void DecodeNEONLoadStore(const Instruction* instr);

  // Decode the Advanced SIMD (NEON) data processing part of the instruction
  // tree, and call the corresponding visitors.
  // On entry, instruction bits 27:25 = 0x7.
  void DecodeAdvSIMDDataProcessing(const Instruction* instr);
  // Decode the Advanced SIMD (NEON) vector data processing part of the
  // instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 28:25 = 0x7.
  void DecodeNEONVectorDataProcessing(const Instruction* instr);

  // Decode the Advanced SIMD (NEON) scalar data processing part of the
  // instruction tree, and call the corresponding visitors.
  // On entry, instruction bits 28:25 = 0xF.
  void DecodeNEONScalarDataProcessing(const Instruction* instr);

 private:
  // Visitors are registered in a list.
disas/libvixl/vixl/a64/disasm-a64.cc: new file, 3487 lines (diff suppressed because it is too large).
@@ -1,4 +1,4 @@
// Copyright 2013, ARM Limited
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@@ -27,11 +27,11 @@
#ifndef VIXL_A64_DISASM_A64_H
#define VIXL_A64_DISASM_A64_H

#include "globals.h"
#include "utils.h"
#include "instructions-a64.h"
#include "decoder-a64.h"
#include "assembler-a64.h"
#include "vixl/globals.h"
#include "vixl/utils.h"
#include "vixl/a64/instructions-a64.h"
#include "vixl/a64/decoder-a64.h"
#include "vixl/a64/assembler-a64.h"

namespace vixl {

@@ -55,6 +55,7 @@ class Disassembler: public DecoderVisitor {
  // customize the disassembly output.

  // Prints the name of a register.
  // TODO: This currently doesn't allow renaming of V registers.
  virtual void AppendRegisterNameToOutput(const Instruction* instr,
                                          const CPURegister& reg);

@@ -122,7 +123,8 @@ class Disassembler: public DecoderVisitor {
  int SubstituteLSRegOffsetField(const Instruction* instr, const char* format);
  int SubstitutePrefetchField(const Instruction* instr, const char* format);
  int SubstituteBarrierField(const Instruction* instr, const char* format);

  int SubstituteSysOpField(const Instruction* instr, const char* format);
  int SubstituteCrField(const Instruction* instr, const char* format);
  bool RdIsZROrSP(const Instruction* instr) const {
    return (instr->Rd() == kZeroRegCode);
  }
@@ -163,7 +165,6 @@ class Disassembler: public DecoderVisitor {
class PrintDisassembler: public Disassembler {
 public:
  explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
  virtual ~PrintDisassembler() { }

 protected:
  virtual void ProcessOutput(const Instruction* instr);
disas/libvixl/vixl/a64/instructions-a64.cc: new file, 622 lines.
@@ -0,0 +1,622 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "vixl/a64/instructions-a64.h"
#include "vixl/a64/assembler-a64.h"

namespace vixl {


// Floating-point infinity values.
const float16 kFP16PositiveInfinity = 0x7c00;
const float16 kFP16NegativeInfinity = 0xfc00;
const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
const double kFP64PositiveInfinity =
    rawbits_to_double(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
    rawbits_to_double(UINT64_C(0xfff0000000000000));


// The default NaN values (for FPCR.DN=1).
const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
const float16 kFP16DefaultNaN = 0x7e00;


static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  VIXL_ASSERT(width <= 64);
  rotate &= 63;
  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
          (width - rotate)) | (value >> rotate);
}


static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
              (width == 32));
  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((UINT64_C(1) << width) - 1);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}
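
// Illustrative worked example (editorial note, not upstream code):
// RepeatBitsAcrossReg(32, 0x3, 4) masks the value to the 4-bit pattern
// 0b0011 and then doubles it up -- 0x33, 0x3333, 0x33333333 -- until the
// pattern fills the 32-bit register.
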
bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_b:
      case LDR_h:
      case LDR_s:
      case LDR_d:
      case LDR_q: return true;
      default: return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_b:
      case STR_h:
      case STR_s:
      case STR_d:
      case STR_q: return true;
      default: return false;
    }
  }
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() const {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int32_t n = BitN();
  int32_t imm_s = ImmSetBits();
  int32_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //  (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //

  if (n == 1) {
    if (imm_s == 0x3f) {
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1f) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}
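
// Illustrative worked example (editorial note, not upstream code): for
// N=0, imms=0b111100, immr=0, the loop above stops at width=2 (the first
// clear width bit), giving mask=1 and S = imms & mask = 0, so bits=0b1;
// rotating by immr & mask = 0 and repeating the 2-bit pattern across a
// 64-bit register yields 0x5555555555555555.
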
uint32_t Instruction::ImmNEONabcdefgh() const {
  return ImmNEONabc() << 5 | ImmNEONdefgh();
}


float Instruction::Imm8ToFP32(uint32_t imm8) {
  //   Imm8: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
  uint32_t bits = imm8;
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


float Instruction::ImmFP32() const {
  return Imm8ToFP32(ImmFP());
}


double Instruction::Imm8ToFP64(uint32_t imm8) {
  //   Imm8: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = imm8;
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


double Instruction::ImmFP64() const {
  return Imm8ToFP64(ImmFP());
}


float Instruction::ImmNEONFP32() const {
  return Imm8ToFP32(ImmNEONabcdefgh());
}


double Instruction::ImmNEONFP64() const {
  return Imm8ToFP64(ImmNEONabcdefgh());
}
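
// Illustrative worked example (editorial note, not upstream code): for
// imm8 = 0x70 (abcdefgh = 01110000), bit7=0, bit6=1 and bit5_to_0=0x30, so
// Imm8ToFP32() builds (32 - 1) << 25 | 0x30 << 19 = 0x3f800000, i.e. 1.0f,
// the value that "fmov s0, #1.0" encodes.
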
unsigned CalcLSDataSize(LoadStoreOp op) {
  VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
  unsigned size = static_cast<Instr>(op) >> LSSize_offset;
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeInBytesLog2;
    }
  }
  return size;
}
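
// Illustrative worked example (editorial note, not upstream code): the
// return value is log2 of the access size in bytes. For STR_x the "size"
// field is 0b11, so the access is 2^3 = 8 bytes; for LDR_q the size field
// is 0 but opc >= 2, so the vector special case above returns
// kQRegSizeInBytesLog2 (2^4 = 16 bytes).
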
unsigned CalcLSPairDataSize(LoadStorePairOp op) {
  VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
  VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
  switch (op) {
    case STP_q:
    case LDP_q: return kQRegSizeInBytesLog2;
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return kXRegSizeInBytesLog2;
    default: return kWRegSizeInBytesLog2;
  }
}


int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) {
  switch (branch_type) {
    case UncondBranchType:
      return ImmUncondBranch_width;
    case CondBranchType:
      return ImmCondBranch_width;
    case CompareBranchType:
      return ImmCmpBranch_width;
    case TestBranchType:
      return ImmTestBranch_width;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) {
  int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1);
  return encoded_max * kInstructionSize;
}
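
// Illustrative worked example (editorial note, not upstream code): a
// conditional branch stores a 19-bit signed word offset, so encoded_max is
// 1 << 18 = 262144 instructions; multiplied by kInstructionSize this gives
// the architectural +/-1 MiB reach of B.cond.
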
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     int64_t offset) {
  return is_intn(ImmBranchRangeBitwidth(branch_type), offset);
}


const Instruction* Instruction::ImmPCOffsetTarget() const {
  const Instruction * base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = ImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(BranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  }
  return base + offset;
}
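
// Illustrative worked example (editorial note, not upstream code): for an
// ADRP at address 0x12344 with ImmPCRel() == 1, base is aligned down to
// 0x12000 and the offset scales to 1 * 4096, so the computed target page
// address is 0x13000, matching the architectural definition of adrp.
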
int Instruction::ImmBranch() const {
|
||||
switch (BranchType()) {
|
||||
case CondBranchType: return ImmCondBranch();
|
||||
case UncondBranchType: return ImmUncondBranch();
|
||||
case CompareBranchType: return ImmCmpBranch();
|
||||
case TestBranchType: return ImmTestBranch();
|
||||
default: VIXL_UNREACHABLE();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
|
||||
if (IsPCRelAddressing()) {
|
||||
SetPCRelImmTarget(target);
|
||||
} else {
|
||||
SetBranchImmTarget(target);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Instruction::SetPCRelImmTarget(const Instruction* target) {
|
||||
ptrdiff_t imm21;
|
||||
if ((Mask(PCRelAddressingMask) == ADR)) {
|
||||
imm21 = target - this;
|
||||
} else {
|
||||
VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
|
||||
uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
|
||||
uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
|
||||
imm21 = target_page - this_page;
|
||||
}
|
||||
Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));
|
||||
|
||||
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
|
||||
}
|
||||
|
||||
|
||||
void Instruction::SetBranchImmTarget(const Instruction* target) {
|
||||
VIXL_ASSERT(((target - this) & 3) == 0);
|
||||
Instr branch_imm = 0;
|
||||
uint32_t imm_mask = 0;
|
||||
int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
|
||||
switch (BranchType()) {
|
||||
case CondBranchType: {
|
||||
branch_imm = Assembler::ImmCondBranch(offset);
|
||||
imm_mask = ImmCondBranch_mask;
|
||||
break;
|
||||
}
|
||||
case UncondBranchType: {
|
||||
branch_imm = Assembler::ImmUncondBranch(offset);
|
||||
imm_mask = ImmUncondBranch_mask;
|
||||
break;
|
||||
}
|
||||
case CompareBranchType: {
|
||||
branch_imm = Assembler::ImmCmpBranch(offset);
|
||||
imm_mask = ImmCmpBranch_mask;
|
||||
break;
|
||||
}
|
||||
case TestBranchType: {
|
||||
branch_imm = Assembler::ImmTestBranch(offset);
|
||||
imm_mask = ImmTestBranch_mask;
|
||||
break;
|
||||
}
|
||||
default: VIXL_UNREACHABLE();
|
||||
}
|
||||
SetInstructionBits(Mask(~imm_mask) | branch_imm);
|
||||
}
|
||||
|
||||
|
||||
void Instruction::SetImmLLiteral(const Instruction* source) {
|
||||
VIXL_ASSERT(IsWordAligned(source));
|
||||
ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
|
||||
Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
|
||||
Instr mask = ImmLLiteral_mask;
|
||||
|
||||
SetInstructionBits(Mask(~mask) | imm);
|
||||
}


VectorFormat VectorFormatHalfWidth(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
              vform == kFormatH || vform == kFormatS || vform == kFormatD);
  switch (vform) {
    case kFormat8H: return kFormat8B;
    case kFormat4S: return kFormat4H;
    case kFormat2D: return kFormat2S;
    case kFormatH: return kFormatB;
    case kFormatS: return kFormatH;
    case kFormatD: return kFormatS;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatDoubleWidth(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
              vform == kFormatB || vform == kFormatH || vform == kFormatS);
  switch (vform) {
    case kFormat8B: return kFormat8H;
    case kFormat4H: return kFormat4S;
    case kFormat2S: return kFormat2D;
    case kFormatB: return kFormatH;
    case kFormatH: return kFormatS;
    case kFormatS: return kFormatD;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatFillQ(const VectorFormat vform) {
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return kFormat16B;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return kFormat8H;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return kFormat4S;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return kFormat2D;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform) {
  switch (vform) {
    case kFormat4H: return kFormat8B;
    case kFormat8H: return kFormat16B;
    case kFormat2S: return kFormat4H;
    case kFormat4S: return kFormat8H;
    case kFormat1D: return kFormat2S;
    case kFormat2D: return kFormat4S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatDoubleLanes(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
  switch (vform) {
    case kFormat8B: return kFormat16B;
    case kFormat4H: return kFormat8H;
    case kFormat2S: return kFormat4S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatHalfLanes(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
  switch (vform) {
    case kFormat16B: return kFormat8B;
    case kFormat8H: return kFormat4H;
    case kFormat4S: return kFormat2S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat ScalarFormatFromLaneSize(int laneSize) {
  switch (laneSize) {
    case 8: return kFormatB;
    case 16: return kFormatH;
    case 32: return kFormatS;
    case 64: return kFormatD;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB: return kBRegSize;
    case kFormatH: return kHRegSize;
    case kFormatS: return kSRegSize;
    case kFormatD: return kDRegSize;
    case kFormat8B:
    case kFormat4H:
    case kFormat2S:
    case kFormat1D: return kDRegSize;
    default: return kQRegSize;
  }
}


unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
  return RegisterSizeInBitsFromFormat(vform) / 8;
}


unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 8;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 16;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 32;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 64;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int LaneSizeInBytesFromFormat(VectorFormat vform) {
  return LaneSizeInBitsFromFormat(vform) / 8;
}


int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 0;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 1;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 2;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 3;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int LaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormat16B: return 16;
    case kFormat8B:
    case kFormat8H: return 8;
    case kFormat4H:
    case kFormat4S: return 4;
    case kFormat2S:
    case kFormat2D: return 2;
    case kFormat1D:
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD: return 1;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int MaxLaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 16;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 8;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 4;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 2;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


// Does 'vform' indicate a vector format or a scalar format?
bool IsVectorFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD: return false;
    default: return true;
  }
}


int64_t MaxIntFromFormat(VectorFormat vform) {
  return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}


int64_t MinIntFromFormat(VectorFormat vform) {
  return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
}


uint64_t MaxUintFromFormat(VectorFormat vform) {
  return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
}  // namespace vixl
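
The three extrema functions above lean on shift semantics: shifting INT64_MAX right keeps the sign bit clear, while the arithmetic shift of INT64_MIN drags the sign bit down, yielding the lane minimum. A quick worked check (standalone; it assumes nothing from the diff beyond the formulas just shown):

#include <cstdint>
#include <cstdio>

int main() {
  const int lane_bits = 8;  // a B-sized lane
  // 0x7fff... >> 56 == 127, the largest signed 8-bit value.
  int64_t max_int = INT64_MAX >> (64 - lane_bits);
  // Right-shifting the minimum sign-extends on mainstream compilers,
  // exactly as the library code relies on: result is -128.
  int64_t min_int = INT64_MIN >> (64 - lane_bits);
  // 0xffff... >> 56 == 255, the largest unsigned 8-bit value.
  uint64_t max_uint = UINT64_MAX >> (64 - lane_bits);
  printf("%lld %lld %llu\n", (long long)max_int, (long long)min_int,
         (unsigned long long)max_uint);  // prints: 127 -128 255
  return 0;
}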
757 disas/libvixl/vixl/a64/instructions-a64.h Normal file
@ -0,0 +1,757 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_A64_INSTRUCTIONS_A64_H_
#define VIXL_A64_INSTRUCTIONS_A64_H_

#include "vixl/globals.h"
#include "vixl/utils.h"
#include "vixl/a64/constants-a64.h"

namespace vixl {
// ISA constants. --------------------------------------------------------------

typedef uint32_t Instr;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MBytes;

// This is the nominal page size (as used by the adrp instruction); the actual
// size of the memory pages allocated by the kernel is likely to differ.
const unsigned kPageSize = 4 * KBytes;
const unsigned kPageSizeLog2 = 12;

const unsigned kBRegSize = 8;
const unsigned kBRegSizeLog2 = 3;
const unsigned kBRegSizeInBytes = kBRegSize / 8;
const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
const unsigned kHRegSize = 16;
const unsigned kHRegSizeLog2 = 4;
const unsigned kHRegSizeInBytes = kHRegSize / 8;
const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const unsigned kQRegSize = 128;
const unsigned kQRegSizeLog2 = 7;
const unsigned kQRegSizeInBytes = kQRegSize / 8;
const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
const uint64_t kWRegMask = UINT64_C(0xffffffff);
const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSRegMask = UINT64_C(0xffffffff);
const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSSignMask = UINT64_C(0x80000000);
const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
const uint64_t kWSignMask = UINT64_C(0x80000000);
const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
const uint64_t kByteMask = UINT64_C(0xff);
const uint64_t kHalfWordMask = UINT64_C(0xffff);
const uint64_t kWordMask = UINT64_C(0xffffffff);
const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
const int64_t kXMinInt = INT64_C(0x8000000000000000);
const int32_t kWMaxInt = INT32_C(0x7fffffff);
const int32_t kWMinInt = INT32_C(0x80000000);
const unsigned kLinkRegCode = 30;
const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;

const unsigned kAddressTagOffset = 56;
const unsigned kAddressTagWidth = 8;
const uint64_t kAddressTagMask =
    ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));

// AArch64 floating-point specifics. These match IEEE-754.
const unsigned kDoubleMantissaBits = 52;
const unsigned kDoubleExponentBits = 11;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
const unsigned kFloat16MantissaBits = 10;
const unsigned kFloat16ExponentBits = 5;

// Floating-point infinity values.
extern const float16 kFP16PositiveInfinity;
extern const float16 kFP16NegativeInfinity;
extern const float kFP32PositiveInfinity;
extern const float kFP32NegativeInfinity;
extern const double kFP64PositiveInfinity;
extern const double kFP64NegativeInfinity;

// The default NaN values (for FPCR.DN=1).
extern const float16 kFP16DefaultNaN;
extern const float kFP32DefaultNaN;
extern const double kFP64DefaultNaN;

unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);

enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType = 1,
  UncondBranchType = 2,
  CompareBranchType = 3,
  TestBranchType = 4
};

enum AddrMode {
  Offset,
  PreIndex,
  PostIndex
};

enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,
  FPPositiveInfinity = 0x1,
  FPNegativeInfinity = 0x2,
  FPZero = 0x3,

  // The final rounding modes are only available when explicitly specified by
  // the instruction (such as with fcvta). They cannot be set in FPCR.
  FPTieAway,
  FPRoundOdd
};

enum Reg31Mode {
  Reg31IsStackPointer,
  Reg31IsZeroRegister
};

// Instructions. ---------------------------------------------------------------

class Instruction {
 public:
  Instr InstructionBits() const {
    return *(reinterpret_cast<const Instr*>(this));
  }

  void SetInstructionBits(Instr new_instr) {
    *(reinterpret_cast<Instr*>(this)) = new_instr;
  }

  int Bit(int pos) const {
    return (InstructionBits() >> pos) & 1;
  }

  uint32_t Bits(int msb, int lsb) const {
    return unsigned_bitextract_32(msb, lsb, InstructionBits());
  }

  int32_t SignedBits(int msb, int lsb) const {
    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
    return signed_bitextract_32(msb, lsb, bits);
  }

  Instr Mask(uint32_t mask) const {
    return InstructionBits() & mask;
  }

  #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
  int32_t Name() const { return Func(HighBit, LowBit); }
  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
  #undef DEFINE_GETTER

  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
  // formed from ImmPCRelLo and ImmPCRelHi.
  int ImmPCRel() const {
    int offset =
        static_cast<int>((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
    int width = ImmPCRelLo_width + ImmPCRelHi_width;
    return signed_bitextract_32(width - 1, 0, offset);
  }
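
ImmPCRel() stitches the split hi/lo fields back together and then sign-extends from the combined width (21 bits for the PC-relative immediate). That sign extension is what signed_bitextract_32 provides; a minimal standalone equivalent, with the helper restated locally since its real definition lives in vixl/utils.h rather than in this hunk:

#include <cstdint>
#include <cstdio>

// Local stand-in for the sign-extension step: keep `width` low bits of
// `value` and propagate bit (width - 1) as the sign.
int32_t SignExtract(int width, uint32_t value) {
  uint32_t shift = 32 - width;
  return static_cast<int32_t>(value << shift) >> shift;
}

int main() {
  // A 21-bit field holding 0x1fffff is -1 once sign-extended.
  printf("%d\n", SignExtract(21, 0x1fffff));  // prints -1
  printf("%d\n", SignExtract(21, 0x0fffff));  // prints 1048575
  return 0;
}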

  uint64_t ImmLogical() const;
  unsigned ImmNEONabcdefgh() const;
  float ImmFP32() const;
  double ImmFP64() const;
  float ImmNEONFP32() const;
  double ImmNEONFP64() const;

  unsigned SizeLS() const {
    return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
  }

  unsigned SizeLSPair() const {
    return CalcLSPairDataSize(
        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
  }

  int NEONLSIndex(int access_size_shift) const {
    int64_t q = NEONQ();
    int64_t s = NEONS();
    int64_t size = NEONLSSize();
    int64_t index = (q << 3) | (s << 2) | size;
    return static_cast<int>(index >> access_size_shift);
  }

  // Helpers.
  bool IsCondBranchImm() const {
    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
  }

  bool IsUncondBranchImm() const {
    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
  }

  bool IsCompareBranch() const {
    return Mask(CompareBranchFMask) == CompareBranchFixed;
  }

  bool IsTestBranch() const {
    return Mask(TestBranchFMask) == TestBranchFixed;
  }

  bool IsImmBranch() const {
    return BranchType() != UnknownBranchType;
  }

  bool IsPCRelAddressing() const {
    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
  }

  bool IsLogicalImmediate() const {
    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
  }

  bool IsAddSubImmediate() const {
    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
  }

  bool IsAddSubExtended() const {
    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
  }

  bool IsLoadOrStore() const {
    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
  }

  bool IsLoad() const;
  bool IsStore() const;

  bool IsLoadLiteral() const {
    // This includes PRFM_lit.
    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
  }

  bool IsMovn() const {
    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
           (Mask(MoveWideImmediateMask) == MOVN_w);
  }

  static int ImmBranchRangeBitwidth(ImmBranchType branch_type);
  static int32_t ImmBranchForwardRange(ImmBranchType branch_type);
  static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);

  // Indicate whether Rd can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rd field.
  Reg31Mode RdMode() const {
    // The following instructions use sp or wsp as Rd:
    //  Add/sub (immediate) when not setting the flags.
    //  Add/sub (extended) when not setting the flags.
    //  Logical (immediate) when not setting the flags.
    // Otherwise, r31 is the zero register.
    if (IsAddSubImmediate() || IsAddSubExtended()) {
      if (Mask(AddSubSetFlagsBit)) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    if (IsLogicalImmediate()) {
      // Of the logical (immediate) instructions, only ANDS (and its aliases)
      // can set the flags. The others can all write into sp.
      // Note that some logical operations are not available to
      // immediate-operand instructions, so we have to combine two masks here.
      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
        return Reg31IsZeroRegister;
      } else {
        return Reg31IsStackPointer;
      }
    }
    return Reg31IsZeroRegister;
  }

  // Indicate whether Rn can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rn field.
  Reg31Mode RnMode() const {
    // The following instructions use sp or wsp as Rn:
    //  All loads and stores.
    //  Add/sub (immediate).
    //  Add/sub (extended).
    // Otherwise, r31 is the zero register.
    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
      return Reg31IsStackPointer;
    }
    return Reg31IsZeroRegister;
  }

  ImmBranchType BranchType() const {
    if (IsCondBranchImm()) {
      return CondBranchType;
    } else if (IsUncondBranchImm()) {
      return UncondBranchType;
    } else if (IsCompareBranch()) {
      return CompareBranchType;
    } else if (IsTestBranch()) {
      return TestBranchType;
    } else {
      return UnknownBranchType;
    }
  }

  // Find the target of this instruction. 'this' may be a branch or a
  // PC-relative addressing instruction.
  const Instruction* ImmPCOffsetTarget() const;

  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
  // a PC-relative addressing instruction.
  void SetImmPCOffsetTarget(const Instruction* target);
  // Patch a literal load instruction to load from 'source'.
  void SetImmLLiteral(const Instruction* source);

  // The range of a load literal instruction, expressed as 'instr +- range'.
  // The range is actually the 'positive' range; the branch instruction can
  // target [instr - range - kInstructionSize, instr + range].
  static const int kLoadLiteralImmBitwidth = 19;
  static const int kLoadLiteralRange =
      (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;

  // Calculate the address of a literal referred to by a load-literal
  // instruction, and return it as the specified type.
  //
  // The literal itself is safely mutable only if the backing buffer is safely
  // mutable.
  template <typename T>
  T LiteralAddress() const {
    uint64_t base_raw = reinterpret_cast<uint64_t>(this);
    int64_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
    uint64_t address_raw = base_raw + offset;

    // Cast the address using a C-style cast. A reinterpret_cast would be
    // appropriate, but it can't cast one integral type to another.
    T address = (T)(address_raw);

    // Assert that the address can be represented by the specified type.
    VIXL_ASSERT((uint64_t)(address) == address_raw);

    return address;
  }

  uint32_t Literal32() const {
    uint32_t literal;
    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }

  uint64_t Literal64() const {
    uint64_t literal;
    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }

  float LiteralFP32() const {
    return rawbits_to_float(Literal32());
  }

  double LiteralFP64() const {
    return rawbits_to_double(Literal64());
  }

  const Instruction* NextInstruction() const {
    return this + kInstructionSize;
  }

  const Instruction* InstructionAtOffset(int64_t offset) const {
    VIXL_ASSERT(IsWordAligned(this + offset));
    return this + offset;
  }

  template<typename T> static Instruction* Cast(T src) {
    return reinterpret_cast<Instruction*>(src);
  }

  template<typename T> static const Instruction* CastConst(T src) {
    return reinterpret_cast<const Instruction*>(src);
  }

 private:
  int ImmBranch() const;

  static float Imm8ToFP32(uint32_t imm8);
  static double Imm8ToFP64(uint32_t imm8);

  void SetPCRelImmTarget(const Instruction* target);
  void SetBranchImmTarget(const Instruction* target);
};
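
The accessors above are thin wrappers over bit extraction from the raw 32-bit word. As a self-contained illustration of the kind of field layout they decode (the masks below are restated by hand and are not taken from the diff), here is Rd/Rn/imm12 extraction for an A64 ADD-immediate encoding:

#include <cstdint>
#include <cstdio>

// Extract bits [msb:lsb] of a 32-bit instruction word.
uint32_t Bits(uint32_t instr, int msb, int lsb) {
  return (instr >> lsb) & ((UINT32_C(1) << (msb - lsb + 1)) - 1);
}

int main() {
  uint32_t instr = 0x91000421;  // add x1, x1, #1 (A64 ADD immediate)
  printf("rd=%u rn=%u imm12=%u\n",
         Bits(instr, 4, 0),     // destination register
         Bits(instr, 9, 5),     // first source register
         Bits(instr, 21, 10));  // 12-bit unsigned immediate
  return 0;
}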


// Functions for handling NEON vector format information.
enum VectorFormat {
  kFormatUndefined = 0xffffffff,
  kFormat8B = NEON_8B,
  kFormat16B = NEON_16B,
  kFormat4H = NEON_4H,
  kFormat8H = NEON_8H,
  kFormat2S = NEON_2S,
  kFormat4S = NEON_4S,
  kFormat1D = NEON_1D,
  kFormat2D = NEON_2D,

  // Scalar formats. We add the scalar bit to distinguish between scalar and
  // vector enumerations; the bit is always set in the encoding of scalar ops
  // and always clear for vector ops. Although kFormatD and kFormat1D appear
  // to be the same, their meaning is subtly different. The first is a scalar
  // operation, the second a vector operation that only affects one lane.
  kFormatB = NEON_B | NEONScalar,
  kFormatH = NEON_H | NEONScalar,
  kFormatS = NEON_S | NEONScalar,
  kFormatD = NEON_D | NEONScalar
};

VectorFormat VectorFormatHalfWidth(const VectorFormat vform);
VectorFormat VectorFormatDoubleWidth(const VectorFormat vform);
VectorFormat VectorFormatDoubleLanes(const VectorFormat vform);
VectorFormat VectorFormatHalfLanes(const VectorFormat vform);
VectorFormat ScalarFormatFromLaneSize(int lanesize);
VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform);
VectorFormat VectorFormatFillQ(const VectorFormat vform);
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
// TODO: Make the return types of these functions consistent.
unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
int LaneSizeInBytesFromFormat(VectorFormat vform);
int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
int LaneCountFromFormat(VectorFormat vform);
int MaxLaneCountFromFormat(VectorFormat vform);
bool IsVectorFormat(VectorFormat vform);
int64_t MaxIntFromFormat(VectorFormat vform);
int64_t MinIntFromFormat(VectorFormat vform);
uint64_t MaxUintFromFormat(VectorFormat vform);


enum NEONFormat {
  NF_UNDEF = 0,
  NF_8B = 1,
  NF_16B = 2,
  NF_4H = 3,
  NF_8H = 4,
  NF_2S = 5,
  NF_4S = 6,
  NF_1D = 7,
  NF_2D = 8,
  NF_B = 9,
  NF_H = 10,
  NF_S = 11,
  NF_D = 12
};

static const unsigned kNEONFormatMaxBits = 6;

struct NEONFormatMap {
  // The bit positions in the instruction to consider.
  uint8_t bits[kNEONFormatMaxBits];

  // Mapping from concatenated bits to format.
  NEONFormat map[1 << kNEONFormatMaxBits];
};

class NEONFormatDecoder {
 public:
  enum SubstitutionMode {
    kPlaceholder,
    kFormat
  };

  // Construct a format decoder with increasingly specific format maps for each
  // substitution. If no format map is specified, the default is the integer
  // format map.
  explicit NEONFormatDecoder(const Instruction* instr) {
    instrbits_ = instr->InstructionBits();
    SetFormatMaps(IntegerFormatMap());
  }
  NEONFormatDecoder(const Instruction* instr,
                    const NEONFormatMap* format) {
    instrbits_ = instr->InstructionBits();
    SetFormatMaps(format);
  }
  NEONFormatDecoder(const Instruction* instr,
                    const NEONFormatMap* format0,
                    const NEONFormatMap* format1) {
    instrbits_ = instr->InstructionBits();
    SetFormatMaps(format0, format1);
  }
  NEONFormatDecoder(const Instruction* instr,
                    const NEONFormatMap* format0,
                    const NEONFormatMap* format1,
                    const NEONFormatMap* format2) {
    instrbits_ = instr->InstructionBits();
    SetFormatMaps(format0, format1, format2);
  }

  // Set the format mapping for all or individual substitutions.
  void SetFormatMaps(const NEONFormatMap* format0,
                     const NEONFormatMap* format1 = NULL,
                     const NEONFormatMap* format2 = NULL) {
    VIXL_ASSERT(format0 != NULL);
    formats_[0] = format0;
    formats_[1] = (format1 == NULL) ? formats_[0] : format1;
    formats_[2] = (format2 == NULL) ? formats_[1] : format2;
  }
  void SetFormatMap(unsigned index, const NEONFormatMap* format) {
    VIXL_ASSERT(index <= (sizeof(formats_) / sizeof(formats_[0])));
    VIXL_ASSERT(format != NULL);
    formats_[index] = format;
  }

  // Substitute %s in the input string with the placeholder string for each
  // register, ie. "'B", "'H", etc.
  const char* SubstitutePlaceholders(const char* string) {
    return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
  }

  // Substitute %s in the input string with a new string based on the
  // substitution mode.
  const char* Substitute(const char* string,
                         SubstitutionMode mode0 = kFormat,
                         SubstitutionMode mode1 = kFormat,
                         SubstitutionMode mode2 = kFormat) {
    snprintf(form_buffer_, sizeof(form_buffer_), string,
             GetSubstitute(0, mode0),
             GetSubstitute(1, mode1),
             GetSubstitute(2, mode2));
    return form_buffer_;
  }

  // Append a "2" to a mnemonic string based on the state of the Q bit.
  const char* Mnemonic(const char* mnemonic) {
    if ((instrbits_ & NEON_Q) != 0) {
      snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
      return mne_buffer_;
    }
    return mnemonic;
  }

  VectorFormat GetVectorFormat(int format_index = 0) {
    return GetVectorFormat(formats_[format_index]);
  }

  VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
    static const VectorFormat vform[] = {
      kFormatUndefined,
      kFormat8B, kFormat16B, kFormat4H, kFormat8H,
      kFormat2S, kFormat4S, kFormat1D, kFormat2D,
      kFormatB, kFormatH, kFormatS, kFormatD
    };
    VIXL_ASSERT(GetNEONFormat(format_map) < (sizeof(vform) / sizeof(vform[0])));
    return vform[GetNEONFormat(format_map)];
  }

  // Built-in mappings for common cases.

  // The integer format map uses three bits (Q, size<1:0>) to encode the
  // "standard" set of NEON integer vector formats.
  static const NEONFormatMap* IntegerFormatMap() {
    static const NEONFormatMap map = {
      {23, 22, 30},
      {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}
    };
    return &map;
  }

  // The long integer format map uses two bits (size<1:0>) to encode the
  // long set of NEON integer vector formats. These are used in narrow, wide
  // and long operations.
  static const NEONFormatMap* LongIntegerFormatMap() {
    static const NEONFormatMap map = {
      {23, 22}, {NF_8H, NF_4S, NF_2D}
    };
    return &map;
  }

  // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
  // formats: NF_2S, NF_4S, NF_2D.
  static const NEONFormatMap* FPFormatMap() {
    // The FP format map assumes two bits (Q, size<0>) are used to encode the
    // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
    static const NEONFormatMap map = {
      {22, 30}, {NF_2S, NF_4S, NF_UNDEF, NF_2D}
    };
    return &map;
  }

  // The load/store format map uses three bits (Q, 11, 10) to encode the
  // set of NEON vector formats.
  static const NEONFormatMap* LoadStoreFormatMap() {
    static const NEONFormatMap map = {
      {11, 10, 30},
      {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
    };
    return &map;
  }

  // The logical format map uses one bit (Q) to encode the NEON vector format:
  // NF_8B, NF_16B.
  static const NEONFormatMap* LogicalFormatMap() {
    static const NEONFormatMap map = {
      {30}, {NF_8B, NF_16B}
    };
    return &map;
  }

  // The triangular format map uses between two and five bits to encode the NEON
  // vector format:
  //   xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
  //   x1000->2S, x1001->4S, 10001->2D, all others undefined.
  static const NEONFormatMap* TriangularFormatMap() {
    static const NEONFormatMap map = {
      {19, 18, 17, 16, 30},
      {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S,
       NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_UNDEF, NF_2D,
       NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S, NF_4S, NF_8B, NF_16B,
       NF_4H, NF_8H, NF_8B, NF_16B}
    };
    return &map;
  }

  // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
  // formats: NF_B, NF_H, NF_S, NF_D.
  static const NEONFormatMap* ScalarFormatMap() {
    static const NEONFormatMap map = {
      {23, 22}, {NF_B, NF_H, NF_S, NF_D}
    };
    return &map;
  }

  // The long scalar format map uses two bits (size<1:0>) to encode the longer
  // NEON scalar formats: NF_H, NF_S, NF_D.
  static const NEONFormatMap* LongScalarFormatMap() {
    static const NEONFormatMap map = {
      {23, 22}, {NF_H, NF_S, NF_D}
    };
    return &map;
  }

  // The FP scalar format map assumes one bit (size<0>) is used to encode the
  // NEON FP scalar formats: NF_S, NF_D.
  static const NEONFormatMap* FPScalarFormatMap() {
    static const NEONFormatMap map = {
      {22}, {NF_S, NF_D}
    };
    return &map;
  }

  // The triangular scalar format map uses between one and four bits to encode
  // the NEON FP scalar formats:
  //   xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
  static const NEONFormatMap* TriangularScalarFormatMap() {
    static const NEONFormatMap map = {
      {19, 18, 17, 16},
      {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B,
       NF_D, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B}
    };
    return &map;
  }

 private:
  // Get a pointer to a string that represents the format or placeholder for
  // the specified substitution index, based on the format map and instruction.
  const char* GetSubstitute(int index, SubstitutionMode mode) {
    if (mode == kFormat) {
      return NEONFormatAsString(GetNEONFormat(formats_[index]));
    }
    VIXL_ASSERT(mode == kPlaceholder);
    return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
  }

  // Get the NEONFormat enumerated value for bits obtained from the
  // instruction based on the specified format mapping.
  NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
    return format_map->map[PickBits(format_map->bits)];
  }

  // Convert a NEONFormat into a string.
  static const char* NEONFormatAsString(NEONFormat format) {
    static const char* formats[] = {
      "undefined",
      "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
      "b", "h", "s", "d"
    };
    VIXL_ASSERT(format < (sizeof(formats) / sizeof(formats[0])));
    return formats[format];
  }

  // Convert a NEONFormat into a register placeholder string.
  static const char* NEONFormatAsPlaceholder(NEONFormat format) {
    VIXL_ASSERT((format == NF_B) || (format == NF_H) ||
                (format == NF_S) || (format == NF_D) ||
                (format == NF_UNDEF));
    static const char* formats[] = {
      "undefined",
      "undefined", "undefined", "undefined", "undefined",
      "undefined", "undefined", "undefined", "undefined",
      "'B", "'H", "'S", "'D"
    };
    return formats[format];
  }

  // Select bits from instrbits_ defined by the bits array, concatenate them,
  // and return the value.
  uint8_t PickBits(const uint8_t bits[]) {
    uint8_t result = 0;
    for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
      if (bits[b] == 0) break;
      result <<= 1;
      result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
    }
    return result;
  }

  Instr instrbits_;
  const NEONFormatMap* formats_[3];
  char form_buffer_[64];
  char mne_buffer_[16];
};
}  // namespace vixl

#endif  // VIXL_A64_INSTRUCTIONS_A64_H_
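
PickBits drives all of the format maps above: it walks the listed bit positions from first to last, stops at a 0 entry, and concatenates the selected instruction bits into an index for the map table. A standalone restatement of that loop, applied to the IntegerFormatMap positions {23, 22, 30}; this is purely illustrative and not the library's code path:

#include <cstdint>
#include <cstdio>

// Concatenate the instruction bits named in `positions` (terminated by 0)
// into a small index, most-significant position first.
uint8_t PickBits(uint32_t instrbits, const uint8_t positions[], int max_bits) {
  uint8_t result = 0;
  for (int b = 0; b < max_bits; b++) {
    if (positions[b] == 0) break;
    result = (result << 1) | ((instrbits >> positions[b]) & 1);
  }
  return result;
}

int main() {
  // Instruction word with Q (bit 30) set and size<1:0> (bits 23:22) == 0b10.
  uint32_t instrbits = (1u << 30) | (1u << 23);
  const uint8_t integer_map_bits[6] = {23, 22, 30, 0, 0, 0};
  // Index 0b101 == 5 selects NF_4S in the integer format map above.
  printf("index = %u\n", PickBits(instrbits, integer_map_bits, 6));
  return 0;
}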
@ -28,7 +28,7 @@
#define VIXL_CODE_BUFFER_H

#include <string.h>
#include "globals.h"
#include "vixl/globals.h"

namespace vixl {

@ -1,4 +1,4 @@
// Copyright 2013, ARM Limited
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@ -24,53 +24,13 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "utils.h"
#include <stdio.h>
#include "compiler-intrinsics.h"

namespace vixl {

uint32_t float_to_rawbits(float value) {
  uint32_t bits = 0;
  memcpy(&bits, &value, 4);
  return bits;
}


uint64_t double_to_rawbits(double value) {
  uint64_t bits = 0;
  memcpy(&bits, &value, 8);
  return bits;
}


float rawbits_to_float(uint32_t bits) {
  float value = 0.0;
  memcpy(&value, &bits, 4);
  return value;
}


double rawbits_to_double(uint64_t bits) {
  double value = 0.0;
  memcpy(&value, &bits, 8);
  return value;
}


int CountLeadingZeros(uint64_t value, int width) {
  VIXL_ASSERT((width == 32) || (width == 64));
  int count = 0;
  uint64_t bit_test = UINT64_C(1) << (width - 1);
  while ((count < width) && ((bit_test & value) == 0)) {
    count++;
    bit_test >>= 1;
  }
  return count;
}


int CountLeadingSignBits(int64_t value, int width) {
  VIXL_ASSERT((width == 32) || (width == 64));
int CountLeadingSignBitsFallBack(int64_t value, int width) {
  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
  if (value >= 0) {
    return CountLeadingZeros(value, width) - 1;
  } else {
@ -79,23 +39,46 @@ int CountLeadingSignBits(int64_t value, int width) {
}


int CountTrailingZeros(uint64_t value, int width) {
  VIXL_ASSERT((width == 32) || (width == 64));
  int count = 0;
  while ((count < width) && (((value >> count) & 1) == 0)) {
    count++;
int CountLeadingZerosFallBack(uint64_t value, int width) {
  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
  if (value == 0) {
    return width;
  }
  int count = 0;
  value = value << (64 - width);
  if ((value & UINT64_C(0xffffffff00000000)) == 0) {
    count += 32;
    value = value << 32;
  }
  if ((value & UINT64_C(0xffff000000000000)) == 0) {
    count += 16;
    value = value << 16;
  }
  if ((value & UINT64_C(0xff00000000000000)) == 0) {
    count += 8;
    value = value << 8;
  }
  if ((value & UINT64_C(0xf000000000000000)) == 0) {
    count += 4;
    value = value << 4;
  }
  if ((value & UINT64_C(0xc000000000000000)) == 0) {
    count += 2;
    value = value << 2;
  }
  if ((value & UINT64_C(0x8000000000000000)) == 0) {
    count += 1;
  }
  count += (value == 0);
  return count;
}


int CountSetBits(uint64_t value, int width) {
  // TODO: Other widths could be added here, as the implementation already
  //       supports them.
  VIXL_ASSERT((width == 32) || (width == 64));
int CountSetBitsFallBack(uint64_t value, int width) {
  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));

  // Mask out unused bits to ensure that they are not counted.
  value &= (UINT64_C(0xffffffffffffffff) >> (64-width));
  value &= (UINT64_C(0xffffffffffffffff) >> (64 - width));

  // Add up the set bits.
  // The algorithm works by adding pairs of bit fields together iteratively,
@ -122,30 +105,40 @@ int CountSetBits(uint64_t value, int width) {
    value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]);
  }

  return value;
  return static_cast<int>(value);
}


uint64_t LowestSetBit(uint64_t value) {
  return value & -value;
}


bool IsPowerOf2(int64_t value) {
  return (value != 0) && ((value & (value - 1)) == 0);
}


unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  VIXL_ASSERT((reg_size % 8) == 0);
int CountTrailingZerosFallBack(uint64_t value, int width) {
  VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  value = value << (64 - width);
  if ((value & UINT64_C(0xffffffff)) == 0) {
    count += 32;
    value = value >> 32;
  }
  return count;
  if ((value & 0xffff) == 0) {
    count += 16;
    value = value >> 16;
  }
  if ((value & 0xff) == 0) {
    count += 8;
    value = value >> 8;
  }
  if ((value & 0xf) == 0) {
    count += 4;
    value = value >> 4;
  }
  if ((value & 0x3) == 0) {
    count += 2;
    value = value >> 2;
  }
  if ((value & 0x1) == 0) {
    count += 1;
  }
  count += (value == 0);
  return count - (64 - width);
}


}  // namespace vixl
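
The new CountLeadingZerosFallBack replaces the old one-bit-at-a-time probe with a binary search: each test halves the remaining window, so a 64-bit count costs six tests instead of up to 64 loop iterations. A standalone check of the same idea against a naive loop (everything restated locally; none of this is the diff's code):

#include <cstdint>
#include <cassert>

// Binary-search count of leading zeros in a 64-bit value.
int Clz64(uint64_t v) {
  if (v == 0) return 64;
  int n = 0;
  if ((v & UINT64_C(0xffffffff00000000)) == 0) { n += 32; v <<= 32; }
  if ((v & UINT64_C(0xffff000000000000)) == 0) { n += 16; v <<= 16; }
  if ((v & UINT64_C(0xff00000000000000)) == 0) { n += 8;  v <<= 8;  }
  if ((v & UINT64_C(0xf000000000000000)) == 0) { n += 4;  v <<= 4;  }
  if ((v & UINT64_C(0xc000000000000000)) == 0) { n += 2;  v <<= 2;  }
  if ((v & UINT64_C(0x8000000000000000)) == 0) { n += 1; }
  return n;
}

int main() {
  // Reference: shift a probe bit down from the top, as the old code did.
  for (uint64_t v : {UINT64_C(1), UINT64_C(0x80), UINT64_C(1) << 63}) {
    int naive = 0;
    for (uint64_t bit = UINT64_C(1) << 63; bit && !(v & bit); bit >>= 1) naive++;
    assert(Clz64(v) == naive);
  }
  return 0;
}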
155 disas/libvixl/vixl/compiler-intrinsics.h Normal file
@ -0,0 +1,155 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


#ifndef VIXL_COMPILER_INTRINSICS_H
#define VIXL_COMPILER_INTRINSICS_H

#include "globals.h"

namespace vixl {

// Helper to check whether the version of GCC used is greater than the specified
// requirement.
#define MAJOR 1000000
#define MINOR 1000
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
    ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR + __GNUC_PATCHLEVEL__) >= \
     ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
    ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR) >= \
     ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
#else
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0
#endif


#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS)

#define COMPILER_HAS_BUILTIN_CLRSB (__has_builtin(__builtin_clrsb))
#define COMPILER_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz))
#define COMPILER_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
#define COMPILER_HAS_BUILTIN_FFS (__has_builtin(__builtin_ffs))
#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))

#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS)
// The documentation for these builtins is available at:
// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html

# define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0))
# define COMPILER_HAS_BUILTIN_CLZ (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_CTZ (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_FFS (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0))

#else
// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually
// implemented C++ methods.

#define COMPILER_HAS_BUILTIN_BSWAP false
#define COMPILER_HAS_BUILTIN_CLRSB false
#define COMPILER_HAS_BUILTIN_CLZ false
#define COMPILER_HAS_BUILTIN_CTZ false
#define COMPILER_HAS_BUILTIN_FFS false
#define COMPILER_HAS_BUILTIN_POPCOUNT false

#endif


template<typename V>
inline bool IsPowerOf2(V value) {
  return (value != 0) && ((value & (value - 1)) == 0);
}


// Declaration of fallback functions.
int CountLeadingSignBitsFallBack(int64_t value, int width);
int CountLeadingZerosFallBack(uint64_t value, int width);
int CountSetBitsFallBack(uint64_t value, int width);
int CountTrailingZerosFallBack(uint64_t value, int width);


// Implementation of intrinsics functions.
// TODO: The implementations could be improved for sizes different from 32bit
// and 64bit: we could mask the values and call the appropriate builtin.

template<typename V>
inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CLRSB
  if (width == 32) {
    return __builtin_clrsb(value);
  } else if (width == 64) {
    return __builtin_clrsbll(value);
  }
#endif
  return CountLeadingSignBitsFallBack(value, width);
}


template<typename V>
inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CLZ
  if (width == 32) {
    return (value == 0) ? 32 : __builtin_clz(static_cast<unsigned>(value));
  } else if (width == 64) {
    return (value == 0) ? 64 : __builtin_clzll(value);
  }
#endif
  return CountLeadingZerosFallBack(value, width);
}


template<typename V>
inline int CountSetBits(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_POPCOUNT
  if (width == 32) {
    return __builtin_popcount(static_cast<unsigned>(value));
  } else if (width == 64) {
    return __builtin_popcountll(value);
  }
#endif
  return CountSetBitsFallBack(value, width);
}


template<typename V>
inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CTZ
  if (width == 32) {
    return (value == 0) ? 32 : __builtin_ctz(static_cast<unsigned>(value));
  } else if (width == 64) {
    return (value == 0) ? 64 : __builtin_ctzll(value);
  }
#endif
  return CountTrailingZerosFallBack(value, width);
}

}  // namespace vixl

#endif  // VIXL_COMPILER_INTRINSICS_H
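
The pattern throughout this header is builtin-if-available with a portable fallback: callers use one name and the preprocessor decides whether it lowers to a single instruction or to the *FallBack routines in utils.cc. A hedged usage sketch, assuming only that this header is on the include path:

#include "vixl/compiler-intrinsics.h"
#include <cstdint>
#include <cstdio>

int main() {
  // Each call resolves to a compiler builtin when one is available,
  // and to the corresponding *FallBack implementation otherwise.
  printf("%d\n", vixl::CountSetBits(UINT64_C(0xff), 64));       // 8
  printf("%d\n", vixl::CountLeadingZeros(UINT32_C(1), 32));     // 31
  printf("%d\n", vixl::CountTrailingZeros(UINT32_C(8), 32));    // 3
  printf("%d\n", vixl::CountLeadingSignBits(INT32_C(-1), 32));  // 31
  return 0;
}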
@ -1,4 +1,4 @@
// Copyright 2013, ARM Limited
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@ -49,20 +49,26 @@
#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>
#include "platform.h"
#include "vixl/platform.h"


typedef uint8_t byte;

// Type for half-precision (16 bit) floating point numbers.
typedef uint16_t float16;

const int KBytes = 1024;
const int MBytes = 1024 * KBytes;

#define VIXL_ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort()
#define VIXL_ABORT() \
    do { printf("in %s, line %i", __FILE__, __LINE__); abort(); } while (false)
#ifdef VIXL_DEBUG
  #define VIXL_ASSERT(condition) assert(condition)
  #define VIXL_CHECK(condition) VIXL_ASSERT(condition)
  #define VIXL_UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); VIXL_ABORT()
  #define VIXL_UNREACHABLE() printf("UNREACHABLE\t"); VIXL_ABORT()
  #define VIXL_UNIMPLEMENTED() \
    do { fprintf(stderr, "UNIMPLEMENTED\t"); VIXL_ABORT(); } while (false)
  #define VIXL_UNREACHABLE() \
    do { fprintf(stderr, "UNREACHABLE\t"); VIXL_ABORT(); } while (false)
#else
  #define VIXL_ASSERT(condition) ((void) 0)
  #define VIXL_CHECK(condition) assert(condition)
@ -76,10 +82,70 @@ const int MBytes = 1024 * KBytes;
#define VIXL_STATIC_ASSERT_LINE(line, condition) \
  typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
  __attribute__((unused))
#define VIXL_STATIC_ASSERT(condition) VIXL_STATIC_ASSERT_LINE(__LINE__, condition) //NOLINT
#define VIXL_STATIC_ASSERT(condition) \
    VIXL_STATIC_ASSERT_LINE(__LINE__, condition)

template <typename T> inline void USE(T) {}
template <typename T1>
inline void USE(T1) {}

#define VIXL_ALIGNMENT_EXCEPTION() printf("ALIGNMENT EXCEPTION\t"); VIXL_ABORT()
template <typename T1, typename T2>
inline void USE(T1, T2) {}

template <typename T1, typename T2, typename T3>
inline void USE(T1, T2, T3) {}

template <typename T1, typename T2, typename T3, typename T4>
inline void USE(T1, T2, T3, T4) {}

#define VIXL_ALIGNMENT_EXCEPTION() \
    do { fprintf(stderr, "ALIGNMENT EXCEPTION\t"); VIXL_ABORT(); } while (0)

// The clang::fallthrough attribute is used along with the Wimplicit-fallthrough
// argument to annotate intentional fall-through between switch labels.
// For more information please refer to:
// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
#ifndef __has_warning
  #define __has_warning(x) 0
#endif

// Note: This option is only available for Clang, and will only be enabled for
// C++11 (201103L).
#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
  #define VIXL_FALLTHROUGH() [[clang::fallthrough]] //NOLINT
#else
  #define VIXL_FALLTHROUGH() do {} while (0)
#endif

#if __cplusplus >= 201103L
  #define VIXL_NO_RETURN [[noreturn]] //NOLINT
#else
  #define VIXL_NO_RETURN __attribute__((noreturn))
#endif

// Some functions might only be marked as "noreturn" for the DEBUG build. This
// macro should be used for such cases (for more details see what
// VIXL_UNREACHABLE expands to).
#ifdef VIXL_DEBUG
  #define VIXL_DEBUG_NO_RETURN VIXL_NO_RETURN
#else
  #define VIXL_DEBUG_NO_RETURN
#endif

#ifdef VIXL_INCLUDE_SIMULATOR
  #ifndef VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE
    #define VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE 1
  #endif
#else
  #ifndef VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE
    #define VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE 0
  #endif
  #if VIXL_GENERATE_SIMULATOR_INSTRUCTIONS_VALUE
    #warning "Generating Simulator instructions without Simulator support."
  #endif
#endif

#ifdef USE_SIMULATOR
  #error "Please see the release notes for USE_SIMULATOR."
#endif

#endif  // VIXL_GLOBALS_H
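
Among the changes above, the VIXL_ABORT family moves from two bare statements to a do { ... } while (false) block. The difference matters whenever a macro is used as the body of an unbraced if: with the old form only the first statement is guarded. A minimal sketch of the hazard, with illustrative macro names rather than the library's:

#include <cstdio>

static int aborts = 0;
// Old style: two statements; only the first binds to an unbraced `if`.
#define FAKE_ABORT_UNSAFE() printf("message\n"); ++aborts
// New style: one statement, safe in any context.
#define FAKE_ABORT_SAFE() do { printf("message\n"); ++aborts; } while (false)

int main() {
  if (false) FAKE_ABORT_UNSAFE();  // ++aborts still runs: the bug.
  if (false) FAKE_ABORT_SAFE();    // nothing runs, as intended.
  printf("aborts = %d\n", aborts);  // prints: aborts = 1
  return 0;
}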
775 disas/libvixl/vixl/invalset.h Normal file
@ -0,0 +1,775 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_INVALSET_H_
#define VIXL_INVALSET_H_

#include <string.h>

#include <algorithm>
#include <vector>

#include "vixl/globals.h"

namespace vixl {

// We define a custom data structure template and its iterator as `std`
// containers do not fit the performance requirements for some of our use cases.
//
// The structure behaves like an iterable unordered set with special properties
// and restrictions. "InvalSet" stands for "Invalidatable Set".
//
// Restrictions and requirements:
// - Adding an element already present in the set is illegal. In debug mode,
//   this is checked at insertion time.
// - The templated class `ElementType` must provide comparison operators so that
//   `std::sort()` can be used.
// - A key must be available to represent invalid elements.
// - Elements with an invalid key must compare higher or equal to any other
//   element.
//
// Use cases and performance considerations:
// Our use cases present two specificities that allow us to design this
// structure to provide fast insertion *and* fast search and deletion
// operations:
// - Elements are (generally) inserted in order (sorted according to their key).
// - A key is available to mark elements as invalid (deleted).
// The backing `std::vector` allows for fast insertions. When
// searching for an element we ensure the elements are sorted (this is generally
// the case) and perform a binary search. When deleting an element we do not
// free the associated memory immediately. Instead, an element to be deleted is
// marked with the 'invalid' key. Other methods of the container take care of
// ignoring entries marked as invalid.
// To avoid the overhead of the `std::vector` container when only few entries
// are used, a number of elements are preallocated.

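The comment block above describes the core trick: deletion writes the 'invalid' key as a tombstone instead of erasing, and traversals skip tombstones. A toy standalone illustration of that deletion strategy over a plain std::vector (this is not InvalSet itself, just the idea it documents):

#include <cstdint>
#include <cstdio>
#include <vector>

// Tombstone-style deletion: mark instead of erase, skip marks on read.
static const int64_t kInvalidKey = INT64_MAX;

void Erase(std::vector<int64_t>* set, int64_t key) {
  for (int64_t& e : *set) {
    if (e == key) { e = kInvalidKey; return; }  // O(1) "free", no shifting
  }
}

int main() {
  std::vector<int64_t> set = {10, 20, 30};
  Erase(&set, 20);
  for (int64_t e : set) {
    if (e == kInvalidKey) continue;  // readers ignore tombstones
    printf("%lld\n", (long long)e);  // prints 10 then 30
  }
  return 0;
}
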
// 'ElementType' and 'KeyType' are respectively the types of the elements and
|
||||
// their key. The structure only reclaims memory when safe to do so, if the
|
||||
// number of elements that can be reclaimed is greater than `RECLAIM_FROM` and
|
||||
// greater than `<total number of elements> / RECLAIM_FACTOR.
|
#define TEMPLATE_INVALSET_P_DECL \
  class ElementType, \
  unsigned N_PREALLOCATED_ELEMENTS, \
  class KeyType, \
  KeyType INVALID_KEY, \
  size_t RECLAIM_FROM, \
  unsigned RECLAIM_FACTOR

#define TEMPLATE_INVALSET_P_DEF \
  ElementType, N_PREALLOCATED_ELEMENTS, \
  KeyType, INVALID_KEY, RECLAIM_FROM, RECLAIM_FACTOR

template<class S> class InvalSetIterator;  // Forward declaration.

template<TEMPLATE_INVALSET_P_DECL> class InvalSet {
 public:
  InvalSet();
  ~InvalSet();

  static const size_t kNPreallocatedElements = N_PREALLOCATED_ELEMENTS;
  static const KeyType kInvalidKey = INVALID_KEY;

  // It is illegal to insert an element already present in the set.
  void insert(const ElementType& element);

  // Looks for the specified element in the set and - if found - deletes it.
  void erase(const ElementType& element);

  // This indicates the number of (valid) elements stored in this set.
  size_t size() const;

  // Returns true if no elements are stored in the set.
  // Note that this does not mean that the backing storage is empty: it can
  // still contain invalid elements.
  bool empty() const;

  void clear();

  const ElementType min_element();

  // This returns the key of the minimum element in the set.
  KeyType min_element_key();

  static bool IsValid(const ElementType& element);
  static KeyType Key(const ElementType& element);
  static void SetKey(ElementType* element, KeyType key);

 protected:
  // Returns a pointer to the element in vector_ if it was found, or NULL
  // otherwise.
  ElementType* Search(const ElementType& element);

  // The argument *must* point to an element stored in *this* set.
  // This function is not allowed to move elements in the backing vector
  // storage.
  void EraseInternal(ElementType* element);

  // The elements in the range searched must be sorted.
  ElementType* BinarySearch(const ElementType& element,
                            ElementType* start,
                            ElementType* end) const;

  // Sort the elements.
  enum SortType {
    // The 'hard' version guarantees that invalid elements are moved to the end
    // of the container.
    kHardSort,
    // The 'soft' version only guarantees that the elements will be sorted.
    // Invalid elements may still be present anywhere in the set.
    kSoftSort
  };
  void Sort(SortType sort_type);

  // Delete the elements that have an invalid key. The complexity is linear
  // with the size of the vector.
  void Clean();

  const ElementType Front() const;
  const ElementType Back() const;

  // Delete invalid trailing elements and return the last valid element in the
  // set.
  const ElementType CleanBack();

  // Returns a pointer to the start or end of the backing storage.
  const ElementType* StorageBegin() const;
  const ElementType* StorageEnd() const;
  ElementType* StorageBegin();
  ElementType* StorageEnd();

  // Returns the index of the element within the backing storage. The element
  // must belong to the backing storage.
  size_t ElementIndex(const ElementType* element) const;

  // Returns the element at the specified index in the backing storage.
  const ElementType* ElementAt(size_t index) const;
  ElementType* ElementAt(size_t index);

  static const ElementType* FirstValidElement(const ElementType* from,
                                              const ElementType* end);

  void CacheMinElement();
  const ElementType CachedMinElement() const;

  bool ShouldReclaimMemory() const;
  void ReclaimMemory();

  bool IsUsingVector() const { return vector_ != NULL; }
  void set_sorted(bool sorted) { sorted_ = sorted; }

  // We cache some data commonly required by users to improve performance.
  // We cannot cache pointers to elements as we do not control the backing
  // storage.
  bool valid_cached_min_;
  size_t cached_min_index_;  // Valid iff `valid_cached_min_` is true.
  KeyType cached_min_key_;   // Valid iff `valid_cached_min_` is true.

  // Indicates whether the elements are sorted.
  bool sorted_;

  // This represents the number of (valid) elements in this set.
  size_t size_;

  // The backing storage is either the array of preallocated elements or the
  // vector. The structure starts by using the preallocated elements, and
  // transitions (permanently) to using the vector once more than
  // kNPreallocatedElements are used.
  // Elements are only invalidated when using the vector. The preallocated
  // storage always only contains valid elements.
  ElementType preallocated_[kNPreallocatedElements];
  std::vector<ElementType>* vector_;

#ifdef VIXL_DEBUG
  // Iterators acquire and release this monitor. While a set is acquired,
  // certain operations are illegal to ensure that the iterator will
  // correctly iterate over the elements in the set.
  int monitor_;
  int monitor() const { return monitor_; }
  void Acquire() { monitor_++; }
  void Release() {
    monitor_--;
    VIXL_ASSERT(monitor_ >= 0);
  }
#endif

  friend class InvalSetIterator<InvalSet<TEMPLATE_INVALSET_P_DEF> >;
  typedef ElementType _ElementType;
  typedef KeyType _KeyType;
};
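// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the VIXL sources: a minimal
// element type satisfying the restrictions above. Using INT_MAX (from
// <climits>) as the invalid key means plain integer comparison already sorts
// invalidated elements after every valid one. The instantiation parameters
// below are made up for illustration; a real user must additionally define
// Key() and SetKey() for the concrete instantiation.
//
// struct Entry {
//   int key;
//   bool operator<(const Entry& other) const { return key < other.key; }
//   bool operator>(const Entry& other) const { return key > other.key; }
//   bool operator==(const Entry& other) const { return key == other.key; }
// };
// // 8 preallocated slots; INT_MAX marks deleted entries; memory is only
// // reclaimed once more than 8 invalid slots make up over a quarter of the
// // backing vector.
// typedef InvalSet<Entry, 8, int, INT_MAX, 8, 4> EntrySet;
// ---------------------------------------------------------------------------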
template<class S> class InvalSetIterator {
 private:
  // Redefine types to mirror the associated set types.
  typedef typename S::_ElementType ElementType;
  typedef typename S::_KeyType KeyType;

 public:
  explicit InvalSetIterator(S* inval_set);
  ~InvalSetIterator();

  ElementType* Current() const;
  void Advance();
  bool Done() const;

  // Mark this iterator as 'done'.
  void Finish();

  // Delete the current element and advance the iterator to point to the next
  // element.
  void DeleteCurrentAndAdvance();

  static bool IsValid(const ElementType& element);
  static KeyType Key(const ElementType& element);

 protected:
  void MoveToValidElement();

  // Indicates if the iterator is looking at the vector or at the preallocated
  // elements.
  const bool using_vector_;
  // Used when looking at the preallocated elements, or in debug mode when
  // using the vector to track how many times the iterator has advanced.
  size_t index_;
  typename std::vector<ElementType>::iterator iterator_;
  S* inval_set_;
};
template<TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::InvalSet()
  : valid_cached_min_(false),
    sorted_(true), size_(0), vector_(NULL) {
#ifdef VIXL_DEBUG
  monitor_ = 0;
#endif
}


template<TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::~InvalSet() {
  VIXL_ASSERT(monitor_ == 0);
  delete vector_;
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::insert(const ElementType& element) {
  VIXL_ASSERT(monitor() == 0);
  VIXL_ASSERT(IsValid(element));
  VIXL_ASSERT(Search(element) == NULL);
  set_sorted(empty() || (sorted_ && (element > CleanBack())));
  if (IsUsingVector()) {
    vector_->push_back(element);
  } else {
    if (size_ < kNPreallocatedElements) {
      preallocated_[size_] = element;
    } else {
      // Transition to using the vector.
      vector_ = new std::vector<ElementType>(preallocated_,
                                             preallocated_ + size_);
      vector_->push_back(element);
    }
  }
  size_++;

  if (valid_cached_min_ && (element < min_element())) {
    cached_min_index_ = IsUsingVector() ? vector_->size() - 1 : size_ - 1;
    cached_min_key_ = Key(element);
    valid_cached_min_ = true;
  }

  if (ShouldReclaimMemory()) {
    ReclaimMemory();
  }
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::erase(const ElementType& element) {
  VIXL_ASSERT(monitor() == 0);
  VIXL_ASSERT(IsValid(element));
  ElementType* local_element = Search(element);
  if (local_element != NULL) {
    EraseInternal(local_element);
  }
}


template<TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::Search(
    const ElementType& element) {
  VIXL_ASSERT(monitor() == 0);
  if (empty()) {
    return NULL;
  }
  if (ShouldReclaimMemory()) {
    ReclaimMemory();
  }
  if (!sorted_) {
    Sort(kHardSort);
  }
  if (!valid_cached_min_) {
    CacheMinElement();
  }
  return BinarySearch(element, ElementAt(cached_min_index_), StorageEnd());
}


template<TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::size() const {
  return size_;
}


template<TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::empty() const {
  return size_ == 0;
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::clear() {
  VIXL_ASSERT(monitor() == 0);
  size_ = 0;
  if (IsUsingVector()) {
    vector_->clear();
  }
  set_sorted(true);
  valid_cached_min_ = false;
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::min_element() {
  VIXL_ASSERT(monitor() == 0);
  VIXL_ASSERT(!empty());
  CacheMinElement();
  return *ElementAt(cached_min_index_);
}


template<TEMPLATE_INVALSET_P_DECL>
KeyType InvalSet<TEMPLATE_INVALSET_P_DEF>::min_element_key() {
  VIXL_ASSERT(monitor() == 0);
  if (valid_cached_min_) {
    return cached_min_key_;
  } else {
    return Key(min_element());
  }
}


template<TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::IsValid(const ElementType& element) {
  return Key(element) != kInvalidKey;
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::EraseInternal(ElementType* element) {
  // Note that this function must be safe even while an iterator has acquired
  // this set.
  VIXL_ASSERT(element != NULL);
  size_t deleted_index = ElementIndex(element);
  if (IsUsingVector()) {
    VIXL_ASSERT((&(vector_->front()) <= element) &&
                (element <= &(vector_->back())));
    SetKey(element, kInvalidKey);
  } else {
    VIXL_ASSERT((preallocated_ <= element) &&
                (element < (preallocated_ + kNPreallocatedElements)));
    ElementType* end = preallocated_ + kNPreallocatedElements;
    size_t copy_size = sizeof(*element) * (end - element - 1);
    memmove(element, element + 1, copy_size);
  }
  size_--;

  if (valid_cached_min_ &&
      (deleted_index == cached_min_index_)) {
    if (sorted_ && !empty()) {
      const ElementType* min = FirstValidElement(element, StorageEnd());
      cached_min_index_ = ElementIndex(min);
      cached_min_key_ = Key(*min);
      valid_cached_min_ = true;
    } else {
      valid_cached_min_ = false;
    }
  }
}


template<TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::BinarySearch(
    const ElementType& element, ElementType* start, ElementType* end) const {
  if (start == end) {
    return NULL;
  }
  VIXL_ASSERT(sorted_);
  VIXL_ASSERT(start < end);
  VIXL_ASSERT(!empty());

  // Perform a binary search through the elements while ignoring invalid
  // elements.
  ElementType* elements = start;
  size_t low = 0;
  size_t high = (end - start) - 1;
  while (low < high) {
    // Find valid bounds.
    while (!IsValid(elements[low]) && (low < high)) ++low;
    while (!IsValid(elements[high]) && (low < high)) --high;
    VIXL_ASSERT(low <= high);
    // Avoid overflow when computing the middle index.
    size_t middle = low / 2 + high / 2 + (low & high & 1);
    if ((middle == low) || (middle == high)) {
      break;
    }
    while (!IsValid(elements[middle]) && (middle < high - 1)) ++middle;
    while (!IsValid(elements[middle]) && (low + 1 < middle)) --middle;
    if (!IsValid(elements[middle])) {
      break;
    }
    if (elements[middle] < element) {
      low = middle;
    } else {
      high = middle;
    }
  }

  if (elements[low] == element) return &elements[low];
  if (elements[high] == element) return &elements[high];
  return NULL;
}
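// Editor's note, not part of VIXL: the midpoint expression above is the
// overflow-safe form of (low + high) / 2. Halving each operand first cannot
// wrap, and the (low & high & 1) term restores the unit lost when two odd
// halves both round down. For example:
//   low = 7, high = 9:  7/2 + 9/2 + (7 & 9 & 1) = 3 + 4 + 1 = 8 = (7 + 9) / 2
//   low = 6, high = 9:  6/2 + 9/2 + (6 & 9 & 1) = 3 + 4 + 0 = 7 = (6 + 9) / 2
// whereas low + high itself could overflow for two large size_t indices.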
template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::Sort(SortType sort_type) {
  VIXL_ASSERT(monitor() == 0);
  if (sort_type == kSoftSort) {
    if (sorted_) {
      return;
    }
  }
  if (empty()) {
    return;
  }

  Clean();
  std::sort(StorageBegin(), StorageEnd());

  set_sorted(true);
  cached_min_index_ = 0;
  cached_min_key_ = Key(Front());
  valid_cached_min_ = true;
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::Clean() {
  VIXL_ASSERT(monitor() == 0);
  if (empty() || !IsUsingVector()) {
    return;
  }
  // Manually iterate through the vector storage to discard invalid elements.
  ElementType* start = &(vector_->front());
  ElementType* end = start + vector_->size();
  ElementType* c = start;
  ElementType* first_invalid;
  ElementType* first_valid;
  ElementType* next_invalid;

  while (c < end && IsValid(*c)) { c++; }
  first_invalid = c;

  while (c < end) {
    while (c < end && !IsValid(*c)) { c++; }
    first_valid = c;
    while (c < end && IsValid(*c)) { c++; }
    next_invalid = c;

    ptrdiff_t n_moved_elements = (next_invalid - first_valid);
    memmove(first_invalid, first_valid, n_moved_elements * sizeof(*c));
    first_invalid = first_invalid + n_moved_elements;
    c = next_invalid;
  }

  // Delete the trailing invalid elements.
  vector_->erase(vector_->begin() + (first_invalid - start), vector_->end());
  VIXL_ASSERT(vector_->size() == size_);

  if (sorted_) {
    valid_cached_min_ = true;
    cached_min_index_ = 0;
    cached_min_key_ = Key(*ElementAt(0));
  } else {
    valid_cached_min_ = false;
  }
}
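// Editor's illustrative sketch, not part of VIXL: Clean() above compacts
// whole runs of valid elements leftwards over the invalidated slots with
// memmove, preserving their relative order. The same idea, element by
// element, on a plain int array where -1 plays the role of the invalid key:
//
// static size_t CompactValid(int* data, size_t n) {
//   size_t out = 0;
//   for (size_t i = 0; i < n; i++) {
//     if (data[i] != -1) {
//       data[out++] = data[i];  // Clean() batches these copies per run.
//     }
//   }
//   return out;  // New logical size; the trailing slots are then erased.
// }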
template<TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Front() const {
  VIXL_ASSERT(!empty());
  return IsUsingVector() ? vector_->front() : preallocated_[0];
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Back() const {
  VIXL_ASSERT(!empty());
  return IsUsingVector() ? vector_->back() : preallocated_[size_ - 1];
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::CleanBack() {
  VIXL_ASSERT(monitor() == 0);
  if (IsUsingVector()) {
    // Delete the invalid trailing elements.
    typename std::vector<ElementType>::reverse_iterator it = vector_->rbegin();
    while (!IsValid(*it)) {
      it++;
    }
    vector_->erase(it.base(), vector_->end());
  }
  return Back();
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() const {
  return IsUsingVector() ? &(vector_->front()) : preallocated_;
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() const {
  return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
}


template<TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() {
  return IsUsingVector() ? &(vector_->front()) : preallocated_;
}


template<TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() {
  return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
}


template<TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::ElementIndex(
    const ElementType* element) const {
  VIXL_ASSERT((StorageBegin() <= element) && (element < StorageEnd()));
  return element - StorageBegin();
}


template<TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::ElementAt(
    size_t index) const {
  VIXL_ASSERT(
      (IsUsingVector() && (index < vector_->size())) || (index < size_));
  return StorageBegin() + index;
}

template<TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::ElementAt(size_t index) {
  VIXL_ASSERT(
      (IsUsingVector() && (index < vector_->size())) || (index < size_));
  return StorageBegin() + index;
}

template<TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::FirstValidElement(
    const ElementType* from, const ElementType* end) {
  while ((from < end) && !IsValid(*from)) {
    from++;
  }
  return from;
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::CacheMinElement() {
  VIXL_ASSERT(monitor() == 0);
  VIXL_ASSERT(!empty());

  if (valid_cached_min_) {
    return;
  }

  if (sorted_) {
    const ElementType* min = FirstValidElement(StorageBegin(), StorageEnd());
    cached_min_index_ = ElementIndex(min);
    cached_min_key_ = Key(*min);
    valid_cached_min_ = true;
  } else {
    Sort(kHardSort);
  }
  VIXL_ASSERT(valid_cached_min_);
}


template<TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::ShouldReclaimMemory() const {
  if (!IsUsingVector()) {
    return false;
  }
  size_t n_invalid_elements = vector_->size() - size_;
  return (n_invalid_elements > RECLAIM_FROM) &&
         (n_invalid_elements > vector_->size() / RECLAIM_FACTOR);
}


template<TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::ReclaimMemory() {
  VIXL_ASSERT(monitor() == 0);
  Clean();
}
template<class S>
InvalSetIterator<S>::InvalSetIterator(S* inval_set)
  : using_vector_((inval_set != NULL) && inval_set->IsUsingVector()),
    index_(0),
    inval_set_(inval_set) {
  if (inval_set != NULL) {
    inval_set->Sort(S::kSoftSort);
#ifdef VIXL_DEBUG
    inval_set->Acquire();
#endif
    if (using_vector_) {
      iterator_ = typename std::vector<ElementType>::iterator(
          inval_set_->vector_->begin());
    }
    MoveToValidElement();
  }
}


template<class S>
InvalSetIterator<S>::~InvalSetIterator() {
#ifdef VIXL_DEBUG
  if (inval_set_ != NULL) {
    inval_set_->Release();
  }
#endif
}


template<class S>
typename S::_ElementType* InvalSetIterator<S>::Current() const {
  VIXL_ASSERT(!Done());
  if (using_vector_) {
    return &(*iterator_);
  } else {
    return &(inval_set_->preallocated_[index_]);
  }
}


template<class S>
void InvalSetIterator<S>::Advance() {
  VIXL_ASSERT(!Done());
  if (using_vector_) {
    iterator_++;
#ifdef VIXL_DEBUG
    index_++;
#endif
    MoveToValidElement();
  } else {
    index_++;
  }
}


template<class S>
bool InvalSetIterator<S>::Done() const {
  if (using_vector_) {
    bool done = (iterator_ == inval_set_->vector_->end());
    VIXL_ASSERT(done == (index_ == inval_set_->size()));
    return done;
  } else {
    return index_ == inval_set_->size();
  }
}


template<class S>
void InvalSetIterator<S>::Finish() {
  VIXL_ASSERT(inval_set_->sorted_);
  if (using_vector_) {
    iterator_ = inval_set_->vector_->end();
  }
  index_ = inval_set_->size();
}


template<class S>
void InvalSetIterator<S>::DeleteCurrentAndAdvance() {
  if (using_vector_) {
    inval_set_->EraseInternal(&(*iterator_));
    MoveToValidElement();
  } else {
    inval_set_->EraseInternal(inval_set_->preallocated_ + index_);
  }
}


template<class S>
bool InvalSetIterator<S>::IsValid(const ElementType& element) {
  return S::IsValid(element);
}


template<class S>
typename S::_KeyType InvalSetIterator<S>::Key(const ElementType& element) {
  return S::Key(element);
}


template<class S>
void InvalSetIterator<S>::MoveToValidElement() {
  if (using_vector_) {
    while ((iterator_ != inval_set_->vector_->end()) && !IsValid(*iterator_)) {
      iterator_++;
    }
  } else {
    VIXL_ASSERT(inval_set_->empty() || IsValid(inval_set_->preallocated_[0]));
    // Nothing to do.
  }
}


#undef TEMPLATE_INVALSET_P_DECL
#undef TEMPLATE_INVALSET_P_DEF

}  // namespace vixl

#endif  // VIXL_INVALSET_H_
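A typical traversal of the container above might look like this editor's sketch, which is not part of VIXL; `EntrySet` is the hypothetical instantiation from the earlier example. The iterator soft-sorts the set when constructed and, in debug builds, holds the set's monitor so that operations which could move elements trip an assertion:

// Editor's usage sketch, assuming the hypothetical EntrySet from above.
// void DropEvenKeys(EntrySet* set) {
//   InvalSetIterator<EntrySet> it(set);
//   while (!it.Done()) {
//     if (it.Current()->key % 2 == 0) {
//       it.DeleteCurrentAndAdvance();  // Invalidates (or shifts out) the element.
//     } else {
//       it.Advance();                  // Skips invalidated slots automatically.
//     }
//   }
// }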
@ -1,4 +1,4 @@
// Copyright 2013, ARM Limited
// Copyright 2014, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
142 disas/libvixl/vixl/utils.cc Normal file
@ -0,0 +1,142 @@
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include "vixl/utils.h"
#include <stdio.h>

namespace vixl {

uint32_t float_to_rawbits(float value) {
  uint32_t bits = 0;
  memcpy(&bits, &value, 4);
  return bits;
}


uint64_t double_to_rawbits(double value) {
  uint64_t bits = 0;
  memcpy(&bits, &value, 8);
  return bits;
}


float rawbits_to_float(uint32_t bits) {
  float value = 0.0;
  memcpy(&value, &bits, 4);
  return value;
}


double rawbits_to_double(uint64_t bits) {
  double value = 0.0;
  memcpy(&value, &bits, 8);
  return value;
}
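// Editor's note, not part of VIXL: memcpy is the portable way to reinterpret
// an object's bit pattern without violating C++ strict-aliasing rules, and
// optimizing compilers reduce these 4- and 8-byte copies to a single register
// move. For instance:
//   float_to_rawbits(1.0f)       == 0x3f800000
//   rawbits_to_float(0x3f800000) == 1.0f
// whereas *reinterpret_cast<uint32_t*>(&value) would be undefined behaviour.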
uint32_t float_sign(float val) {
  uint32_t rawbits = float_to_rawbits(val);
  return unsigned_bitextract_32(31, 31, rawbits);
}


uint32_t float_exp(float val) {
  uint32_t rawbits = float_to_rawbits(val);
  return unsigned_bitextract_32(30, 23, rawbits);
}


uint32_t float_mantissa(float val) {
  uint32_t rawbits = float_to_rawbits(val);
  return unsigned_bitextract_32(22, 0, rawbits);
}


uint32_t double_sign(double val) {
  uint64_t rawbits = double_to_rawbits(val);
  return static_cast<uint32_t>(unsigned_bitextract_64(63, 63, rawbits));
}


uint32_t double_exp(double val) {
  uint64_t rawbits = double_to_rawbits(val);
  return static_cast<uint32_t>(unsigned_bitextract_64(62, 52, rawbits));
}


uint64_t double_mantissa(double val) {
  uint64_t rawbits = double_to_rawbits(val);
  return unsigned_bitextract_64(51, 0, rawbits);
}


float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
  uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
  return rawbits_to_float(bits);
}


double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
  uint64_t bits = (sign << 63) | (exp << 52) | mantissa;
  return rawbits_to_double(bits);
}
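// Editor's worked example, not part of VIXL: the pack helpers are exact
// inverses of the sign/exp/mantissa extractors. For single precision:
//   float_pack(0, 127, 0)        == 1.0f   (sign 0, biased exponent 127)
//   float_pack(1, 128, 0x400000) == -3.0f  (-1.5 * 2^1)
// and float_sign(-3.0f) == 1, float_exp(-3.0f) == 128,
// float_mantissa(-3.0f) == 0x400000 recover the three arguments.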
int float16classify(float16 value) {
  uint16_t exponent_max = (1 << 5) - 1;
  uint16_t exponent_mask = exponent_max << 10;
  uint16_t mantissa_mask = (1 << 10) - 1;

  uint16_t exponent = (value & exponent_mask) >> 10;
  uint16_t mantissa = value & mantissa_mask;
  if (exponent == 0) {
    if (mantissa == 0) {
      return FP_ZERO;
    }
    return FP_SUBNORMAL;
  } else if (exponent == exponent_max) {
    if (mantissa == 0) {
      return FP_INFINITE;
    }
    return FP_NAN;
  }
  return FP_NORMAL;
}
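// Editor's worked example, not part of VIXL: half precision is 1 sign bit,
// 5 exponent bits and 10 mantissa bits, so for instance:
//   float16classify(0x0000) == FP_ZERO       (all bits clear)
//   float16classify(0x0001) == FP_SUBNORMAL  (zero exponent, nonzero mantissa)
//   float16classify(0x3c00) == FP_NORMAL     (1.0: biased exponent 15)
//   float16classify(0x7c00) == FP_INFINITE   (exponent all ones, mantissa 0)
//   float16classify(0x7e00) == FP_NAN        (exponent all ones, mantissa != 0)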
unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  VIXL_ASSERT((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}

}  // namespace vixl
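As an editor's worked example (not from the VIXL sources): CountClearHalfWords() reports how many 16-bit chunks of an immediate are entirely zero, which an assembler can use when choosing a short move-immediate sequence.

//   CountClearHalfWords(UINT64_C(0x0000ffff00000000), 64) == 3
//   CountClearHalfWords(UINT64_C(0x00000000abcd0000), 32) == 1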
@ -1,4 +1,4 @@
// Copyright 2013, ARM Limited
// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@ -27,16 +27,17 @@
#ifndef VIXL_UTILS_H
#define VIXL_UTILS_H

#include <math.h>
#include <string.h>
#include "globals.h"
#include <cmath>
#include "vixl/globals.h"
#include "vixl/compiler-intrinsics.h"

namespace vixl {

// Macros for compile-time format checking.
#if defined(__GNUC__)
#if GCC_VERSION_OR_NEWER(4, 4, 0)
#define PRINTF_CHECK(format_index, varargs_index) \
  __attribute__((format(printf, format_index, varargs_index)))
  __attribute__((format(gnu_printf, format_index, varargs_index)))
#else
#define PRINTF_CHECK(format_index, varargs_index)
#endif
@ -53,9 +54,9 @@ inline bool is_uintn(unsigned n, int64_t x) {
  return !(x >> n);
}

inline unsigned truncate_to_intn(unsigned n, int64_t x) {
inline uint32_t truncate_to_intn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return (x & ((INT64_C(1) << n) - 1));
  return static_cast<uint32_t>(x & ((INT64_C(1) << n) - 1));
}

#define INT_1_TO_63_LIST(V) \
@ -73,7 +74,7 @@ inline bool is_int##N(int64_t x) { return is_intn(N, x); }
#define DECLARE_IS_UINT_N(N) \
inline bool is_uint##N(int64_t x) { return is_uintn(N, x); }
#define DECLARE_TRUNCATE_TO_INT_N(N) \
inline int truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
inline uint32_t truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
@ -104,12 +105,24 @@ uint64_t double_to_rawbits(double value);
float rawbits_to_float(uint32_t bits);
double rawbits_to_double(uint64_t bits);

uint32_t float_sign(float val);
uint32_t float_exp(float val);
uint32_t float_mantissa(float val);
uint32_t double_sign(double val);
uint32_t double_exp(double val);
uint64_t double_mantissa(double val);

float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa);
double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa);

// An fpclassify() function for 16-bit half-precision floats.
int float16classify(float16 value);

// NaN tests.
inline bool IsSignallingNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  uint64_t raw = double_to_rawbits(num);
  if (isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
  if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
    return true;
  }
  return false;
@ -119,30 +132,37 @@ inline bool IsSignallingNaN(double num) {
inline bool IsSignallingNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  uint32_t raw = float_to_rawbits(num);
  if (isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
  if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}


inline bool IsSignallingNaN(float16 num) {
  const uint16_t kFP16QuietNaNMask = 0x0200;
  return (float16classify(num) == FP_NAN) &&
         ((num & kFP16QuietNaNMask) == 0);
}


template <typename T>
inline bool IsQuietNaN(T num) {
  return isnan(num) && !IsSignallingNaN(num);
  return std::isnan(num) && !IsSignallingNaN(num);
}


// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  VIXL_ASSERT(isnan(num));
  VIXL_ASSERT(std::isnan(num));
  return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask);
}


inline float ToQuietNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  VIXL_ASSERT(isnan(num));
  VIXL_ASSERT(std::isnan(num));
  return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask);
}

@ -158,16 +178,71 @@ inline float FusedMultiplyAdd(float op1, float op2, float a) {
}


// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
uint64_t LowestSetBit(uint64_t value);
bool IsPowerOf2(int64_t value);
inline uint64_t LowestSetBit(uint64_t value) {
  return value & -value;
}


template<typename T>
inline int HighestSetBitPosition(T value) {
  VIXL_ASSERT(value != 0);
  return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
}


template<typename V>
inline int WhichPowerOf2(V value) {
  VIXL_ASSERT(IsPowerOf2(value));
  return CountTrailingZeros(value);
}
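// Editor's worked example, not part of VIXL, for value = 0x50 (0b1010000):
//   LowestSetBit(0x50)          == 0x10  (0x50 & -0x50 isolates bit 4)
//   HighestSetBitPosition(0x50) == 6     (bit 6 is the topmost set bit)
//   WhichPowerOf2(0x40)         == 6     (0x40 == 1 << 6)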
unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);


template <typename T>
T ReverseBits(T value) {
  VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
              (sizeof(value) == 4) || (sizeof(value) == 8));
  T result = 0;
  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
    result = (result << 1) | (value & 1);
    value >>= 1;
  }
  return result;
}


template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
  VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
  VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
  // Split the 64-bit value into an 8-bit array, where b[0] is the least
  // significant byte, and b[7] is the most significant.
  uint8_t bytes[8];
  uint64_t mask = UINT64_C(0xff00000000000000);
  for (int i = 7; i >= 0; i--) {
    bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
    mask >>= 8;
  }

  // Permutation tables for REV instructions.
  //  permute_table[0] is used by REV16_x, REV16_w
  //  permute_table[1] is used by REV32_x, REV_w
  //  permute_table[2] is used by REV_x
  VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
  static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
                                               {4, 5, 6, 7, 0, 1, 2, 3},
                                               {0, 1, 2, 3, 4, 5, 6, 7} };
  T result = 0;
  for (int i = 0; i < 8; i++) {
    result <<= 8;
    result |= bytes[permute_table[block_bytes_log2 - 1][i]];
  }
  return result;
}
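// Editor's worked example, not part of VIXL: block_bytes_log2 selects the
// swap granularity, mirroring the A64 REV16/REV32/REV instructions. With
// value = UINT64_C(0x0102030405060708):
//   ReverseBytes(value, 1) == 0x0201040306050807  (swap within 16-bit blocks)
//   ReverseBytes(value, 2) == 0x0403020108070605  (swap within 32-bit blocks)
//   ReverseBytes(value, 3) == 0x0807060504030201  (swap the whole register)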
// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template<typename T>
@ -808,6 +808,7 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
    DeviceState *dev;
    char *nodename;
    int i;
    PCIHostState *pci;

    dev = qdev_create(NULL, TYPE_GPEX_HOST);
    qdev_init_nofail(dev);
@ -847,6 +848,19 @@ static void create_pcie(const VirtBoardInfo *vbi, qemu_irq *pic,
        sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, pic[irq + i]);
    }

    pci = PCI_HOST_BRIDGE(dev);
    if (pci->bus) {
        for (i = 0; i < nb_nics; i++) {
            NICInfo *nd = &nd_table[i];

            if (!nd->model) {
                nd->model = g_strdup("virtio");
            }

            pci_nic_init_nofail(nd, pci->bus, nd->model, NULL);
        }
    }

    nodename = g_strdup_printf("/pcie@%" PRIx64, base);
    qemu_fdt_add_subnode(vbi->fdt, nodename);
    qemu_fdt_setprop_string(vbi->fdt, nodename,
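With this hunk, NICs described through QEMU's legacy nd_table[] entries (populated by options such as `-net nic`) are instantiated on the virt board's PCIe root bus, with the model defaulting to virtio when none is given. As a hedged illustration, a command line along the lines of `qemu-system-aarch64 -M virt -net nic -net user` should now produce a virtio-net PCI adapter, where previously the legacy option had no effect on this board.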
@ -177,16 +177,6 @@ static inline int streamid_from_addr(hwaddr addr)
    return sid;
}

#ifdef DEBUG_ENET
static void stream_desc_show(struct SDesc *d)
{
    qemu_log("buffer_addr = " PRIx64 "\n", d->buffer_address);
    qemu_log("nxtdesc = " PRIx64 "\n", d->nxtdesc);
    qemu_log("control = %x\n", d->control);
    qemu_log("status = %x\n", d->status);
}
#endif

static void stream_desc_load(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;
@ -29,77 +29,73 @@

static char const *imx31_ccm_reg_name(uint32_t reg)
{
    static char unknown[20];

    switch (reg) {
    case 0:
    case IMX31_CCM_CCMR_REG:
        return "CCMR";
    case 1:
    case IMX31_CCM_PDR0_REG:
        return "PDR0";
    case 2:
    case IMX31_CCM_PDR1_REG:
        return "PDR1";
    case 3:
    case IMX31_CCM_RCSR_REG:
        return "RCSR";
    case 4:
    case IMX31_CCM_MPCTL_REG:
        return "MPCTL";
    case 5:
    case IMX31_CCM_UPCTL_REG:
        return "UPCTL";
    case 6:
    case IMX31_CCM_SPCTL_REG:
        return "SPCTL";
    case 7:
    case IMX31_CCM_COSR_REG:
        return "COSR";
    case 8:
    case IMX31_CCM_CGR0_REG:
        return "CGR0";
    case 9:
    case IMX31_CCM_CGR1_REG:
        return "CGR1";
    case 10:
    case IMX31_CCM_CGR2_REG:
        return "CGR2";
    case 11:
    case IMX31_CCM_WIMR_REG:
        return "WIMR";
    case 12:
    case IMX31_CCM_LDC_REG:
        return "LDC";
    case 13:
    case IMX31_CCM_DCVR0_REG:
        return "DCVR0";
    case 14:
    case IMX31_CCM_DCVR1_REG:
        return "DCVR1";
    case 15:
    case IMX31_CCM_DCVR2_REG:
        return "DCVR2";
    case 16:
    case IMX31_CCM_DCVR3_REG:
        return "DCVR3";
    case 17:
    case IMX31_CCM_LTR0_REG:
        return "LTR0";
    case 18:
    case IMX31_CCM_LTR1_REG:
        return "LTR1";
    case 19:
    case IMX31_CCM_LTR2_REG:
        return "LTR2";
    case 20:
    case IMX31_CCM_LTR3_REG:
        return "LTR3";
    case 21:
    case IMX31_CCM_LTBR0_REG:
        return "LTBR0";
    case 22:
    case IMX31_CCM_LTBR1_REG:
        return "LTBR1";
    case 23:
    case IMX31_CCM_PMCR0_REG:
        return "PMCR0";
    case 24:
    case IMX31_CCM_PMCR1_REG:
        return "PMCR1";
    case 25:
    case IMX31_CCM_PDR2_REG:
        return "PDR2";
    default:
        return "???";
        sprintf(unknown, "[%d ?]", reg);
        return unknown;
    }
}

static const VMStateDescription vmstate_imx31_ccm = {
    .name = TYPE_IMX31_CCM,
    .version_id = 1,
    .minimum_version_id = 1,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ccmr, IMX31CCMState),
        VMSTATE_UINT32(pdr0, IMX31CCMState),
        VMSTATE_UINT32(pdr1, IMX31CCMState),
        VMSTATE_UINT32(mpctl, IMX31CCMState),
        VMSTATE_UINT32(spctl, IMX31CCMState),
        VMSTATE_UINT32_ARRAY(cgr, IMX31CCMState, 3),
        VMSTATE_UINT32(pmcr0, IMX31CCMState),
        VMSTATE_UINT32(pmcr1, IMX31CCMState),
        VMSTATE_UINT32_ARRAY(reg, IMX31CCMState, IMX31_CCM_MAX_REG),
        VMSTATE_END_OF_LIST()
    },
};
@ -109,10 +105,10 @@ static uint32_t imx31_ccm_get_pll_ref_clk(IMXCCMState *dev)
    uint32_t freq = 0;
    IMX31CCMState *s = IMX31_CCM(dev);

    if ((s->ccmr & CCMR_PRCS) == 2) {
        if (s->ccmr & CCMR_FPME) {
    if ((s->reg[IMX31_CCM_CCMR_REG] & CCMR_PRCS) == 2) {
        if (s->reg[IMX31_CCM_CCMR_REG] & CCMR_FPME) {
            freq = CKIL_FREQ;
            if (s->ccmr & CCMR_FPMF) {
            if (s->reg[IMX31_CCM_CCMR_REG] & CCMR_FPMF) {
                freq *= 1024;
            }
        }
@ -130,7 +126,8 @@ static uint32_t imx31_ccm_get_mpll_clk(IMXCCMState *dev)
    uint32_t freq;
    IMX31CCMState *s = IMX31_CCM(dev);

    freq = imx_ccm_calc_pll(s->mpctl, imx31_ccm_get_pll_ref_clk(dev));
    freq = imx_ccm_calc_pll(s->reg[IMX31_CCM_MPCTL_REG],
                            imx31_ccm_get_pll_ref_clk(dev));

    DPRINTF("freq = %d\n", freq);

@ -142,7 +139,8 @@ static uint32_t imx31_ccm_get_mcu_main_clk(IMXCCMState *dev)
    uint32_t freq;
    IMX31CCMState *s = IMX31_CCM(dev);

    if ((s->ccmr & CCMR_MDS) || !(s->ccmr & CCMR_MPE)) {
    if ((s->reg[IMX31_CCM_CCMR_REG] & CCMR_MDS) ||
        !(s->reg[IMX31_CCM_CCMR_REG] & CCMR_MPE)) {
        freq = imx31_ccm_get_pll_ref_clk(dev);
    } else {
        freq = imx31_ccm_get_mpll_clk(dev);
@ -158,7 +156,8 @@ static uint32_t imx31_ccm_get_mcu_clk(IMXCCMState *dev)
    uint32_t freq;
    IMX31CCMState *s = IMX31_CCM(dev);

    freq = imx31_ccm_get_mcu_main_clk(dev) / (1 + EXTRACT(s->pdr0, MCU));
    freq = imx31_ccm_get_mcu_main_clk(dev)
           / (1 + EXTRACT(s->reg[IMX31_CCM_PDR0_REG], MCU));

    DPRINTF("freq = %d\n", freq);

@ -170,7 +169,8 @@ static uint32_t imx31_ccm_get_hsp_clk(IMXCCMState *dev)
    uint32_t freq;
    IMX31CCMState *s = IMX31_CCM(dev);

    freq = imx31_ccm_get_mcu_main_clk(dev) / (1 + EXTRACT(s->pdr0, HSP));
    freq = imx31_ccm_get_mcu_main_clk(dev)
           / (1 + EXTRACT(s->reg[IMX31_CCM_PDR0_REG], HSP));

    DPRINTF("freq = %d\n", freq);

@ -182,7 +182,8 @@ static uint32_t imx31_ccm_get_hclk_clk(IMXCCMState *dev)
    uint32_t freq;
    IMX31CCMState *s = IMX31_CCM(dev);

    freq = imx31_ccm_get_mcu_main_clk(dev) / (1 + EXTRACT(s->pdr0, MAX));
    freq = imx31_ccm_get_mcu_main_clk(dev)
           / (1 + EXTRACT(s->reg[IMX31_CCM_PDR0_REG], MAX));

    DPRINTF("freq = %d\n", freq);

@ -194,7 +195,8 @@ static uint32_t imx31_ccm_get_ipg_clk(IMXCCMState *dev)
    uint32_t freq;
    IMX31CCMState *s = IMX31_CCM(dev);

    freq = imx31_ccm_get_hclk_clk(dev) / (1 + EXTRACT(s->pdr0, IPG));
    freq = imx31_ccm_get_hclk_clk(dev)
           / (1 + EXTRACT(s->reg[IMX31_CCM_PDR0_REG], IPG));

    DPRINTF("freq = %d\n", freq);

@ -237,14 +239,24 @@ static void imx31_ccm_reset(DeviceState *dev)

    DPRINTF("()\n");

    s->ccmr = 0x074b0b7d;
    s->pdr0 = 0xff870b48;
    s->pdr1 = 0x49fcfe7f;
    s->mpctl = 0x04001800;
    s->cgr[0] = s->cgr[1] = s->cgr[2] = 0xffffffff;
    s->spctl = 0x04043001;
    s->pmcr0 = 0x80209828;
    s->pmcr1 = 0x00aa0000;
    memset(s->reg, 0, sizeof(uint32_t) * IMX31_CCM_MAX_REG);

    s->reg[IMX31_CCM_CCMR_REG] = 0x074b0b7d;
    s->reg[IMX31_CCM_PDR0_REG] = 0xff870b48;
    s->reg[IMX31_CCM_PDR1_REG] = 0x49fcfe7f;
    s->reg[IMX31_CCM_RCSR_REG] = 0x007f0000;
    s->reg[IMX31_CCM_MPCTL_REG] = 0x04001800;
    s->reg[IMX31_CCM_UPCTL_REG] = 0x04051c03;
    s->reg[IMX31_CCM_SPCTL_REG] = 0x04043001;
    s->reg[IMX31_CCM_COSR_REG] = 0x00000280;
    s->reg[IMX31_CCM_CGR0_REG] = 0xffffffff;
    s->reg[IMX31_CCM_CGR1_REG] = 0xffffffff;
    s->reg[IMX31_CCM_CGR2_REG] = 0xffffffff;
    s->reg[IMX31_CCM_WIMR_REG] = 0xffffffff;
    s->reg[IMX31_CCM_LTR1_REG] = 0x00004040;
    s->reg[IMX31_CCM_PMCR0_REG] = 0x80209828;
    s->reg[IMX31_CCM_PMCR1_REG] = 0x00aa0000;
    s->reg[IMX31_CCM_PDR2_REG] = 0x00000285;
}

static uint64_t imx31_ccm_read(void *opaque, hwaddr offset, unsigned size)
@ -252,41 +264,11 @@ static uint64_t imx31_ccm_read(void *opaque, hwaddr offset, unsigned size)
    uint32_t value = 0;
    IMX31CCMState *s = (IMX31CCMState *)opaque;

    switch (offset >> 2) {
    case 0: /* CCMR */
        value = s->ccmr;
        break;
    case 1:
        value = s->pdr0;
        break;
    case 2:
        value = s->pdr1;
        break;
    case 4:
        value = s->mpctl;
        break;
    case 6:
        value = s->spctl;
        break;
    case 8:
        value = s->cgr[0];
        break;
    case 9:
        value = s->cgr[1];
        break;
    case 10:
        value = s->cgr[2];
        break;
    case 18: /* LTR1 */
        value = 0x00004040;
        break;
    case 23:
        value = s->pmcr0;
        break;
    default:
    if ((offset >> 2) < IMX31_CCM_MAX_REG) {
        value = s->reg[offset >> 2];
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                      HWADDR_PRIx "\n", TYPE_IMX31_CCM, __func__, offset);
        break;
    }

    DPRINTF("reg[%s] => 0x%" PRIx32 "\n", imx31_ccm_reg_name(offset >> 2),
@ -304,29 +286,29 @@ static void imx31_ccm_write(void *opaque, hwaddr offset, uint64_t value,
            (uint32_t)value);

    switch (offset >> 2) {
    case 0:
        s->ccmr = CCMR_FPMF | (value & 0x3b6fdfff);
    case IMX31_CCM_CCMR_REG:
        s->reg[IMX31_CCM_CCMR_REG] = CCMR_FPMF | (value & 0x3b6fdfff);
        break;
    case 1:
        s->pdr0 = value & 0xff9f3fff;
    case IMX31_CCM_PDR0_REG:
        s->reg[IMX31_CCM_PDR0_REG] = value & 0xff9f3fff;
        break;
    case 2:
        s->pdr1 = value;
    case IMX31_CCM_PDR1_REG:
        s->reg[IMX31_CCM_PDR1_REG] = value;
        break;
    case 4:
        s->mpctl = value & 0xbfff3fff;
    case IMX31_CCM_MPCTL_REG:
        s->reg[IMX31_CCM_MPCTL_REG] = value & 0xbfff3fff;
        break;
    case 6:
        s->spctl = value & 0xbfff3fff;
    case IMX31_CCM_SPCTL_REG:
        s->reg[IMX31_CCM_SPCTL_REG] = value & 0xbfff3fff;
        break;
    case 8:
        s->cgr[0] = value;
    case IMX31_CCM_CGR0_REG:
        s->reg[IMX31_CCM_CGR0_REG] = value;
        break;
    case 9:
        s->cgr[1] = value;
    case IMX31_CCM_CGR1_REG:
        s->reg[IMX31_CCM_CGR1_REG] = value;
        break;
    case 10:
        s->cgr[2] = value;
    case IMX31_CCM_CGR2_REG:
        s->reg[IMX31_CCM_CGR2_REG] = value;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
@ -13,6 +13,34 @@

#include "hw/misc/imx_ccm.h"

#define IMX31_CCM_CCMR_REG  0
#define IMX31_CCM_PDR0_REG  1
#define IMX31_CCM_PDR1_REG  2
#define IMX31_CCM_RCSR_REG  3
#define IMX31_CCM_MPCTL_REG 4
#define IMX31_CCM_UPCTL_REG 5
#define IMX31_CCM_SPCTL_REG 6
#define IMX31_CCM_COSR_REG  7
#define IMX31_CCM_CGR0_REG  8
#define IMX31_CCM_CGR1_REG  9
#define IMX31_CCM_CGR2_REG  10
#define IMX31_CCM_WIMR_REG  11
#define IMX31_CCM_LDC_REG   12
#define IMX31_CCM_DCVR0_REG 13
#define IMX31_CCM_DCVR1_REG 14
#define IMX31_CCM_DCVR2_REG 15
#define IMX31_CCM_DCVR3_REG 16
#define IMX31_CCM_LTR0_REG  17
#define IMX31_CCM_LTR1_REG  18
#define IMX31_CCM_LTR2_REG  19
#define IMX31_CCM_LTR3_REG  20
#define IMX31_CCM_LTBR0_REG 21
#define IMX31_CCM_LTBR1_REG 22
#define IMX31_CCM_PMCR0_REG 23
#define IMX31_CCM_PMCR1_REG 24
#define IMX31_CCM_PDR2_REG  25
#define IMX31_CCM_MAX_REG   26
|
||||
#define CCMR_FPME (1<<0)
|
||||
#define CCMR_MPE (1<<3)
|
||||
@ -53,14 +81,8 @@ typedef struct IMX31CCMState {
|
||||
/* <public> */
|
||||
MemoryRegion iomem;
|
||||
|
||||
uint32_t ccmr;
|
||||
uint32_t pdr0;
|
||||
uint32_t pdr1;
|
||||
uint32_t mpctl;
|
||||
uint32_t spctl;
|
||||
uint32_t cgr[3];
|
||||
uint32_t pmcr0;
|
||||
uint32_t pmcr1;
|
||||
uint32_t reg[IMX31_CCM_MAX_REG];
|
||||
|
||||
} IMX31CCMState;
|
||||
|
||||
#endif /* IMX31_CCM_H */
|
||||
|