Mirror of https://github.com/FEX-Emu/vixl.git (synced 2025-02-10 08:52:26 +00:00)

VIXL Release 1.3

Refer to the README.md and LICENCE files for details.

Commit: b0c8ae2a5f
Parent: f37fdc0b30
@ -1,4 +1,4 @@
VIXL: AArch64 Runtime Code Generation Library Version 1.2
VIXL: AArch64 Runtime Code Generation Library Version 1.3
=========================================================

Contents:
@ -16,7 +16,7 @@ To build VIXL the following software is required:

1. Python 2.7
2. SCons 2.0
3. GCC 4.4
3. GCC 4.4+

A 64-bit host machine is required, implementing an LP64 data model. VIXL has
only been tested using GCC on Ubuntu systems.
@ -62,6 +62,10 @@ The VIXL simulator supports only those instructions that the VIXL assembler can
generate. The `doc` directory contains a
[list of supported instructions](doc/supported-instructions.md).

The VIXL simulator was developed to run on 64-bit amd64 platforms. Whilst it
builds and mostly works for 32-bit x86 platforms, there are a number of
floating-point operations which do not work correctly, and a number of tests
fail as a result.

Usage
=====
@ -69,6 +69,7 @@ TARGET_SRC_FILES = {
test/cctest.cc
test/test-utils-a64.cc
test/test-assembler-a64.cc
test/test-simulator-a64.cc
test/test-disasm-a64.cc
test/test-fuzz-a64.cc
test/examples/test-examples.cc
@ -136,7 +137,10 @@ env['RANLIB'] = os.environ.get('RANLIB', env.get('RANLIB'))
env['CC'] = os.environ.get('CC', env.get('CC'))
env['LD'] = os.environ.get('LD', env.get('LD'))

env.Append(CPPFLAGS = os.environ.get('CPPFLAGS'))
if os.environ.get('CPPFLAGS'):
  env.Append(CPPFLAGS = os.environ.get('CPPFLAGS').split())
if os.environ.get('LINKFLAGS'):
  env.Append(LINKFLAGS = os.environ.get('LINKFLAGS').split())

# Always look in 'src' for include files.
env.Append(CPPPATH = [PROJ_SRC_DIR])
@ -1,6 +1,14 @@
VIXL Change Log
===============

* 1.3
+ Address inaccuracies in the simulated floating point instructions.
+ Implement Default-NaN floating point mode.
+ Introduce `UseScratchRegisterScope` for controlling the use of temporary
  registers.
+ Enable building VIXL on 32-bit hosts.
+ Other small bug fixes and improvements.

* 1.2
+ Added support for `fmadd`, `fnmadd`, `fnmsub`, `fminnm`, `fmaxnm`,
  `frinta`, `fcvtau` and `fcvtas`.
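The 1.3 entry above introduces `UseScratchRegisterScope`. As a rough illustration of how such a scope is typically used with the MacroAssembler to borrow and automatically release a temporary register — a minimal sketch only, with method names such as `AcquireX()` assumed from later VIXL releases rather than taken from this commit:

// Hypothetical sketch: borrow a temporary register inside a scope so it is
// handed back automatically. Names are assumed from later VIXL releases and
// may differ slightly in 1.3.
#include "a64/macro-assembler-a64.h"

void AddImmediateWithTemp(vixl::MacroAssembler* masm,
                          const vixl::Register& dst,
                          const vixl::Register& src,
                          int64_t imm) {
  vixl::UseScratchRegisterScope temps(masm);  // Manages the scratch list.
  vixl::Register temp = temps.AcquireX();     // Borrow a 64-bit temporary.
  masm->Mov(temp, imm);                       // Materialise the immediate.
  masm->Add(dst, src, vixl::Operand(temp));
  // temps goes out of scope here and releases the borrowed register.
}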
File diff suppressed because it is too large
@ -38,6 +38,7 @@ namespace vixl {
|
||||
typedef uint64_t RegList;
|
||||
static const int kRegListSizeInBits = sizeof(RegList) * 8;
|
||||
|
||||
|
||||
// Registers.
|
||||
|
||||
// Some CPURegister methods can return Register and FPRegister types, so we
|
||||
@ -58,62 +59,62 @@ class CPURegister {
|
||||
};
|
||||
|
||||
CPURegister() : code_(0), size_(0), type_(kNoRegister) {
|
||||
ASSERT(!IsValid());
|
||||
ASSERT(IsNone());
|
||||
VIXL_ASSERT(!IsValid());
|
||||
VIXL_ASSERT(IsNone());
|
||||
}
|
||||
|
||||
CPURegister(unsigned code, unsigned size, RegisterType type)
|
||||
: code_(code), size_(size), type_(type) {
|
||||
ASSERT(IsValidOrNone());
|
||||
VIXL_ASSERT(IsValidOrNone());
|
||||
}
|
||||
|
||||
unsigned code() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return code_;
|
||||
}
|
||||
|
||||
RegisterType type() const {
|
||||
ASSERT(IsValidOrNone());
|
||||
VIXL_ASSERT(IsValidOrNone());
|
||||
return type_;
|
||||
}
|
||||
|
||||
RegList Bit() const {
|
||||
ASSERT(code_ < (sizeof(RegList) * 8));
|
||||
VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
|
||||
return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
|
||||
}
|
||||
|
||||
unsigned size() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_;
|
||||
}
|
||||
|
||||
int SizeInBytes() const {
|
||||
ASSERT(IsValid());
|
||||
ASSERT(size() % 8 == 0);
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(size() % 8 == 0);
|
||||
return size_ / 8;
|
||||
}
|
||||
|
||||
int SizeInBits() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_;
|
||||
}
|
||||
|
||||
bool Is32Bits() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_ == 32;
|
||||
}
|
||||
|
||||
bool Is64Bits() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_ == 64;
|
||||
}
|
||||
|
||||
bool IsValid() const {
|
||||
if (IsValidRegister() || IsValidFPRegister()) {
|
||||
ASSERT(!IsNone());
|
||||
VIXL_ASSERT(!IsNone());
|
||||
return true;
|
||||
} else {
|
||||
ASSERT(IsNone());
|
||||
VIXL_ASSERT(IsNone());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -132,25 +133,25 @@ class CPURegister {
|
||||
|
||||
bool IsNone() const {
|
||||
// kNoRegister types should always have size 0 and code 0.
|
||||
ASSERT((type_ != kNoRegister) || (code_ == 0));
|
||||
ASSERT((type_ != kNoRegister) || (size_ == 0));
|
||||
VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
|
||||
VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
|
||||
|
||||
return type_ == kNoRegister;
|
||||
}
|
||||
|
||||
bool Is(const CPURegister& other) const {
|
||||
ASSERT(IsValidOrNone() && other.IsValidOrNone());
|
||||
VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
|
||||
return (code_ == other.code_) && (size_ == other.size_) &&
|
||||
(type_ == other.type_);
|
||||
}
|
||||
|
||||
inline bool IsZero() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return IsRegister() && (code_ == kZeroRegCode);
|
||||
}
|
||||
|
||||
inline bool IsSP() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return IsRegister() && (code_ == kSPRegInternalCode);
|
||||
}
|
||||
|
||||
@ -188,13 +189,13 @@ class Register : public CPURegister {
|
||||
explicit Register() : CPURegister() {}
|
||||
inline explicit Register(const CPURegister& other)
|
||||
: CPURegister(other.code(), other.size(), other.type()) {
|
||||
ASSERT(IsValidRegister());
|
||||
VIXL_ASSERT(IsValidRegister());
|
||||
}
|
||||
explicit Register(unsigned code, unsigned size)
|
||||
: CPURegister(code, size, kRegister) {}
|
||||
|
||||
bool IsValid() const {
|
||||
ASSERT(IsRegister() || IsNone());
|
||||
VIXL_ASSERT(IsRegister() || IsNone());
|
||||
return IsValidRegister();
|
||||
}
|
||||
|
||||
@ -216,13 +217,13 @@ class FPRegister : public CPURegister {
|
||||
inline FPRegister() : CPURegister() {}
|
||||
inline explicit FPRegister(const CPURegister& other)
|
||||
: CPURegister(other.code(), other.size(), other.type()) {
|
||||
ASSERT(IsValidFPRegister());
|
||||
VIXL_ASSERT(IsValidFPRegister());
|
||||
}
|
||||
inline FPRegister(unsigned code, unsigned size)
|
||||
: CPURegister(code, size, kFPRegister) {}
|
||||
|
||||
bool IsValid() const {
|
||||
ASSERT(IsFPRegister() || IsNone());
|
||||
VIXL_ASSERT(IsFPRegister() || IsNone());
|
||||
return IsValidFPRegister();
|
||||
}
|
||||
|
||||
@ -306,30 +307,30 @@ class CPURegList {
|
||||
CPURegister reg4 = NoCPUReg)
|
||||
: list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
|
||||
size_(reg1.size()), type_(reg1.type()) {
|
||||
ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
|
||||
VIXL_ASSERT(IsValid());
|
||||
}
|
||||
|
||||
inline CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
|
||||
: list_(list), size_(size), type_(type) {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
}
|
||||
|
||||
inline CPURegList(CPURegister::RegisterType type, unsigned size,
|
||||
unsigned first_reg, unsigned last_reg)
|
||||
: size_(size), type_(type) {
|
||||
ASSERT(((type == CPURegister::kRegister) &&
|
||||
(last_reg < kNumberOfRegisters)) ||
|
||||
((type == CPURegister::kFPRegister) &&
|
||||
(last_reg < kNumberOfFPRegisters)));
|
||||
ASSERT(last_reg >= first_reg);
|
||||
list_ = (1UL << (last_reg + 1)) - 1;
|
||||
list_ &= ~((1UL << first_reg) - 1);
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(((type == CPURegister::kRegister) &&
|
||||
(last_reg < kNumberOfRegisters)) ||
|
||||
((type == CPURegister::kFPRegister) &&
|
||||
(last_reg < kNumberOfFPRegisters)));
|
||||
VIXL_ASSERT(last_reg >= first_reg);
|
||||
list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
|
||||
list_ &= ~((UINT64_C(1) << first_reg) - 1);
|
||||
VIXL_ASSERT(IsValid());
|
||||
}
|
||||
|
||||
inline CPURegister::RegisterType type() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return type_;
|
||||
}
|
||||
|
||||
@ -337,9 +338,9 @@ class CPURegList {
|
||||
// this list are left unchanged. The type and size of the registers in the
|
||||
// 'other' list must match those in this list.
|
||||
void Combine(const CPURegList& other) {
|
||||
ASSERT(IsValid());
|
||||
ASSERT(other.type() == type_);
|
||||
ASSERT(other.RegisterSizeInBits() == size_);
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(other.type() == type_);
|
||||
VIXL_ASSERT(other.RegisterSizeInBits() == size_);
|
||||
list_ |= other.list();
|
||||
}
|
||||
|
||||
@ -347,44 +348,49 @@ class CPURegList {
|
||||
// do not exist in this list are ignored. The type and size of the registers
|
||||
// in the 'other' list must match those in this list.
|
||||
void Remove(const CPURegList& other) {
|
||||
ASSERT(IsValid());
|
||||
ASSERT(other.type() == type_);
|
||||
ASSERT(other.RegisterSizeInBits() == size_);
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(other.type() == type_);
|
||||
VIXL_ASSERT(other.RegisterSizeInBits() == size_);
|
||||
list_ &= ~other.list();
|
||||
}
|
||||
|
||||
// Variants of Combine and Remove which take a single register.
|
||||
inline void Combine(const CPURegister& other) {
|
||||
ASSERT(other.type() == type_);
|
||||
ASSERT(other.size() == size_);
|
||||
VIXL_ASSERT(other.type() == type_);
|
||||
VIXL_ASSERT(other.size() == size_);
|
||||
Combine(other.code());
|
||||
}
|
||||
|
||||
inline void Remove(const CPURegister& other) {
|
||||
ASSERT(other.type() == type_);
|
||||
ASSERT(other.size() == size_);
|
||||
VIXL_ASSERT(other.type() == type_);
|
||||
VIXL_ASSERT(other.size() == size_);
|
||||
Remove(other.code());
|
||||
}
|
||||
|
||||
// Variants of Combine and Remove which take a single register by its code;
|
||||
// the type and size of the register is inferred from this list.
|
||||
inline void Combine(int code) {
|
||||
ASSERT(IsValid());
|
||||
ASSERT(CPURegister(code, size_, type_).IsValid());
|
||||
list_ |= (1UL << code);
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
|
||||
list_ |= (UINT64_C(1) << code);
|
||||
}
|
||||
|
||||
inline void Remove(int code) {
|
||||
ASSERT(IsValid());
|
||||
ASSERT(CPURegister(code, size_, type_).IsValid());
|
||||
list_ &= ~(1UL << code);
|
||||
VIXL_ASSERT(IsValid());
|
||||
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
|
||||
list_ &= ~(UINT64_C(1) << code);
|
||||
}
|
||||
|
||||
inline RegList list() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return list_;
|
||||
}
|
||||
|
||||
inline void set_list(RegList new_list) {
|
||||
VIXL_ASSERT(IsValid());
|
||||
list_ = new_list;
|
||||
}
|
||||
|
||||
// Remove all callee-saved registers from the list. This can be useful when
|
||||
// preparing registers for an AAPCS64 function call, for example.
|
||||
void RemoveCalleeSaved();
|
||||
@ -401,28 +407,33 @@ class CPURegList {
|
||||
static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
|
||||
|
||||
inline bool IsEmpty() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return list_ == 0;
|
||||
}
|
||||
|
||||
inline bool IncludesAliasOf(const CPURegister& other) const {
|
||||
ASSERT(IsValid());
|
||||
return (type_ == other.type()) && (other.Bit() & list_);
|
||||
VIXL_ASSERT(IsValid());
|
||||
return (type_ == other.type()) && ((other.Bit() & list_) != 0);
|
||||
}
|
||||
|
||||
inline bool IncludesAliasOf(int code) const {
|
||||
VIXL_ASSERT(IsValid());
|
||||
return ((code & list_) != 0);
|
||||
}
|
||||
|
||||
inline int Count() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return CountSetBits(list_, kRegListSizeInBits);
|
||||
}
|
||||
|
||||
inline unsigned RegisterSizeInBits() const {
|
||||
ASSERT(IsValid());
|
||||
VIXL_ASSERT(IsValid());
|
||||
return size_;
|
||||
}
|
||||
|
||||
inline unsigned RegisterSizeInBytes() const {
|
||||
int size_in_bits = RegisterSizeInBits();
|
||||
ASSERT((size_in_bits % 8) == 0);
|
||||
VIXL_ASSERT((size_in_bits % 8) == 0);
|
||||
return size_in_bits / 8;
|
||||
}
|
||||
|
||||
@ -478,27 +489,27 @@ class Operand {
|
||||
Operand ToExtendedRegister() const;
|
||||
|
||||
int64_t immediate() const {
|
||||
ASSERT(IsImmediate());
|
||||
VIXL_ASSERT(IsImmediate());
|
||||
return immediate_;
|
||||
}
|
||||
|
||||
Register reg() const {
|
||||
ASSERT(IsShiftedRegister() || IsExtendedRegister());
|
||||
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
|
||||
return reg_;
|
||||
}
|
||||
|
||||
Shift shift() const {
|
||||
ASSERT(IsShiftedRegister());
|
||||
VIXL_ASSERT(IsShiftedRegister());
|
||||
return shift_;
|
||||
}
|
||||
|
||||
Extend extend() const {
|
||||
ASSERT(IsExtendedRegister());
|
||||
VIXL_ASSERT(IsExtendedRegister());
|
||||
return extend_;
|
||||
}
|
||||
|
||||
unsigned shift_amount() const {
|
||||
ASSERT(IsShiftedRegister() || IsExtendedRegister());
|
||||
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
|
||||
return shift_amount_;
|
||||
}
|
||||
|
||||
@ -557,7 +568,7 @@ class Label {
|
||||
Label() : is_bound_(false), link_(NULL), target_(NULL) {}
|
||||
~Label() {
|
||||
// If the label has been linked to, it needs to be bound to a target.
|
||||
ASSERT(!IsLinked() || IsBound());
|
||||
VIXL_ASSERT(!IsLinked() || IsBound());
|
||||
}
|
||||
|
||||
inline Instruction* link() const { return link_; }
|
||||
@ -644,7 +655,7 @@ class Assembler {
|
||||
void bind(Label* label);
|
||||
int UpdateAndGetByteOffsetTo(Label* label);
|
||||
inline int UpdateAndGetInstructionOffsetTo(Label* label) {
|
||||
ASSERT(Label::kEndOfChain == 0);
|
||||
VIXL_ASSERT(Label::kEndOfChain == 0);
|
||||
return UpdateAndGetByteOffsetTo(label) >> kInstructionSizeLog2;
|
||||
}
|
||||
|
||||
@ -849,8 +860,8 @@ class Assembler {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.size());
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
|
||||
}
|
||||
|
||||
@ -859,15 +870,15 @@ class Assembler {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.size());
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
bfm(rd, rn, lsb, lsb + width - 1);
|
||||
}
|
||||
|
||||
// Sbfm aliases.
|
||||
// Arithmetic shift right.
|
||||
inline void asr(const Register& rd, const Register& rn, unsigned shift) {
|
||||
ASSERT(shift < rd.size());
|
||||
VIXL_ASSERT(shift < rd.size());
|
||||
sbfm(rd, rn, shift, rd.size() - 1);
|
||||
}
|
||||
|
||||
@ -876,8 +887,8 @@ class Assembler {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.size());
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
|
||||
}
|
||||
|
||||
@ -886,8 +897,8 @@ class Assembler {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.size());
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
sbfm(rd, rn, lsb, lsb + width - 1);
|
||||
}
|
||||
|
||||
@ -910,13 +921,13 @@ class Assembler {
|
||||
// Logical shift left.
|
||||
inline void lsl(const Register& rd, const Register& rn, unsigned shift) {
|
||||
unsigned reg_size = rd.size();
|
||||
ASSERT(shift < reg_size);
|
||||
VIXL_ASSERT(shift < reg_size);
|
||||
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
|
||||
}
|
||||
|
||||
// Logical shift right.
|
||||
inline void lsr(const Register& rd, const Register& rn, unsigned shift) {
|
||||
ASSERT(shift < rd.size());
|
||||
VIXL_ASSERT(shift < rd.size());
|
||||
ubfm(rd, rn, shift, rd.size() - 1);
|
||||
}
|
||||
|
||||
@ -925,8 +936,8 @@ class Assembler {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.size());
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
|
||||
}
|
||||
|
||||
@ -935,8 +946,8 @@ class Assembler {
|
||||
const Register& rn,
|
||||
unsigned lsb,
|
||||
unsigned width) {
|
||||
ASSERT(width >= 1);
|
||||
ASSERT(lsb + width <= rn.size());
|
||||
VIXL_ASSERT(width >= 1);
|
||||
VIXL_ASSERT(lsb + width <= rn.size());
|
||||
ubfm(rd, rn, lsb, lsb + width - 1);
|
||||
}
|
||||
|
||||
@ -1142,6 +1153,7 @@ class Assembler {
|
||||
|
||||
// Load literal to FP register.
|
||||
void ldr(const FPRegister& ft, double imm);
|
||||
void ldr(const FPRegister& ft, float imm);
|
||||
|
||||
// Move instructions. The default shift of -1 indicates that the move
|
||||
// instruction will calculate an appropriate 16-bit immediate and left shift
|
||||
@ -1209,6 +1221,7 @@ class Assembler {
|
||||
// FP instructions.
|
||||
// Move immediate to FP register.
|
||||
void fmov(FPRegister fd, double imm);
|
||||
void fmov(FPRegister fd, float imm);
|
||||
|
||||
// Move FP register to register.
|
||||
void fmov(Register rd, FPRegister fn);
|
||||
@ -1355,14 +1368,14 @@ class Assembler {
|
||||
// character. The instruction pointer (pc_) is then aligned correctly for
|
||||
// subsequent instructions.
|
||||
void EmitStringData(const char * string) {
|
||||
ASSERT(string != NULL);
|
||||
VIXL_ASSERT(string != NULL);
|
||||
|
||||
size_t len = strlen(string) + 1;
|
||||
EmitData(string, len);
|
||||
|
||||
// Pad with NULL characters until pc_ is aligned.
|
||||
const char pad[] = {'\0', '\0', '\0', '\0'};
|
||||
ASSERT(sizeof(pad) == kInstructionSize);
|
||||
VIXL_STATIC_ASSERT(sizeof(pad) == kInstructionSize);
|
||||
Instruction* next_pc = AlignUp(pc_, kInstructionSize);
|
||||
EmitData(&pad, next_pc - pc_);
|
||||
}
|
||||
@ -1371,44 +1384,44 @@ class Assembler {
|
||||
|
||||
// Register encoding.
|
||||
static Instr Rd(CPURegister rd) {
|
||||
ASSERT(rd.code() != kSPRegInternalCode);
|
||||
VIXL_ASSERT(rd.code() != kSPRegInternalCode);
|
||||
return rd.code() << Rd_offset;
|
||||
}
|
||||
|
||||
static Instr Rn(CPURegister rn) {
|
||||
ASSERT(rn.code() != kSPRegInternalCode);
|
||||
VIXL_ASSERT(rn.code() != kSPRegInternalCode);
|
||||
return rn.code() << Rn_offset;
|
||||
}
|
||||
|
||||
static Instr Rm(CPURegister rm) {
|
||||
ASSERT(rm.code() != kSPRegInternalCode);
|
||||
VIXL_ASSERT(rm.code() != kSPRegInternalCode);
|
||||
return rm.code() << Rm_offset;
|
||||
}
|
||||
|
||||
static Instr Ra(CPURegister ra) {
|
||||
ASSERT(ra.code() != kSPRegInternalCode);
|
||||
VIXL_ASSERT(ra.code() != kSPRegInternalCode);
|
||||
return ra.code() << Ra_offset;
|
||||
}
|
||||
|
||||
static Instr Rt(CPURegister rt) {
|
||||
ASSERT(rt.code() != kSPRegInternalCode);
|
||||
VIXL_ASSERT(rt.code() != kSPRegInternalCode);
|
||||
return rt.code() << Rt_offset;
|
||||
}
|
||||
|
||||
static Instr Rt2(CPURegister rt2) {
|
||||
ASSERT(rt2.code() != kSPRegInternalCode);
|
||||
VIXL_ASSERT(rt2.code() != kSPRegInternalCode);
|
||||
return rt2.code() << Rt2_offset;
|
||||
}
|
||||
|
||||
// These encoding functions allow the stack pointer to be encoded, and
|
||||
// disallow the zero register.
|
||||
static Instr RdSP(Register rd) {
|
||||
ASSERT(!rd.IsZero());
|
||||
VIXL_ASSERT(!rd.IsZero());
|
||||
return (rd.code() & kRegCodeMask) << Rd_offset;
|
||||
}
|
||||
|
||||
static Instr RnSP(Register rn) {
|
||||
ASSERT(!rn.IsZero());
|
||||
VIXL_ASSERT(!rn.IsZero());
|
||||
return (rn.code() & kRegCodeMask) << Rn_offset;
|
||||
}
|
||||
|
||||
@ -1419,7 +1432,7 @@ class Assembler {
|
||||
} else if (S == LeaveFlags) {
|
||||
return 0 << FlagsUpdate_offset;
|
||||
}
|
||||
UNREACHABLE();
|
||||
VIXL_UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1429,7 +1442,7 @@ class Assembler {
|
||||
|
||||
// PC-relative address encoding.
|
||||
static Instr ImmPCRelAddress(int imm21) {
|
||||
ASSERT(is_int21(imm21));
|
||||
VIXL_ASSERT(is_int21(imm21));
|
||||
Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
|
||||
Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
|
||||
Instr immlo = imm << ImmPCRelLo_offset;
|
||||
@ -1438,27 +1451,27 @@ class Assembler {
|
||||
|
||||
// Branch encoding.
|
||||
static Instr ImmUncondBranch(int imm26) {
|
||||
ASSERT(is_int26(imm26));
|
||||
VIXL_ASSERT(is_int26(imm26));
|
||||
return truncate_to_int26(imm26) << ImmUncondBranch_offset;
|
||||
}
|
||||
|
||||
static Instr ImmCondBranch(int imm19) {
|
||||
ASSERT(is_int19(imm19));
|
||||
VIXL_ASSERT(is_int19(imm19));
|
||||
return truncate_to_int19(imm19) << ImmCondBranch_offset;
|
||||
}
|
||||
|
||||
static Instr ImmCmpBranch(int imm19) {
|
||||
ASSERT(is_int19(imm19));
|
||||
VIXL_ASSERT(is_int19(imm19));
|
||||
return truncate_to_int19(imm19) << ImmCmpBranch_offset;
|
||||
}
|
||||
|
||||
static Instr ImmTestBranch(int imm14) {
|
||||
ASSERT(is_int14(imm14));
|
||||
VIXL_ASSERT(is_int14(imm14));
|
||||
return truncate_to_int14(imm14) << ImmTestBranch_offset;
|
||||
}
|
||||
|
||||
static Instr ImmTestBranchBit(unsigned bit_pos) {
|
||||
ASSERT(is_uint6(bit_pos));
|
||||
VIXL_ASSERT(is_uint6(bit_pos));
|
||||
// Subtract five from the shift offset, as we need bit 5 from bit_pos.
|
||||
unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
|
||||
unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
|
||||
@ -1473,7 +1486,7 @@ class Assembler {
|
||||
}
|
||||
|
||||
static Instr ImmAddSub(int64_t imm) {
|
||||
ASSERT(IsImmAddSub(imm));
|
||||
VIXL_ASSERT(IsImmAddSub(imm));
|
||||
if (is_uint12(imm)) { // No shift required.
|
||||
return imm << ImmAddSub_offset;
|
||||
} else {
|
||||
@ -1482,55 +1495,55 @@ class Assembler {
|
||||
}
|
||||
|
||||
static inline Instr ImmS(unsigned imms, unsigned reg_size) {
|
||||
ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
|
||||
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
|
||||
((reg_size == kWRegSize) && is_uint5(imms)));
|
||||
USE(reg_size);
|
||||
return imms << ImmS_offset;
|
||||
}
|
||||
|
||||
static inline Instr ImmR(unsigned immr, unsigned reg_size) {
|
||||
ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
|
||||
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
|
||||
((reg_size == kWRegSize) && is_uint5(immr)));
|
||||
USE(reg_size);
|
||||
ASSERT(is_uint6(immr));
|
||||
VIXL_ASSERT(is_uint6(immr));
|
||||
return immr << ImmR_offset;
|
||||
}
|
||||
|
||||
static inline Instr ImmSetBits(unsigned imms, unsigned reg_size) {
|
||||
ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
|
||||
ASSERT(is_uint6(imms));
|
||||
ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
|
||||
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
|
||||
VIXL_ASSERT(is_uint6(imms));
|
||||
VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
|
||||
USE(reg_size);
|
||||
return imms << ImmSetBits_offset;
|
||||
}
|
||||
|
||||
static inline Instr ImmRotate(unsigned immr, unsigned reg_size) {
|
||||
ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
|
||||
ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
|
||||
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
|
||||
VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
|
||||
((reg_size == kWRegSize) && is_uint5(immr)));
|
||||
USE(reg_size);
|
||||
return immr << ImmRotate_offset;
|
||||
}
|
||||
|
||||
static inline Instr ImmLLiteral(int imm19) {
|
||||
ASSERT(is_int19(imm19));
|
||||
VIXL_ASSERT(is_int19(imm19));
|
||||
return truncate_to_int19(imm19) << ImmLLiteral_offset;
|
||||
}
|
||||
|
||||
static inline Instr BitN(unsigned bitn, unsigned reg_size) {
|
||||
ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
|
||||
ASSERT((reg_size == kXRegSize) || (bitn == 0));
|
||||
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
|
||||
VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
|
||||
USE(reg_size);
|
||||
return bitn << BitN_offset;
|
||||
}
|
||||
|
||||
static Instr ShiftDP(Shift shift) {
|
||||
ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
|
||||
VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
|
||||
return shift << ShiftDP_offset;
|
||||
}
|
||||
|
||||
static Instr ImmDPShift(unsigned amount) {
|
||||
ASSERT(is_uint6(amount));
|
||||
VIXL_ASSERT(is_uint6(amount));
|
||||
return amount << ImmDPShift_offset;
|
||||
}
|
||||
|
||||
@ -1539,12 +1552,12 @@ class Assembler {
|
||||
}
|
||||
|
||||
static Instr ImmExtendShift(unsigned left_shift) {
|
||||
ASSERT(left_shift <= 4);
|
||||
VIXL_ASSERT(left_shift <= 4);
|
||||
return left_shift << ImmExtendShift_offset;
|
||||
}
|
||||
|
||||
static Instr ImmCondCmp(unsigned imm) {
|
||||
ASSERT(is_uint5(imm));
|
||||
VIXL_ASSERT(is_uint5(imm));
|
||||
return imm << ImmCondCmp_offset;
|
||||
}
|
||||
|
||||
@ -1554,65 +1567,65 @@ class Assembler {
|
||||
|
||||
// MemOperand offset encoding.
|
||||
static Instr ImmLSUnsigned(int imm12) {
|
||||
ASSERT(is_uint12(imm12));
|
||||
VIXL_ASSERT(is_uint12(imm12));
|
||||
return imm12 << ImmLSUnsigned_offset;
|
||||
}
|
||||
|
||||
static Instr ImmLS(int imm9) {
|
||||
ASSERT(is_int9(imm9));
|
||||
VIXL_ASSERT(is_int9(imm9));
|
||||
return truncate_to_int9(imm9) << ImmLS_offset;
|
||||
}
|
||||
|
||||
static Instr ImmLSPair(int imm7, LSDataSize size) {
|
||||
ASSERT(((imm7 >> size) << size) == imm7);
|
||||
VIXL_ASSERT(((imm7 >> size) << size) == imm7);
|
||||
int scaled_imm7 = imm7 >> size;
|
||||
ASSERT(is_int7(scaled_imm7));
|
||||
VIXL_ASSERT(is_int7(scaled_imm7));
|
||||
return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
|
||||
}
|
||||
|
||||
static Instr ImmShiftLS(unsigned shift_amount) {
|
||||
ASSERT(is_uint1(shift_amount));
|
||||
VIXL_ASSERT(is_uint1(shift_amount));
|
||||
return shift_amount << ImmShiftLS_offset;
|
||||
}
|
||||
|
||||
static Instr ImmException(int imm16) {
|
||||
ASSERT(is_uint16(imm16));
|
||||
VIXL_ASSERT(is_uint16(imm16));
|
||||
return imm16 << ImmException_offset;
|
||||
}
|
||||
|
||||
static Instr ImmSystemRegister(int imm15) {
|
||||
ASSERT(is_uint15(imm15));
|
||||
VIXL_ASSERT(is_uint15(imm15));
|
||||
return imm15 << ImmSystemRegister_offset;
|
||||
}
|
||||
|
||||
static Instr ImmHint(int imm7) {
|
||||
ASSERT(is_uint7(imm7));
|
||||
VIXL_ASSERT(is_uint7(imm7));
|
||||
return imm7 << ImmHint_offset;
|
||||
}
|
||||
|
||||
static Instr ImmBarrierDomain(int imm2) {
|
||||
ASSERT(is_uint2(imm2));
|
||||
VIXL_ASSERT(is_uint2(imm2));
|
||||
return imm2 << ImmBarrierDomain_offset;
|
||||
}
|
||||
|
||||
static Instr ImmBarrierType(int imm2) {
|
||||
ASSERT(is_uint2(imm2));
|
||||
VIXL_ASSERT(is_uint2(imm2));
|
||||
return imm2 << ImmBarrierType_offset;
|
||||
}
|
||||
|
||||
static LSDataSize CalcLSDataSize(LoadStoreOp op) {
|
||||
ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
|
||||
VIXL_ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
|
||||
return static_cast<LSDataSize>(op >> SizeLS_offset);
|
||||
}
|
||||
|
||||
// Move immediates encoding.
|
||||
static Instr ImmMoveWide(uint64_t imm) {
|
||||
ASSERT(is_uint16(imm));
|
||||
VIXL_ASSERT(is_uint16(imm));
|
||||
return imm << ImmMoveWide_offset;
|
||||
}
|
||||
|
||||
static Instr ShiftMoveWide(int64_t shift) {
|
||||
ASSERT(is_uint2(shift));
|
||||
VIXL_ASSERT(is_uint2(shift));
|
||||
return shift << ShiftMoveWide_offset;
|
||||
}
|
||||
|
||||
@ -1626,20 +1639,20 @@ class Assembler {
|
||||
}
|
||||
|
||||
static Instr FPScale(unsigned scale) {
|
||||
ASSERT(is_uint6(scale));
|
||||
VIXL_ASSERT(is_uint6(scale));
|
||||
return scale << FPScale_offset;
|
||||
}
|
||||
|
||||
// Size of the code generated in bytes
|
||||
uint64_t SizeOfCodeGenerated() const {
|
||||
ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
|
||||
VIXL_ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
|
||||
return pc_ - buffer_;
|
||||
}
|
||||
|
||||
// Size of the code generated since label to the current position.
|
||||
uint64_t SizeOfCodeGeneratedSince(Label* label) const {
|
||||
ASSERT(label->IsBound());
|
||||
ASSERT((pc_ >= label->target()) && (pc_ < (buffer_ + buffer_size_)));
|
||||
VIXL_ASSERT(label->IsBound());
|
||||
VIXL_ASSERT((pc_ >= label->target()) && (pc_ < (buffer_ + buffer_size_)));
|
||||
return pc_ - label->target();
|
||||
}
|
||||
|
||||
@ -1651,7 +1664,7 @@ class Assembler {
|
||||
inline void ReleaseLiteralPool() {
|
||||
if (--literal_pool_monitor_ == 0) {
|
||||
// Has the literal pool been blocked for too long?
|
||||
ASSERT(literals_.empty() ||
|
||||
VIXL_ASSERT(literals_.empty() ||
|
||||
(pc_ < (literals_.back()->pc_ + kMaxLoadLiteralRange)));
|
||||
}
|
||||
}
|
||||
@ -1705,6 +1718,9 @@ class Assembler {
|
||||
FlagsUpdate S,
|
||||
AddSubWithCarryOp op);
|
||||
|
||||
static bool IsImmFP32(float imm);
|
||||
static bool IsImmFP64(double imm);
|
||||
|
||||
// Functions for emulating operands not directly supported by the instruction
|
||||
// set.
|
||||
void EmitShift(const Register& rd,
|
||||
@ -1789,17 +1805,13 @@ class Assembler {
|
||||
const FPRegister& fa,
|
||||
FPDataProcessing3SourceOp op);
|
||||
|
||||
// Encoding helpers.
|
||||
static bool IsImmFP32(float imm);
|
||||
static bool IsImmFP64(double imm);
|
||||
|
||||
void RecordLiteral(int64_t imm, unsigned size);
|
||||
|
||||
// Emit the instruction at pc_.
|
||||
void Emit(Instr instruction) {
|
||||
ASSERT(sizeof(*pc_) == 1);
|
||||
ASSERT(sizeof(instruction) == kInstructionSize);
|
||||
ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
|
||||
VIXL_STATIC_ASSERT(sizeof(*pc_) == 1);
|
||||
VIXL_STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
|
||||
VIXL_ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
|
||||
|
||||
#ifdef DEBUG
|
||||
finalized_ = false;
|
||||
@ -1812,8 +1824,8 @@ class Assembler {
|
||||
|
||||
// Emit data inline in the instruction stream.
|
||||
void EmitData(void const * data, unsigned size) {
|
||||
ASSERT(sizeof(*pc_) == 1);
|
||||
ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
|
||||
VIXL_STATIC_ASSERT(sizeof(*pc_) == 1);
|
||||
VIXL_ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
|
||||
|
||||
#ifdef DEBUG
|
||||
finalized_ = false;
|
||||
@ -1827,7 +1839,7 @@ class Assembler {
|
||||
}
|
||||
|
||||
inline void CheckBufferSpace() {
|
||||
ASSERT(pc_ < (buffer_ + buffer_size_));
|
||||
VIXL_ASSERT(pc_ < (buffer_ + buffer_size_));
|
||||
if (pc_ > next_literal_pool_check_) {
|
||||
CheckLiteralPool();
|
||||
}
|
||||
|
@ -183,7 +183,7 @@ enum Condition {
|
||||
inline Condition InvertCondition(Condition cond) {
|
||||
// Conditions al and nv behave identically, as "always true". They can't be
|
||||
// inverted, because there is no "always false" condition.
|
||||
ASSERT((cond != al) && (cond != nv));
|
||||
VIXL_ASSERT((cond != al) && (cond != nv));
|
||||
return static_cast<Condition>(cond ^ 1);
|
||||
}
|
||||
|
||||
@ -290,7 +290,7 @@ enum SystemRegister {
|
||||
//
|
||||
// The enumerations can be used like this:
|
||||
//
|
||||
// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
|
||||
// VIXL_ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
|
||||
// switch(instr->Mask(PCRelAddressingMask)) {
|
||||
// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
|
||||
// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
|
||||
|
@ -86,8 +86,8 @@ void CPU::EnsureIAndDCacheCoherency(void *address, size_t length) {
|
||||
uintptr_t dsize = static_cast<uintptr_t>(dcache_line_size_);
|
||||
uintptr_t isize = static_cast<uintptr_t>(icache_line_size_);
|
||||
// Cache line sizes are always a power of 2.
|
||||
ASSERT(CountSetBits(dsize, 64) == 1);
|
||||
ASSERT(CountSetBits(isize, 64) == 1);
|
||||
VIXL_ASSERT(CountSetBits(dsize, 64) == 1);
|
||||
VIXL_ASSERT(CountSetBits(isize, 64) == 1);
|
||||
uintptr_t dstart = start & ~(dsize - 1);
|
||||
uintptr_t istart = start & ~(isize - 1);
|
||||
uintptr_t end = start + length;
|
||||
|
@ -24,6 +24,8 @@
|
||||
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifdef USE_SIMULATOR
|
||||
|
||||
#include "a64/debugger-a64.h"
|
||||
|
||||
namespace vixl {
|
||||
@ -86,7 +88,7 @@ class RegisterToken : public ValueToken<const Register> {
|
||||
|
||||
static Token* Tokenize(const char* arg);
|
||||
static RegisterToken* Cast(Token* tok) {
|
||||
ASSERT(tok->IsRegister());
|
||||
VIXL_ASSERT(tok->IsRegister());
|
||||
return reinterpret_cast<RegisterToken*>(tok);
|
||||
}
|
||||
|
||||
@ -108,7 +110,7 @@ class FPRegisterToken : public ValueToken<const FPRegister> {
|
||||
|
||||
static Token* Tokenize(const char* arg);
|
||||
static FPRegisterToken* Cast(Token* tok) {
|
||||
ASSERT(tok->IsFPRegister());
|
||||
VIXL_ASSERT(tok->IsFPRegister());
|
||||
return reinterpret_cast<FPRegisterToken*>(tok);
|
||||
}
|
||||
};
|
||||
@ -132,7 +134,7 @@ class IdentifierToken : public ValueToken<char*> {
|
||||
|
||||
static Token* Tokenize(const char* arg);
|
||||
static IdentifierToken* Cast(Token* tok) {
|
||||
ASSERT(tok->IsIdentifier());
|
||||
VIXL_ASSERT(tok->IsIdentifier());
|
||||
return reinterpret_cast<IdentifierToken*>(tok);
|
||||
}
|
||||
};
|
||||
@ -150,7 +152,7 @@ class AddressToken : public ValueToken<uint8_t*> {
|
||||
|
||||
static Token* Tokenize(const char* arg);
|
||||
static AddressToken* Cast(Token* tok) {
|
||||
ASSERT(tok->IsAddress());
|
||||
VIXL_ASSERT(tok->IsAddress());
|
||||
return reinterpret_cast<AddressToken*>(tok);
|
||||
}
|
||||
};
|
||||
@ -167,7 +169,7 @@ class IntegerToken : public ValueToken<int64_t> {
|
||||
|
||||
static Token* Tokenize(const char* arg);
|
||||
static IntegerToken* Cast(Token* tok) {
|
||||
ASSERT(tok->IsInteger());
|
||||
VIXL_ASSERT(tok->IsInteger());
|
||||
return reinterpret_cast<IntegerToken*>(tok);
|
||||
}
|
||||
};
|
||||
@ -194,7 +196,7 @@ class FormatToken : public Token {
|
||||
|
||||
static Token* Tokenize(const char* arg);
|
||||
static FormatToken* Cast(Token* tok) {
|
||||
ASSERT(tok->IsFormat());
|
||||
VIXL_ASSERT(tok->IsFormat());
|
||||
return reinterpret_cast<FormatToken*>(tok);
|
||||
}
|
||||
};
|
||||
@ -604,7 +606,7 @@ void Debugger::PrintRegister(const Register& target_reg,
|
||||
const uint64_t reg_value = reg<uint64_t>(reg_size,
|
||||
target_reg.code(),
|
||||
Reg31IsStackPointer);
|
||||
ASSERT(count > 0);
|
||||
VIXL_ASSERT(count > 0);
|
||||
|
||||
printf("%s = ", name);
|
||||
for (uint64_t i = 1; i <= count; i++) {
|
||||
@ -625,7 +627,7 @@ void Debugger::PrintFPRegister(const FPRegister& target_fpreg,
|
||||
const uint64_t mask = 0xffffffffffffffff >> (64 - format_size);
|
||||
const uint64_t fpreg_value = fpreg<uint64_t>(fpreg_size,
|
||||
target_fpreg.code());
|
||||
ASSERT(count > 0);
|
||||
VIXL_ASSERT(count > 0);
|
||||
|
||||
if (target_fpreg.Is32Bits()) {
|
||||
printf("s%u = ", target_fpreg.code());
|
||||
@ -713,8 +715,8 @@ char* Debugger::ReadCommandLine(const char* prompt, char* buffer, int length) {
|
||||
}
|
||||
|
||||
// Remove the newline from the end of the command.
|
||||
ASSERT(end[1] == '\0');
|
||||
ASSERT((end - buffer) < (length - 1));
|
||||
VIXL_ASSERT(end[1] == '\0');
|
||||
VIXL_ASSERT((end - buffer) < (length - 1));
|
||||
end[0] = '\0';
|
||||
|
||||
return buffer;
|
||||
@ -760,7 +762,7 @@ void Debugger::RunDebuggerShell() {
|
||||
|
||||
|
||||
void Debugger::DoBreakpoint(Instruction* instr) {
|
||||
ASSERT(instr->Mask(ExceptionMask) == BRK);
|
||||
VIXL_ASSERT(instr->Mask(ExceptionMask) == BRK);
|
||||
|
||||
printf("Hit breakpoint at pc=%p.\n", reinterpret_cast<void*>(instr));
|
||||
set_debug_parameters(debug_parameters() | DBG_BREAK | DBG_ACTIVE);
|
||||
@ -770,8 +772,8 @@ void Debugger::DoBreakpoint(Instruction* instr) {
|
||||
|
||||
|
||||
void Debugger::DoUnreachable(Instruction* instr) {
|
||||
ASSERT((instr->Mask(ExceptionMask) == HLT) &&
|
||||
(instr->ImmException() == kUnreachableOpcode));
|
||||
VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
|
||||
(instr->ImmException() == kUnreachableOpcode));
|
||||
|
||||
fprintf(stream_, "Hit UNREACHABLE marker at pc=%p.\n",
|
||||
reinterpret_cast<void*>(instr));
|
||||
@ -780,14 +782,14 @@ void Debugger::DoUnreachable(Instruction* instr) {
|
||||
|
||||
|
||||
void Debugger::DoTrace(Instruction* instr) {
|
||||
ASSERT((instr->Mask(ExceptionMask) == HLT) &&
|
||||
(instr->ImmException() == kTraceOpcode));
|
||||
VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
|
||||
(instr->ImmException() == kTraceOpcode));
|
||||
|
||||
// Read the arguments encoded inline in the instruction stream.
|
||||
uint32_t parameters;
|
||||
uint32_t command;
|
||||
|
||||
ASSERT(sizeof(*instr) == 1);
|
||||
VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
|
||||
memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
|
||||
memcpy(&command, instr + kTraceCommandOffset, sizeof(command));
|
||||
|
||||
@ -799,7 +801,7 @@ void Debugger::DoTrace(Instruction* instr) {
|
||||
set_log_parameters(log_parameters() & ~parameters);
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
VIXL_UNREACHABLE();
|
||||
}
|
||||
|
||||
set_pc(instr->InstructionAtOffset(kTraceLength));
|
||||
@ -807,17 +809,17 @@ void Debugger::DoTrace(Instruction* instr) {
|
||||
|
||||
|
||||
void Debugger::DoLog(Instruction* instr) {
|
||||
ASSERT((instr->Mask(ExceptionMask) == HLT) &&
|
||||
(instr->ImmException() == kLogOpcode));
|
||||
VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
|
||||
(instr->ImmException() == kLogOpcode));
|
||||
|
||||
// Read the arguments encoded inline in the instruction stream.
|
||||
uint32_t parameters;
|
||||
|
||||
ASSERT(sizeof(*instr) == 1);
|
||||
VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
|
||||
memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
|
||||
|
||||
// We don't support a one-shot LOG_DISASM.
|
||||
ASSERT((parameters & LOG_DISASM) == 0);
|
||||
VIXL_ASSERT((parameters & LOG_DISASM) == 0);
|
||||
// Print the requested information.
|
||||
if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
|
||||
if (parameters & LOG_REGS) PrintRegisters(true);
|
||||
@ -879,7 +881,7 @@ static bool StringToInt64(int64_t* value, const char* line, int base = 10) {
|
||||
|
||||
uint8_t* Token::ToAddress(Debugger* debugger) const {
|
||||
USE(debugger);
|
||||
UNREACHABLE();
|
||||
VIXL_UNREACHABLE();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -922,7 +924,7 @@ Token* Token::Tokenize(const char* arg) {
|
||||
|
||||
|
||||
uint8_t* RegisterToken::ToAddress(Debugger* debugger) const {
|
||||
ASSERT(CanAddressMemory());
|
||||
VIXL_ASSERT(CanAddressMemory());
|
||||
uint64_t reg_value = debugger->xreg(value().code(), Reg31IsStackPointer);
|
||||
uint8_t* address = NULL;
|
||||
memcpy(&address, &reg_value, sizeof(address));
|
||||
@ -931,7 +933,7 @@ uint8_t* RegisterToken::ToAddress(Debugger* debugger) const {
|
||||
|
||||
|
||||
void RegisterToken::Print(FILE* out) const {
|
||||
ASSERT(value().IsValid());
|
||||
VIXL_ASSERT(value().IsValid());
|
||||
fprintf(out, "[Register %s]", Name());
|
||||
}
|
||||
|
||||
@ -967,7 +969,7 @@ Token* RegisterToken::Tokenize(const char* arg) {
|
||||
|
||||
|
||||
void FPRegisterToken::Print(FILE* out) const {
|
||||
ASSERT(value().IsValid());
|
||||
VIXL_ASSERT(value().IsValid());
|
||||
char prefix = value().Is32Bits() ? 's' : 'd';
|
||||
fprintf(out, "[FPRegister %c%" PRIu32 "]", prefix, value().code());
|
||||
}
|
||||
@ -995,7 +997,7 @@ Token* FPRegisterToken::Tokenize(const char* arg) {
|
||||
switch (*arg) {
|
||||
case 's': fpreg = FPRegister::SRegFromCode(code); break;
|
||||
case 'd': fpreg = FPRegister::DRegFromCode(code); break;
|
||||
default: UNREACHABLE();
|
||||
default: VIXL_UNREACHABLE();
|
||||
}
|
||||
|
||||
return new FPRegisterToken(fpreg);
|
||||
@ -1006,7 +1008,7 @@ Token* FPRegisterToken::Tokenize(const char* arg) {
|
||||
|
||||
|
||||
uint8_t* IdentifierToken::ToAddress(Debugger* debugger) const {
|
||||
ASSERT(CanAddressMemory());
|
||||
VIXL_ASSERT(CanAddressMemory());
|
||||
Instruction* pc_value = debugger->pc();
|
||||
uint8_t* address = NULL;
|
||||
memcpy(&address, &pc_value, sizeof(address));
|
||||
@ -1142,7 +1144,7 @@ Token* FormatToken::Tokenize(const char* arg) {
|
||||
default: return NULL;
|
||||
}
|
||||
default:
|
||||
UNREACHABLE();
|
||||
VIXL_UNREACHABLE();
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
@ -1150,7 +1152,8 @@ Token* FormatToken::Tokenize(const char* arg) {
|
||||
|
||||
template<typename T>
|
||||
void Format<T>::Print(FILE* out) const {
|
||||
fprintf(out, "[Format %c%lu - %s]", type_code_, sizeof(T) * 8, fmt_);
|
||||
unsigned size = sizeof(T) * 8;
|
||||
fprintf(out, "[Format %c%u - %s]", type_code_, size, fmt_);
|
||||
}
|
||||
|
||||
|
||||
@ -1222,8 +1225,8 @@ DebugCommand* DebugCommand::Parse(char* line) {
|
||||
void DebugCommand::PrintHelp(const char** aliases,
|
||||
const char* args,
|
||||
const char* help) {
|
||||
ASSERT(aliases[0] != NULL);
|
||||
ASSERT(help != NULL);
|
||||
VIXL_ASSERT(aliases[0] != NULL);
|
||||
VIXL_ASSERT(help != NULL);
|
||||
|
||||
printf("\n----\n\n");
|
||||
for (const char** current = aliases; *current != NULL; current++) {
|
||||
@ -1238,7 +1241,7 @@ void DebugCommand::PrintHelp(const char** aliases,
|
||||
|
||||
|
||||
bool HelpCommand::Run(Debugger* debugger) {
|
||||
ASSERT(debugger->IsDebuggerRunning());
|
||||
VIXL_ASSERT(debugger->IsDebuggerRunning());
|
||||
USE(debugger);
|
||||
|
||||
#define PRINT_HELP(Command) \
|
||||
@ -1263,7 +1266,7 @@ DebugCommand* HelpCommand::Build(std::vector<Token*> args) {
|
||||
|
||||
|
||||
bool ContinueCommand::Run(Debugger* debugger) {
|
||||
ASSERT(debugger->IsDebuggerRunning());
|
||||
VIXL_ASSERT(debugger->IsDebuggerRunning());
|
||||
|
||||
debugger->set_debug_parameters(debugger->debug_parameters() & ~DBG_ACTIVE);
|
||||
return true;
|
||||
@ -1280,7 +1283,7 @@ DebugCommand* ContinueCommand::Build(std::vector<Token*> args) {
|
||||
|
||||
|
||||
bool StepCommand::Run(Debugger* debugger) {
|
||||
ASSERT(debugger->IsDebuggerRunning());
|
||||
VIXL_ASSERT(debugger->IsDebuggerRunning());
|
||||
|
||||
int64_t steps = count();
|
||||
if (steps < 0) {
|
||||
@ -1355,7 +1358,7 @@ void PrintCommand::Print(FILE* out) {
|
||||
|
||||
|
||||
bool PrintCommand::Run(Debugger* debugger) {
|
||||
ASSERT(debugger->IsDebuggerRunning());
|
||||
VIXL_ASSERT(debugger->IsDebuggerRunning());
|
||||
|
||||
Token* tok = target();
|
||||
if (tok->IsIdentifier()) {
|
||||
@ -1376,7 +1379,7 @@ bool PrintCommand::Run(Debugger* debugger) {
|
||||
}
|
||||
|
||||
FormatToken* format_tok = format();
|
||||
ASSERT(format_tok != NULL);
|
||||
VIXL_ASSERT(format_tok != NULL);
|
||||
if (format_tok->type_code() == 'i') {
|
||||
// TODO(all): Add support for instruction disassembly.
|
||||
printf(" ** unsupported format: instructions **\n");
|
||||
@ -1396,7 +1399,7 @@ bool PrintCommand::Run(Debugger* debugger) {
|
||||
return false;
|
||||
}
|
||||
|
||||
UNREACHABLE();
|
||||
VIXL_UNREACHABLE();
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -1431,13 +1434,13 @@ DebugCommand* PrintCommand::Build(std::vector<Token*> args) {
|
||||
switch (target_size) {
|
||||
case 4: format = new Format<uint32_t>("%08" PRIx32, 'x'); break;
|
||||
case 8: format = new Format<uint64_t>("%016" PRIx64, 'x'); break;
|
||||
default: UNREACHABLE();
|
||||
default: VIXL_UNREACHABLE();
|
||||
}
|
||||
} else if (target->IsFPRegister()) {
|
||||
switch (target_size) {
|
||||
case 4: format = new Format<float>("%8g", 'f'); break;
|
||||
case 8: format = new Format<double>("%8g", 'f'); break;
|
||||
default: UNREACHABLE();
|
||||
default: VIXL_UNREACHABLE();
|
||||
}
|
||||
}
|
||||
break;
|
||||
@ -1469,7 +1472,7 @@ DebugCommand* PrintCommand::Build(std::vector<Token*> args) {
|
||||
|
||||
|
||||
bool ExamineCommand::Run(Debugger* debugger) {
|
||||
ASSERT(debugger->IsDebuggerRunning());
|
||||
VIXL_ASSERT(debugger->IsDebuggerRunning());
|
||||
|
||||
uint8_t* address = target()->ToAddress(debugger);
|
||||
int64_t amount = count()->value();
|
||||
@ -1522,7 +1525,7 @@ DebugCommand* ExamineCommand::Build(std::vector<Token*> args) {
|
||||
} else {
|
||||
return new InvalidCommand(args, 2, "expects format or integer");
|
||||
}
|
||||
UNREACHABLE();
|
||||
VIXL_UNREACHABLE();
|
||||
break;
|
||||
}
|
||||
case 4: { // mem addr.format n
|
||||
@ -1552,7 +1555,7 @@ UnknownCommand::~UnknownCommand() {
|
||||
|
||||
|
||||
bool UnknownCommand::Run(Debugger* debugger) {
|
||||
ASSERT(debugger->IsDebuggerRunning());
|
||||
VIXL_ASSERT(debugger->IsDebuggerRunning());
|
||||
USE(debugger);
|
||||
|
||||
printf(" ** Unknown Command:");
|
||||
@ -1576,7 +1579,7 @@ InvalidCommand::~InvalidCommand() {
|
||||
|
||||
|
||||
bool InvalidCommand::Run(Debugger* debugger) {
|
||||
ASSERT(debugger->IsDebuggerRunning());
|
||||
VIXL_ASSERT(debugger->IsDebuggerRunning());
|
||||
USE(debugger);
|
||||
|
||||
printf(" ** Invalid Command:");
|
||||
@ -1598,3 +1601,5 @@ bool InvalidCommand::Run(Debugger* debugger) {
|
||||
}
|
||||
|
||||
} // namespace vixl
|
||||
|
||||
#endif // USE_SIMULATOR
|
||||
|
@ -137,7 +137,7 @@ class Debugger : public Simulator {
|
||||
// back control.
|
||||
inline int steps() { return steps_; }
|
||||
inline void set_steps(int value) {
|
||||
ASSERT(value > 1);
|
||||
VIXL_ASSERT(value > 1);
|
||||
steps_ = value;
|
||||
}
|
||||
|
||||
|
@ -132,7 +132,7 @@ void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
|
||||
}
|
||||
// We reached the end of the list. The last element must be
|
||||
// registered_visitor.
|
||||
ASSERT(*it == registered_visitor);
|
||||
VIXL_ASSERT(*it == registered_visitor);
|
||||
visitors_.insert(it, new_visitor);
|
||||
}
|
||||
|
||||
@ -150,7 +150,7 @@ void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
|
||||
}
|
||||
// We reached the end of the list. The last element must be
|
||||
// registered_visitor.
|
||||
ASSERT(*it == registered_visitor);
|
||||
VIXL_ASSERT(*it == registered_visitor);
|
||||
visitors_.push_back(new_visitor);
|
||||
}
|
||||
|
||||
@ -161,16 +161,16 @@ void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
|
||||
|
||||
|
||||
void Decoder::DecodePCRelAddressing(Instruction* instr) {
|
||||
ASSERT(instr->Bits(27, 24) == 0x0);
|
||||
VIXL_ASSERT(instr->Bits(27, 24) == 0x0);
|
||||
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
|
||||
// decode.
|
||||
ASSERT(instr->Bit(28) == 0x1);
|
||||
VIXL_ASSERT(instr->Bit(28) == 0x1);
|
||||
VisitPCRelAddressing(instr);
|
||||
}
|
||||
|
||||
|
||||
void Decoder::DecodeBranchSystemException(Instruction* instr) {
|
||||
ASSERT((instr->Bits(27, 24) == 0x4) ||
|
||||
VIXL_ASSERT((instr->Bits(27, 24) == 0x4) ||
|
||||
(instr->Bits(27, 24) == 0x5) ||
|
||||
(instr->Bits(27, 24) == 0x6) ||
|
||||
(instr->Bits(27, 24) == 0x7) );
|
||||
@ -271,7 +271,7 @@ void Decoder::DecodeBranchSystemException(Instruction* instr) {
|
||||
|
||||
|
||||
void Decoder::DecodeLoadStore(Instruction* instr) {
|
||||
ASSERT((instr->Bits(27, 24) == 0x8) ||
|
||||
VIXL_ASSERT((instr->Bits(27, 24) == 0x8) ||
|
||||
(instr->Bits(27, 24) == 0x9) ||
|
||||
(instr->Bits(27, 24) == 0xC) ||
|
||||
(instr->Bits(27, 24) == 0xD) );
|
||||
@ -390,7 +390,7 @@ void Decoder::DecodeLoadStore(Instruction* instr) {
|
||||
|
||||
|
||||
void Decoder::DecodeLogical(Instruction* instr) {
|
||||
ASSERT(instr->Bits(27, 24) == 0x2);
|
||||
VIXL_ASSERT(instr->Bits(27, 24) == 0x2);
|
||||
|
||||
if (instr->Mask(0x80400000) == 0x00400000) {
|
||||
VisitUnallocated(instr);
|
||||
@ -409,7 +409,7 @@ void Decoder::DecodeLogical(Instruction* instr) {
|
||||
|
||||
|
||||
void Decoder::DecodeBitfieldExtract(Instruction* instr) {
|
||||
ASSERT(instr->Bits(27, 24) == 0x3);
|
||||
VIXL_ASSERT(instr->Bits(27, 24) == 0x3);
|
||||
|
||||
if ((instr->Mask(0x80400000) == 0x80000000) ||
|
||||
(instr->Mask(0x80400000) == 0x00400000) ||
|
||||
@ -434,7 +434,7 @@ void Decoder::DecodeBitfieldExtract(Instruction* instr) {
|
||||
|
||||
|
||||
void Decoder::DecodeAddSubImmediate(Instruction* instr) {
|
||||
ASSERT(instr->Bits(27, 24) == 0x1);
|
||||
VIXL_ASSERT(instr->Bits(27, 24) == 0x1);
|
||||
if (instr->Bit(23) == 1) {
|
||||
VisitUnallocated(instr);
|
||||
} else {
|
||||
@ -444,8 +444,8 @@ void Decoder::DecodeAddSubImmediate(Instruction* instr) {
|
||||
|
||||
|
||||
void Decoder::DecodeDataProcessing(Instruction* instr) {
|
||||
ASSERT((instr->Bits(27, 24) == 0xA) ||
|
||||
(instr->Bits(27, 24) == 0xB) );
|
||||
VIXL_ASSERT((instr->Bits(27, 24) == 0xA) ||
|
||||
(instr->Bits(27, 24) == 0xB));
|
||||
|
||||
if (instr->Bit(24) == 0) {
|
||||
if (instr->Bit(28) == 0) {
|
||||
@ -559,8 +559,8 @@ void Decoder::DecodeDataProcessing(Instruction* instr) {
|
||||
|
||||
|
||||
void Decoder::DecodeFP(Instruction* instr) {
|
||||
ASSERT((instr->Bits(27, 24) == 0xE) ||
|
||||
(instr->Bits(27, 24) == 0xF) );
|
||||
VIXL_ASSERT((instr->Bits(27, 24) == 0xE) ||
|
||||
(instr->Bits(27, 24) == 0xF));
|
||||
|
||||
if (instr->Bit(28) == 0) {
|
||||
DecodeAdvSIMDDataProcessing(instr);
|
||||
@ -665,14 +665,14 @@ void Decoder::DecodeFP(Instruction* instr) {
|
||||
VisitFPConditionalSelect(instr);
|
||||
break;
|
||||
}
|
||||
default: UNREACHABLE();
|
||||
default: VIXL_UNREACHABLE();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Bit 30 == 1 has been handled earlier.
|
||||
ASSERT(instr->Bit(30) == 0);
|
||||
VIXL_ASSERT(instr->Bit(30) == 0);
|
||||
if (instr->Mask(0xA0800000) != 0) {
|
||||
VisitUnallocated(instr);
|
||||
} else {
|
||||
@ -687,21 +687,21 @@ void Decoder::DecodeFP(Instruction* instr) {
|
||||
|
||||
void Decoder::DecodeAdvSIMDLoadStore(Instruction* instr) {
|
||||
// TODO: Implement Advanced SIMD load/store instruction decode.
|
||||
ASSERT(instr->Bits(29, 25) == 0x6);
|
||||
VIXL_ASSERT(instr->Bits(29, 25) == 0x6);
|
||||
VisitUnimplemented(instr);
|
||||
}
|
||||
|
||||
|
||||
void Decoder::DecodeAdvSIMDDataProcessing(Instruction* instr) {
|
||||
// TODO: Implement Advanced SIMD data processing instruction decode.
|
||||
ASSERT(instr->Bits(27, 25) == 0x7);
|
||||
VIXL_ASSERT(instr->Bits(27, 25) == 0x7);
|
||||
VisitUnimplemented(instr);
|
||||
}
|
||||
|
||||
|
||||
#define DEFINE_VISITOR_CALLERS(A) \
|
||||
void Decoder::Visit##A(Instruction *instr) { \
|
||||
ASSERT(instr->Mask(A##FMask) == A##Fixed); \
|
||||
VIXL_ASSERT(instr->Mask(A##FMask) == A##Fixed); \
|
||||
std::list<DecoderVisitor*>::iterator it; \
|
||||
for (it = visitors_.begin(); it != visitors_.end(); it++) { \
|
||||
(*it)->Visit##A(instr); \
|
||||
|
@ -95,7 +95,7 @@ void Disassembler::VisitAddSubImmediate(Instruction* instr) {
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: UNREACHABLE();
|
||||
default: VIXL_UNREACHABLE();
|
||||
}
|
||||
Format(instr, mnemonic, form);
|
||||
}
|
||||
@ -142,7 +142,7 @@ void Disassembler::VisitAddSubShifted(Instruction* instr) {
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: UNREACHABLE();
|
||||
default: VIXL_UNREACHABLE();
|
||||
}
|
||||
Format(instr, mnemonic, form);
|
||||
}
|
||||
@ -180,7 +180,7 @@ void Disassembler::VisitAddSubExtended(Instruction* instr) {
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: UNREACHABLE();
|
||||
default: VIXL_UNREACHABLE();
|
||||
}
|
||||
Format(instr, mnemonic, form);
|
||||
}
|
||||
@ -215,7 +215,7 @@ void Disassembler::VisitAddSubWithCarry(Instruction* instr) {
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: UNREACHABLE();
|
||||
default: VIXL_UNREACHABLE();
|
||||
}
|
||||
Format(instr, mnemonic, form);
|
||||
}
|
||||
@ -258,30 +258,30 @@ void Disassembler::VisitLogicalImmediate(Instruction* instr) {
|
||||
}
|
||||
break;
|
||||
}
|
||||
default: UNREACHABLE();
|
||||
default: VIXL_UNREACHABLE();
|
||||
}
|
||||
Format(instr, mnemonic, form);
|
||||
}
|
||||
|
||||
|
||||
bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
|
||||
ASSERT((reg_size == kXRegSize) ||
|
||||
((reg_size == kWRegSize) && (value <= 0xffffffff)));
|
||||
VIXL_ASSERT((reg_size == kXRegSize) ||
|
||||
((reg_size == kWRegSize) && (value <= 0xffffffff)));
|
||||
|
||||
// Test for movz: 16 bits set at positions 0, 16, 32 or 48.
|
||||
if (((value & 0xffffffffffff0000UL) == 0UL) ||
|
||||
((value & 0xffffffff0000ffffUL) == 0UL) ||
|
||||
((value & 0xffff0000ffffffffUL) == 0UL) ||
|
||||
((value & 0x0000ffffffffffffUL) == 0UL)) {
|
||||
if (((value & 0xffffffffffff0000) == 0) ||
|
||||
((value & 0xffffffff0000ffff) == 0) ||
((value & 0xffff0000ffffffff) == 0) ||
((value & 0x0000ffffffffffff) == 0)) {
return true;
}

// Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48).
if ((reg_size == kXRegSize) &&
(((value & 0xffffffffffff0000UL) == 0xffffffffffff0000UL) ||
((value & 0xffffffff0000ffffUL) == 0xffffffff0000ffffUL) ||
((value & 0xffff0000ffffffffUL) == 0xffff0000ffffffffUL) ||
((value & 0x0000ffffffffffffUL) == 0x0000ffffffffffffUL))) {
(((value & 0xffffffffffff0000) == 0xffffffffffff0000) ||
((value & 0xffffffff0000ffff) == 0xffffffff0000ffff) ||
((value & 0xffff0000ffffffff) == 0xffff0000ffffffff) ||
((value & 0x0000ffffffffffff) == 0x0000ffffffffffff))) {
return true;
}
if ((reg_size == kWRegSize) &&
@ -337,7 +337,7 @@ void Disassembler::VisitLogicalShifted(Instruction* instr) {
}
break;
}
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}

Format(instr, mnemonic, form);
@ -353,7 +353,7 @@ void Disassembler::VisitConditionalCompareRegister(Instruction* instr) {
case CCMN_x: mnemonic = "ccmn"; break;
case CCMP_w:
case CCMP_x: mnemonic = "ccmp"; break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -368,7 +368,7 @@ void Disassembler::VisitConditionalCompareImmediate(Instruction* instr) {
case CCMN_x_imm: mnemonic = "ccmn"; break;
case CCMP_w_imm:
case CCMP_x_imm: mnemonic = "ccmp"; break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -421,7 +421,7 @@ void Disassembler::VisitConditionalSelect(Instruction* instr) {
}
break;
}
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -520,7 +520,7 @@ void Disassembler::VisitExtract(Instruction* instr) {
}
break;
}
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -538,7 +538,7 @@ void Disassembler::VisitPCRelAddressing(Instruction* instr) {
void Disassembler::VisitConditionalBranch(Instruction* instr) {
switch (instr->Mask(ConditionalBranchMask)) {
case B_cond: Format(instr, "b.'CBrn", "'BImmCond"); break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
}

@ -570,7 +570,7 @@ void Disassembler::VisitUnconditionalBranch(Instruction* instr) {
switch (instr->Mask(UnconditionalBranchMask)) {
case B: mnemonic = "b"; break;
case BL: mnemonic = "bl"; break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -591,7 +591,7 @@ void Disassembler::VisitDataProcessing1Source(Instruction* instr) {
FORMAT(CLS, "cls");
#undef FORMAT
case REV32_x: mnemonic = "rev32"; break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -690,7 +690,7 @@ void Disassembler::VisitDataProcessing3Source(Instruction* instr) {
form = form_xxx;
break;
}
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -705,7 +705,7 @@ void Disassembler::VisitCompareBranch(Instruction* instr) {
case CBZ_x: mnemonic = "cbz"; break;
case CBNZ_w:
case CBNZ_x: mnemonic = "cbnz"; break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -722,7 +722,7 @@ void Disassembler::VisitTestBranch(Instruction* instr) {
switch (instr->Mask(TestBranchMask)) {
case TBZ: mnemonic = "tbz"; break;
case TBNZ: mnemonic = "tbnz"; break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -742,7 +742,7 @@ void Disassembler::VisitMoveWideImmediate(Instruction* instr) {
case MOVZ_x: mnemonic = "movz"; break;
case MOVK_w:
case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -981,7 +981,7 @@ void Disassembler::VisitFPConditionalSelect(Instruction* instr) {
switch (instr->Mask(FPConditionalSelectMask)) {
case FCSEL_s:
case FCSEL_d: mnemonic = "fcsel"; break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -1033,7 +1033,7 @@ void Disassembler::VisitFPDataProcessing2Source(Instruction* instr) {
FORMAT(FMINNM, "fminnm");
FORMAT(FNMUL, "fnmul");
#undef FORMAT
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -1052,7 +1052,7 @@ void Disassembler::VisitFPDataProcessing3Source(Instruction* instr) {
FORMAT(FNMADD, "fnmadd");
FORMAT(FNMSUB, "fnmsub");
#undef FORMAT
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -1065,7 +1065,7 @@ void Disassembler::VisitFPImmediate(Instruction* instr) {
switch (instr->Mask(FPImmediateMask)) {
case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -1149,7 +1149,7 @@ void Disassembler::VisitFPFixedPointConvert(Instruction* instr) {
case UCVTF_sx_fixed:
case UCVTF_dw_fixed:
case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@ -1184,7 +1184,7 @@ void Disassembler::VisitSystem(Instruction* instr) {
}
}
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
ASSERT(instr->Mask(SystemHintMask) == HINT);
VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) {
case NOP: {
mnemonic = "nop";
@ -1252,7 +1252,7 @@ void Disassembler::ProcessOutput(Instruction* /*instr*/) {

void Disassembler::Format(Instruction* instr, const char* mnemonic,
const char* format) {
ASSERT(mnemonic != NULL);
VIXL_ASSERT(mnemonic != NULL);
ResetOutput();
Substitute(instr, mnemonic);
if (format != NULL) {
@ -1296,7 +1296,7 @@ int Disassembler::SubstituteField(Instruction* instr, const char* format) {
case 'O': return SubstituteLSRegOffsetField(instr, format);
case 'M': return SubstituteBarrierField(instr, format);
default: {
UNREACHABLE();
VIXL_UNREACHABLE();
return 1;
}
}
@ -1321,7 +1321,7 @@ int Disassembler::SubstituteRegisterField(Instruction* instr,
}
break;
}
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}

// Increase field length for registers tagged as stack.
@ -1358,7 +1358,7 @@ int Disassembler::SubstituteRegisterField(Instruction* instr,

int Disassembler::SubstituteImmediateField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'I');
VIXL_ASSERT(format[0] == 'I');

switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
@ -1366,7 +1366,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
AppendToOutput("#0x%" PRIx64, imm);
} else {
ASSERT(format[5] == 'L');
VIXL_ASSERT(format[5] == 'L');
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
@ -1411,7 +1411,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
return 6;
}
case 'A': { // IAddSub.
ASSERT(instr->ShiftAddSub() <= 1);
VIXL_ASSERT(instr->ShiftAddSub() <= 1);
int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
return 7;
@ -1459,7 +1459,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
return 6;
}
default: {
UNIMPLEMENTED();
VIXL_UNIMPLEMENTED();
return 0;
}
}
@ -1468,7 +1468,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,

int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
const char* format) {
ASSERT((format[0] == 'I') && (format[1] == 'B'));
VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B'));
unsigned r = instr->ImmR();
unsigned s = instr->ImmS();
@ -1482,19 +1482,19 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
AppendToOutput("#%d", s + 1);
return 5;
} else {
ASSERT(format[3] == '-');
VIXL_ASSERT(format[3] == '-');
AppendToOutput("#%d", s - r + 1);
return 7;
}
}
case 'Z': { // IBZ-r.
ASSERT((format[3] == '-') && (format[4] == 'r'));
VIXL_ASSERT((format[3] == '-') && (format[4] == 'r'));
unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
AppendToOutput("#%d", reg_size - r);
return 5;
}
default: {
UNREACHABLE();
VIXL_UNREACHABLE();
return 0;
}
}
@ -1503,7 +1503,7 @@ int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,

int Disassembler::SubstituteLiteralField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "LValue", 6) == 0);
VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
USE(format);

switch (instr->Mask(LoadLiteralMask)) {
@ -1511,7 +1511,7 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,
case LDR_x_lit:
case LDR_s_lit:
case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}

return 6;
@ -1519,12 +1519,12 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,


int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
ASSERT(format[0] == 'H');
ASSERT(instr->ShiftDP() <= 0x3);
VIXL_ASSERT(format[0] == 'H');
VIXL_ASSERT(instr->ShiftDP() <= 0x3);

switch (format[1]) {
case 'D': { // HDP.
ASSERT(instr->ShiftDP() != ROR);
VIXL_ASSERT(instr->ShiftDP() != ROR);
} // Fall through.
case 'L': { // HLo.
if (instr->ImmDPShift() != 0) {
@ -1535,7 +1535,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
return 3;
}
default:
UNIMPLEMENTED();
VIXL_UNIMPLEMENTED();
return 0;
}
}
@ -1543,7 +1543,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {

int Disassembler::SubstituteConditionField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'C');
VIXL_ASSERT(format[0] == 'C');
const char* condition_code[] = { "eq", "ne", "hs", "lo",
"mi", "pl", "vs", "vc",
"hi", "ls", "ge", "lt",
@ -1565,27 +1565,27 @@ int Disassembler::SubstituteConditionField(Instruction* instr,
int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
const char* format) {
USE(format);
ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
VIXL_ASSERT(strncmp(format, "AddrPCRel", 9) == 0);

int offset = instr->ImmPCRel();

// Only ADR (AddrPCRelByte) is supported.
ASSERT(strcmp(format, "AddrPCRelByte") == 0);
VIXL_ASSERT(strcmp(format, "AddrPCRelByte") == 0);

char sign = '+';
if (offset < 0) {
offset = -offset;
sign = '-';
}
// TODO: Extend this to support printing the target address.
AppendToOutput("#%c0x%x", sign, offset);
VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
AppendToOutput("#%c0x%x (addr %p)", sign, offset, instr + offset);
return 13;
}


int Disassembler::SubstituteBranchTargetField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "BImm", 4) == 0);
VIXL_ASSERT(strncmp(format, "BImm", 4) == 0);

int64_t offset = 0;
switch (format[5]) {
@ -1597,7 +1597,7 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,
case 'm': offset = instr->ImmCmpBranch(); break;
// BImmTest - test and branch immediate.
case 'e': offset = instr->ImmTestBranch(); break;
default: UNIMPLEMENTED();
default: VIXL_UNIMPLEMENTED();
}
offset <<= kInstructionSizeLog2;
char sign = '+';
@ -1605,15 +1605,16 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,
offset = -offset;
sign = '-';
}
AppendToOutput("#%c0x%" PRIx64, sign, offset);
VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, offset, instr + offset);
return 8;
}


int Disassembler::SubstituteExtendField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "Ext", 3) == 0);
ASSERT(instr->ExtendMode() <= 7);
VIXL_ASSERT(strncmp(format, "Ext", 3) == 0);
VIXL_ASSERT(instr->ExtendMode() <= 7);
USE(format);

const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
@ -1639,7 +1640,7 @@ int Disassembler::SubstituteExtendField(Instruction* instr,

int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
const char* format) {
ASSERT(strncmp(format, "Offsetreg", 9) == 0);
VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0);
const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
"undefined", "undefined", "sxtw", "sxtx" };
USE(format);
@ -1668,7 +1669,7 @@ int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,

int Disassembler::SubstitutePrefetchField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'P');
VIXL_ASSERT(format[0] == 'P');
USE(format);

int prefetch_mode = instr->PrefetchMode();
@ -1683,7 +1684,7 @@ int Disassembler::SubstitutePrefetchField(Instruction* instr,

int Disassembler::SubstituteBarrierField(Instruction* instr,
const char* format) {
ASSERT(format[0] == 'M');
VIXL_ASSERT(format[0] == 'M');
USE(format);

static const char* options[4][4] = {
@ -33,20 +33,20 @@ namespace vixl {
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
ASSERT(width <= 64);
VIXL_ASSERT(width <= 64);
rotate &= 63;
return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
(value >> rotate);
return ((value & ((UINT64_C(1) << rotate) - 1)) <<
(width - rotate)) | (value >> rotate);
}


static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
uint64_t value,
unsigned width) {
ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
uint64_t result = value & ((1UL << width) - 1UL);
VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
uint64_t result = value & ((UINT64_C(1) << width) - 1);
for (unsigned i = width; i < reg_size; i *= 2) {
result |= (result << i);
}
@ -84,7 +84,7 @@ uint64_t Instruction::ImmLogical() {
if (imm_s == 0x3F) {
return 0;
}
uint64_t bits = (1UL << (imm_s + 1)) - 1;
uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
return RotateRight(bits, imm_r, 64);
} else {
if ((imm_s >> 1) == 0x1F) {
@ -96,14 +96,14 @@ uint64_t Instruction::ImmLogical() {
if ((imm_s & mask) == mask) {
return 0;
}
uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
return RepeatBitsAcrossReg(reg_size,
RotateRight(bits, imm_r & mask, width),
width);
}
}
}
UNREACHABLE();
VIXL_UNREACHABLE();
return 0;
}

@ -155,7 +155,7 @@ Instruction* Instruction::ImmPCOffsetTarget() {
offset = ImmPCRel();
} else {
// All PC-relative branches.
ASSERT(BranchType() != UnknownBranchType);
VIXL_ASSERT(BranchType() != UnknownBranchType);
// Relative branch offsets are instruction-size-aligned.
offset = ImmBranch() << kInstructionSizeLog2;
}
@ -169,7 +169,7 @@ inline int Instruction::ImmBranch() const {
case UncondBranchType: return ImmUncondBranch();
case CompareBranchType: return ImmCmpBranch();
case TestBranchType: return ImmTestBranch();
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
return 0;
}
@ -186,7 +186,7 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {

void Instruction::SetPCRelImmTarget(Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
ASSERT(Mask(PCRelAddressingMask) == ADR);
VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);

Instr imm = Assembler::ImmPCRelAddress(target - this);

@ -195,7 +195,7 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {


void Instruction::SetBranchImmTarget(Instruction* target) {
ASSERT(((target - this) & 3) == 0);
VIXL_ASSERT(((target - this) & 3) == 0);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
int offset = (target - this) >> kInstructionSizeLog2;
@ -220,14 +220,14 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
imm_mask = ImmTestBranch_mask;
break;
}
default: UNREACHABLE();
default: VIXL_UNREACHABLE();
}
SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetImmLLiteral(Instruction* source) {
ASSERT(((source - this) & 3) == 0);
VIXL_ASSERT(((source - this) & 3) == 0);
int offset = (source - this) >> kLiteralEntrySizeLog2;
Instr imm = Assembler::ImmLLiteral(offset);
Instr mask = ImmLLiteral_mask;
@ -44,28 +44,34 @@ const unsigned kMaxLoadLiteralRange = 1 * MBytes;
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const int64_t kWRegMask = 0x00000000ffffffffL;
const int64_t kXRegMask = 0xffffffffffffffffL;
const int64_t kSRegMask = 0x00000000ffffffffL;
const int64_t kDRegMask = 0xffffffffffffffffL;
const int64_t kXSignMask = 0x1L << 63;
const int64_t kWSignMask = 0x1L << 31;
const int64_t kByteMask = 0xffL;
const int64_t kHalfWordMask = 0xffffL;
const int64_t kWordMask = 0xffffffffL;
const uint64_t kXMaxUInt = 0xffffffffffffffffUL;
const uint64_t kWMaxUInt = 0xffffffffUL;
const int64_t kXMaxInt = 0x7fffffffffffffffL;
const int64_t kXMinInt = 0x8000000000000000L;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const uint64_t kWRegMask = 0xffffffff;
const uint64_t kXRegMask = 0xffffffffffffffff;
const uint64_t kSRegMask = 0xffffffff;
const uint64_t kDRegMask = 0xffffffffffffffff;
const uint64_t kSSignMask = 0x80000000;
const uint64_t kDSignMask = 0x8000000000000000;
const uint64_t kWSignMask = 0x80000000;
const uint64_t kXSignMask = 0x8000000000000000;
const uint64_t kByteMask = 0xff;
const uint64_t kHalfWordMask = 0xffff;
const uint64_t kWordMask = 0xffffffff;
const uint64_t kXMaxUInt = 0xffffffffffffffff;
const uint64_t kWMaxUInt = 0xffffffff;
const int64_t kXMaxInt = 0x7fffffffffffffff;
const int64_t kXMinInt = 0x8000000000000000;
const int32_t kWMaxInt = 0x7fffffff;
const int32_t kWMinInt = 0x80000000;
const unsigned kLinkRegCode = 30;
@ -81,8 +87,8 @@ const unsigned kFloatExponentBits = 8;

const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
const double kFP64PositiveInfinity = rawbits_to_double(0x7ff0000000000000UL);
const double kFP64NegativeInfinity = rawbits_to_double(0xfff0000000000000UL);
const double kFP64PositiveInfinity = rawbits_to_double(0x7ff0000000000000);
const double kFP64NegativeInfinity = rawbits_to_double(0xfff0000000000000);

// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
@ -93,6 +99,10 @@ static const float kFP32SignallingNaN = rawbits_to_float(0x7f800001);
static const double kFP64QuietNaN = rawbits_to_double(0x7ff800007fc00001);
static const float kFP32QuietNaN = rawbits_to_float(0x7fc00001);

// The default NaN values (for FPCR.DN=1).
static const double kFP64DefaultNaN = rawbits_to_double(0x7ff8000000000000);
static const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);


enum LSDataSize {
LSByte = 0,
@ -326,7 +336,7 @@ class Instruction {
}

inline Instruction* InstructionAtOffset(int64_t offset) {
ASSERT(IsWordAligned(this + offset));
VIXL_ASSERT(IsWordAligned(this + offset));
return this + offset;
}

@ -30,7 +30,7 @@ namespace vixl {

Counter::Counter(const char* name, CounterType type)
: count_(0), enabled_(false), type_(type) {
ASSERT(name != NULL);
VIXL_ASSERT(name != NULL);
strncpy(name_, name, kCounterNameMaxLength);
}

@ -164,7 +164,7 @@ void Instrument::Update() {
// Increment the instruction counter, and dump all counters if a sample period
// has elapsed.
static Counter* counter = GetCounter("Instruction");
ASSERT(counter->type() == Cumulative);
VIXL_ASSERT(counter->type() == Cumulative);
counter->Increment();

if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -121,8 +121,8 @@ class SimRegisterBase {
public:
template<typename T>
void Set(T new_value, unsigned size = sizeof(T)) {
ASSERT(size <= kSizeInBytes);
ASSERT(size <= sizeof(new_value));
VIXL_ASSERT(size <= kSizeInBytes);
VIXL_ASSERT(size <= sizeof(new_value));
// All AArch64 registers are zero-extending; Writing a W register clears the
// top bits of the corresponding X register.
memset(value_, 0, kSizeInBytes);
@ -133,7 +133,7 @@ class SimRegisterBase {
// the result.
template<typename T>
T Get(unsigned size = sizeof(T)) const {
ASSERT(size <= kSizeInBytes);
VIXL_ASSERT(size <= kSizeInBytes);
T result;
memset(&result, 0, sizeof(result));
memcpy(&result, value_, size);
@ -175,7 +175,7 @@ class Simulator : public DecoderVisitor {

inline void ExecuteInstruction() {
// The program counter should always be aligned.
ASSERT(IsWordAligned(pc_));
VIXL_ASSERT(IsWordAligned(pc_));
decoder_->Decode(pc_);
increment_pc();
}
@ -195,9 +195,9 @@ class Simulator : public DecoderVisitor {
inline T reg(unsigned size, unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kXRegSize) || (size == kWRegSize));
ASSERT(code < kNumberOfRegisters);
VIXL_ASSERT(size_in_bytes <= sizeof(T));
VIXL_ASSERT((size == kXRegSize) || (size == kWRegSize));
VIXL_ASSERT(code < kNumberOfRegisters);

if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
T result;
@ -237,9 +237,9 @@ class Simulator : public DecoderVisitor {
inline void set_reg(unsigned size, unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kXRegSize) || (size == kWRegSize));
ASSERT(code < kNumberOfRegisters);
VIXL_ASSERT(size_in_bytes <= sizeof(T));
VIXL_ASSERT((size == kXRegSize) || (size == kWRegSize));
VIXL_ASSERT(code < kNumberOfRegisters);

if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
return;
@ -283,9 +283,9 @@ class Simulator : public DecoderVisitor {
template<typename T>
inline T fpreg(unsigned size, unsigned code) const {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kDRegSize) || (size == kSRegSize));
ASSERT(code < kNumberOfFPRegisters);
VIXL_ASSERT(size_in_bytes <= sizeof(T));
VIXL_ASSERT((size == kDRegSize) || (size == kSRegSize));
VIXL_ASSERT(code < kNumberOfFPRegisters);
return fpregisters_[code].Get<T>(size_in_bytes);
}

@ -317,7 +317,7 @@ class Simulator : public DecoderVisitor {
case kSRegSize: return sreg(code);
case kDRegSize: return dreg(code);
default:
UNREACHABLE();
VIXL_UNREACHABLE();
return 0.0;
}
}
@ -326,9 +326,9 @@ class Simulator : public DecoderVisitor {
// This behaviour matches AArch64 register writes.
template<typename T>
inline void set_fpreg(unsigned code, T value) {
ASSERT((sizeof(value) == kDRegSizeInBytes) ||
VIXL_ASSERT((sizeof(value) == kDRegSizeInBytes) ||
(sizeof(value) == kSRegSizeInBytes));
ASSERT(code < kNumberOfFPRegisters);
VIXL_ASSERT(code < kNumberOfFPRegisters);
fpregisters_[code].Set(value, sizeof(value));
}

@ -356,8 +356,9 @@ class Simulator : public DecoderVisitor {
SimSystemRegister& nzcv() { return nzcv_; }

// TODO(jbramley): Find a way to make the fpcr_ members return the proper
// types, so this accessor is not necessary.
// types, so these accessors are not necessary.
FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); }
bool DN() { return fpcr_.DN() != 0; }
SimSystemRegister& fpcr() { return fpcr_; }

// Debug helpers
@ -375,7 +376,7 @@ class Simulator : public DecoderVisitor {
static const char* VRegNameForCode(unsigned code);

inline bool coloured_trace() { return coloured_trace_; }
inline void set_coloured_trace(bool value) { coloured_trace_ = value; }
void set_coloured_trace(bool value);

inline bool disasm_trace() { return disasm_trace_; }
inline void set_disasm_trace(bool value) {
@ -400,6 +401,19 @@ class Simulator : public DecoderVisitor {
}

protected:
const char* clr_normal;
const char* clr_flag_name;
const char* clr_flag_value;
const char* clr_reg_name;
const char* clr_reg_value;
const char* clr_fpreg_name;
const char* clr_fpreg_value;
const char* clr_memory_value;
const char* clr_memory_address;
const char* clr_debug_number;
const char* clr_debug_message;
const char* clr_printf;

// Simulation helpers ------------------------------------
bool ConditionPassed(Condition cond) {
switch (cond) {
@ -435,7 +449,7 @@ class Simulator : public DecoderVisitor {
case al:
return true;
default:
UNREACHABLE();
VIXL_UNREACHABLE();
return false;
}
}
@ -490,6 +504,9 @@ class Simulator : public DecoderVisitor {
uint64_t ReverseBits(uint64_t value, unsigned num_bits);
uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);

template <typename T>
T FPDefaultNaN() const;

void FPCompare(double val0, double val1);
double FPRoundInt(double value, FPRounding round_mode);
double FPToDouble(float value);
@ -504,17 +521,51 @@ class Simulator : public DecoderVisitor {
uint64_t FPToUInt64(double value, FPRounding rmode);

template <typename T>
T FPMax(T a, T b);
T FPAdd(T op1, T op2);

template <typename T>
T FPMin(T a, T b);
T FPDiv(T op1, T op2);

template <typename T>
T FPMax(T a, T b);

template <typename T>
T FPMaxNM(T a, T b);

template <typename T>
T FPMin(T a, T b);

template <typename T>
T FPMinNM(T a, T b);

template <typename T>
T FPMul(T op1, T op2);

template <typename T>
T FPMulAdd(T a, T op1, T op2);

template <typename T>
T FPSqrt(T op);

template <typename T>
T FPSub(T op1, T op2);

// This doesn't do anything at the moment. We'll need it if we want support
// for cumulative exception bits or floating-point exceptions.
void FPProcessException() { }

// Standard NaN processing.
template <typename T>
T FPProcessNaN(T op);

bool FPProcessNaNs(Instruction* instr);

template <typename T>
T FPProcessNaNs(T op1, T op2);

template <typename T>
T FPProcessNaNs3(T op1, T op2, T op3);

// Pseudo Printf instruction
void DoPrintf(Instruction* instr);

@ -549,9 +600,8 @@ class Simulator : public DecoderVisitor {
// functions, or to save and restore it when entering and leaving generated
// code.
void AssertSupportedFPCR() {
ASSERT(fpcr().DN() == 0); // No default-NaN support.
ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
VIXL_ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
VIXL_ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.

// The simulator does not support half-precision operations so fpcr().AHP()
// is irrelevant, and is not checked here.
|
||||
#ifndef VIXL_GLOBALS_H
|
||||
#define VIXL_GLOBALS_H
|
||||
|
||||
#ifndef __STDC_CONSTANT_MACROS
|
||||
#define __STDC_CONSTANT_MACROS
|
||||
#endif
|
||||
#include <stdint.h>
|
||||
|
||||
// Get the standard printf format macros for C99 stdint types.
|
||||
#define __STDC_FORMAT_MACROS
|
||||
#include <inttypes.h>
|
||||
@ -44,23 +49,30 @@ typedef uint8_t byte;
|
||||
|
||||
const int KBytes = 1024;
|
||||
const int MBytes = 1024 * KBytes;
|
||||
const int GBytes = 1024 * MBytes;
|
||||
|
||||
#define ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort()
|
||||
#define VIXL_ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort()
|
||||
#ifdef DEBUG
|
||||
#define ASSERT(condition) assert(condition)
|
||||
#define CHECK(condition) ASSERT(condition)
|
||||
#define UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); ABORT()
|
||||
#define UNREACHABLE() printf("UNREACHABLE\t"); ABORT()
|
||||
#define VIXL_ASSERT(condition) assert(condition)
|
||||
#define VIXL_CHECK(condition) VIXL_ASSERT(condition)
|
||||
#define VIXL_UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); VIXL_ABORT()
|
||||
#define VIXL_UNREACHABLE() printf("UNREACHABLE\t"); VIXL_ABORT()
|
||||
#else
|
||||
#define ASSERT(condition) ((void) 0)
|
||||
#define CHECK(condition) assert(condition)
|
||||
#define UNIMPLEMENTED() ((void) 0)
|
||||
#define UNREACHABLE() ((void) 0)
|
||||
#define VIXL_ASSERT(condition) ((void) 0)
|
||||
#define VIXL_CHECK(condition) assert(condition)
|
||||
#define VIXL_UNIMPLEMENTED() ((void) 0)
|
||||
#define VIXL_UNREACHABLE() ((void) 0)
|
||||
#endif
|
||||
// This is not as powerful as template based assertions, but it is simple.
|
||||
// It assumes that the descriptions are unique. If this starts being a problem,
|
||||
// we can switch to a different implemention.
|
||||
#define VIXL_CONCAT(a, b) a##b
|
||||
#define VIXL_STATIC_ASSERT_LINE(line, condition) \
|
||||
typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
|
||||
__attribute__((unused))
|
||||
#define VIXL_STATIC_ASSERT(condition) VIXL_STATIC_ASSERT_LINE(__LINE__, condition) //NOLINT
|
||||
|
||||
template <typename T> inline void USE(T) {}
|
||||
|
||||
#define ALIGNMENT_EXCEPTION() printf("ALIGNMENT EXCEPTION\t"); ABORT()
|
||||
#define VIXL_ALIGNMENT_EXCEPTION() printf("ALIGNMENT EXCEPTION\t"); VIXL_ABORT()
|
||||
|
||||
#endif // VIXL_GLOBALS_H
|
||||
|
@ -34,9 +34,7 @@ namespace vixl {
// Currently we assume running the simulator implies running on x86 hardware.
inline void HostBreakpoint() { asm("int3"); }
#else
inline void HostBreakpoint() {
// TODO: Implement HostBreakpoint on a64.
}
inline void HostBreakpoint() { asm("brk"); }
#endif
}  // namespace vixl

12
src/utils.cc
@ -58,9 +58,9 @@ double rawbits_to_double(uint64_t bits) {


int CountLeadingZeros(uint64_t value, int width) {
ASSERT((width == 32) || (width == 64));
VIXL_ASSERT((width == 32) || (width == 64));
int count = 0;
uint64_t bit_test = 1UL << (width - 1);
uint64_t bit_test = UINT64_C(1) << (width - 1);
while ((count < width) && ((bit_test & value) == 0)) {
count++;
bit_test >>= 1;
@ -70,7 +70,7 @@ int CountLeadingZeros(uint64_t value, int width) {


int CountLeadingSignBits(int64_t value, int width) {
ASSERT((width == 32) || (width == 64));
VIXL_ASSERT((width == 32) || (width == 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
@ -80,7 +80,7 @@ int CountLeadingSignBits(int64_t value, int width) {


int CountTrailingZeros(uint64_t value, int width) {
ASSERT((width == 32) || (width == 64));
VIXL_ASSERT((width == 32) || (width == 64));
int count = 0;
while ((count < width) && (((value >> count) & 1) == 0)) {
count++;
@ -92,10 +92,10 @@ int CountTrailingZeros(uint64_t value, int width) {
int CountSetBits(uint64_t value, int width) {
// TODO: Other widths could be added here, as the implementation already
// supports them.
ASSERT((width == 32) || (width == 64));
VIXL_ASSERT((width == 32) || (width == 64));

// Mask out unused bits to ensure that they are not counted.
value &= (0xffffffffffffffffUL >> (64-width));
value &= (0xffffffffffffffff >> (64-width));

// Add up the set bits.
// The algorithm works by adding pairs of bit fields together iteratively,

58
src/utils.h
@ -35,19 +35,19 @@ namespace vixl {

// Check number width.
inline bool is_intn(unsigned n, int64_t x) {
ASSERT((0 < n) && (n < 64));
int64_t limit = 1L << (n - 1);
VIXL_ASSERT((0 < n) && (n < 64));
int64_t limit = INT64_C(1) << (n - 1);
return (-limit <= x) && (x < limit);
}

inline bool is_uintn(unsigned n, int64_t x) {
ASSERT((0 < n) && (n < 64));
VIXL_ASSERT((0 < n) && (n < 64));
return !(x >> n);
}

inline unsigned truncate_to_intn(unsigned n, int64_t x) {
ASSERT((0 < n) && (n < 64));
return (x & ((1L << n) - 1));
VIXL_ASSERT((0 < n) && (n < 64));
return (x & ((INT64_C(1) << n) - 1));
}

#define INT_1_TO_63_LIST(V) \
@ -99,7 +99,7 @@ double rawbits_to_double(uint64_t bits);

// NaN tests.
inline bool IsSignallingNaN(double num) {
const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
const uint64_t kFP64QuietNaNMask = 0x0008000000000000;
uint64_t raw = double_to_rawbits(num);
if (isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
return true;
@ -109,7 +109,7 @@ inline bool IsSignallingNaN(double num) {


inline bool IsSignallingNaN(float num) {
const uint64_t kFP32QuietNaNMask = 0x00400000UL;
const uint32_t kFP32QuietNaNMask = 0x00400000;
uint32_t raw = float_to_rawbits(num);
if (isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
return true;
@ -124,7 +124,33 @@ inline bool IsQuietNaN(T num) {
}


// Bits counting.
// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
const uint64_t kFP64QuietNaNMask = 0x0008000000000000;
VIXL_ASSERT(isnan(num));
return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask);
}


inline float ToQuietNaN(float num) {
const uint32_t kFP32QuietNaNMask = 0x00400000;
VIXL_ASSERT(isnan(num));
return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask);
}


// Fused multiply-add.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
return fma(op1, op2, a);
}


inline float FusedMultiplyAdd(float op1, float op2, float a) {
return fmaf(op1, op2, a);
}


// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
@ -134,20 +160,30 @@ int CountSetBits(uint64_t value, int width);
// TODO: rename/refactor to make it specific to instructions.
template<typename T>
bool IsWordAligned(T pointer) {
ASSERT(sizeof(pointer) == sizeof(intptr_t));   // NOLINT(runtime/sizeof)
VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t));   // NOLINT(runtime/sizeof)
return (reinterpret_cast<intptr_t>(pointer) & 3) == 0;
}

// Increment a pointer until it has the specified alignment.
template<class T>
T AlignUp(T pointer, size_t alignment) {
ASSERT(sizeof(pointer) == sizeof(uintptr_t));
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(uintptr_t));
uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
size_t align_step = (alignment - pointer_raw) % alignment;
ASSERT((pointer_raw + align_step) % alignment == 0);
VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
return reinterpret_cast<T>(pointer_raw + align_step);
}

// Decrement a pointer until it has the specified alignment.
template<class T>
T AlignDown(T pointer, size_t alignment) {
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(uintptr_t));
uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
size_t align_step = pointer_raw % alignment;
VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);
return reinterpret_cast<T>(pointer_raw - align_step);
}


}  // namespace vixl
@ -46,12 +46,15 @@ bool vixl::Cctest::coloured_trace_ = false;
// No instruction statistics by default.
bool vixl::Cctest::instruction_stats_ = false;

// Instantiate a Cctest and add append it to the linked list.
// Don't generate simulator test traces by default.
bool vixl::Cctest::sim_test_trace_ = false;

// Instantiate a Cctest and append it to the linked list.
vixl::Cctest::Cctest(const char* name, CctestFunction* callback)
: name_(name), callback_(callback), next_(NULL) {
// Append this cctest to the linked list.
if (first_ == NULL) {
ASSERT(last_ == NULL);
VIXL_ASSERT(last_ == NULL);
first_ = this;
} else {
last_->next_ = this;
@ -80,7 +83,8 @@ bool IsSpecialArgument(const char* arg) {
(strcmp(arg, "--trace_sim") == 0) ||
(strcmp(arg, "--trace_reg") == 0) ||
(strcmp(arg, "--coloured_trace") == 0) ||
(strcmp(arg, "--instruction_stats") == 0);
(strcmp(arg, "--instruction_stats") == 0) ||
(strcmp(arg, "--sim_test_trace") == 0);
}


@ -93,9 +97,10 @@ void PrintHelpMessage() {
"--debugger run in the debugger.\n"
"--trace_sim generate a trace of simulated instructions.\n"
"--trace_reg generate a trace of simulated registers. "
"Implies --debugger.\n"
"Implies --debugger.\n"
"--coloured_trace generate coloured trace.\n"
"--instruction_stats log instruction statistics to vixl_stats.csv.\n");
"--instruction_stats log instruction statistics to vixl_stats.csv.\n"
"--sim_test_trace Print result traces for SIM_* tests.\n");
}

int main(int argc, char* argv[]) {
@ -122,6 +127,10 @@ int main(int argc, char* argv[]) {
vixl::Cctest::set_instruction_stats(true);
}

if (IsInArgs("--sim_test_trace", argc, argv)) {
vixl::Cctest::set_sim_test_trace(true);
}

if (IsInArgs("--help", argc, argv)) {
PrintHelpMessage();
@ -54,6 +54,8 @@ class Cctest {
static void set_coloured_trace(bool value) { coloured_trace_ = value; }
static bool instruction_stats() { return instruction_stats_; }
static void set_instruction_stats(bool value) { instruction_stats_ = value; }
static bool sim_test_trace() { return sim_test_trace_; }
static void set_sim_test_trace(bool value) { sim_test_trace_ = value; }

// The debugger is needed to trace register values.
static bool run_debugger() { return debug_ || trace_reg_; }
@ -70,6 +72,7 @@ class Cctest {
static bool trace_reg_;
static bool coloured_trace_;
static bool instruction_stats_;
static bool sim_test_trace_;
};

// Define helper macros for cctest files.
@ -24,6 +24,9 @@
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The examples only work with the simulator.
#ifdef USE_SIMULATOR

#include "a64/macro-assembler-a64.h"
#include "a64/debugger-a64.h"
#include "a64/simulator-a64.h"
@ -412,3 +415,5 @@ TEST(getting_started) {
GETTING_STARTED_DOTEST(0xffffffffffffffff);
GETTING_STARTED_DOTEST(0x5a5a5a5a5a5a5a5a);
}

#endif  // USE_SIMULATOR
File diff suppressed because it is too large
Load Diff
@ -553,7 +553,7 @@ TEST(logical_immediate) {
}

// 32-bit patterns.
value = 0x00003fff00003fffL;
value = 0x00003fff00003fff;
for (int i = 0; i < 32; i++) {
snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
COMPARE(and_(x0, x0, Operand(value)), result);
@ -561,7 +561,7 @@ TEST(logical_immediate) {
}

// 16-bit patterns.
value = 0x001f001f001f001fL;
value = 0x001f001f001f001f;
for (int i = 0; i < 16; i++) {
snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
COMPARE(and_(x0, x0, Operand(value)), result);
@ -569,7 +569,7 @@ TEST(logical_immediate) {
}

// 8-bit patterns.
value = 0x0e0e0e0e0e0e0e0eL;
value = 0x0e0e0e0e0e0e0e0e;
for (int i = 0; i < 8; i++) {
snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
COMPARE(and_(x0, x0, Operand(value)), result);
@ -577,7 +577,7 @@ TEST(logical_immediate) {
}

// 4-bit patterns.
value = 0x6666666666666666L;
value = 0x6666666666666666;
for (int i = 0; i < 4; i++) {
snprintf(result, RESULT_SIZE, "and x0, x0, #0x%" PRIx64, value);
COMPARE(and_(x0, x0, Operand(value)), result);
@ -585,9 +585,9 @@ TEST(logical_immediate) {
}

// 2-bit patterns.
COMPARE(and_(x0, x0, Operand(0x5555555555555555L)),
COMPARE(and_(x0, x0, Operand(0x5555555555555555)),
"and x0, x0, #0x5555555555555555");
COMPARE(and_(x0, x0, Operand(0xaaaaaaaaaaaaaaaaL)),
COMPARE(and_(x0, x0, Operand(0xaaaaaaaaaaaaaaaa)),
"and x0, x0, #0xaaaaaaaaaaaaaaaa");

// Test immediate encoding - 32-bit destination.
@ -605,35 +605,35 @@ TEST(logical_immediate) {
// Test other instructions.
COMPARE(tst(w1, Operand(0x11111111)),
"tst w1, #0x11111111");
COMPARE(tst(x2, Operand(0x8888888888888888L)),
COMPARE(tst(x2, Operand(0x8888888888888888)),
"tst x2, #0x8888888888888888");
COMPARE(orr(w7, w8, Operand(0xaaaaaaaa)),
"orr w7, w8, #0xaaaaaaaa");
COMPARE(orr(x9, x10, Operand(0x5555555555555555L)),
COMPARE(orr(x9, x10, Operand(0x5555555555555555)),
"orr x9, x10, #0x5555555555555555");
COMPARE(eor(w15, w16, Operand(0x00000001)),
"eor w15, w16, #0x1");
COMPARE(eor(x17, x18, Operand(0x0000000000000003L)),
COMPARE(eor(x17, x18, Operand(0x0000000000000003)),
"eor x17, x18, #0x3");
COMPARE(ands(w23, w24, Operand(0x0000000f)), "ands w23, w24, #0xf");
COMPARE(ands(x25, x26, Operand(0x800000000000000fL)),
COMPARE(ands(x25, x26, Operand(0x800000000000000f)),
"ands x25, x26, #0x800000000000000f");

// Test inverse.
COMPARE(bic(w3, w4, Operand(0x20202020)),
"and w3, w4, #0xdfdfdfdf");
COMPARE(bic(x5, x6, Operand(0x4040404040404040L)),
COMPARE(bic(x5, x6, Operand(0x4040404040404040)),
"and x5, x6, #0xbfbfbfbfbfbfbfbf");
COMPARE(orn(w11, w12, Operand(0x40004000)),
"orr w11, w12, #0xbfffbfff");
COMPARE(orn(x13, x14, Operand(0x8181818181818181L)),
COMPARE(orn(x13, x14, Operand(0x8181818181818181)),
"orr x13, x14, #0x7e7e7e7e7e7e7e7e");
COMPARE(eon(w19, w20, Operand(0x80000001)),
"eor w19, w20, #0x7ffffffe");
COMPARE(eon(x21, x22, Operand(0xc000000000000003L)),
COMPARE(eon(x21, x22, Operand(0xc000000000000003)),
"eor x21, x22, #0x3ffffffffffffffc");
COMPARE(bics(w27, w28, Operand(0xfffffff7)), "ands w27, w28, #0x8");
COMPARE(bics(x29, x0, Operand(0xfffffffeffffffffL)),
COMPARE(bics(x29, x0, Operand(0xfffffffeffffffff)),
"ands x29, x0, #0x100000000");

// Test stack pointer.
@ -648,15 +648,15 @@ TEST(logical_immediate) {
COMPARE(orr(w2, wzr, Operand(0x00078000)), "mov w2, #0x78000");
COMPARE(orr(w3, wzr, Operand(0x00780000)), "orr w3, wzr, #0x780000");
COMPARE(orr(w4, wzr, Operand(0x07800000)), "orr w4, wzr, #0x7800000");
COMPARE(orr(x5, xzr, Operand(0xffffffffffffc001UL)),
COMPARE(orr(x5, xzr, Operand(0xffffffffffffc001)),
"orr x5, xzr, #0xffffffffffffc001");
COMPARE(orr(x6, xzr, Operand(0xfffffffffffc001fUL)),
COMPARE(orr(x6, xzr, Operand(0xfffffffffffc001f)),
"mov x6, #0xfffffffffffc001f");
COMPARE(orr(x7, xzr, Operand(0xffffffffffc001ffUL)),
COMPARE(orr(x7, xzr, Operand(0xffffffffffc001ff)),
"mov x7, #0xffffffffffc001ff");
COMPARE(orr(x8, xzr, Operand(0xfffffffffc001fffUL)),
COMPARE(orr(x8, xzr, Operand(0xfffffffffc001fff)),
"mov x8, #0xfffffffffc001fff");
COMPARE(orr(x9, xzr, Operand(0xffffffffc001ffffUL)),
COMPARE(orr(x9, xzr, Operand(0xffffffffc001ffff)),
"orr x9, xzr, #0xffffffffc001ffff");

CLEANUP();
@ -751,14 +751,14 @@ TEST(dp_2_source) {
TEST(adr) {
SETUP();

COMPARE(adr(x0, 0), "adr x0, #+0x0");
COMPARE(adr(x1, 1), "adr x1, #+0x1");
COMPARE(adr(x2, -1), "adr x2, #-0x1");
COMPARE(adr(x3, 4), "adr x3, #+0x4");
COMPARE(adr(x4, -4), "adr x4, #-0x4");
COMPARE(adr(x5, 0x000fffff), "adr x5, #+0xfffff");
COMPARE(adr(x6, -0x00100000), "adr x6, #-0x100000");
COMPARE(adr(xzr, 0), "adr xzr, #+0x0");
COMPARE_PREFIX(adr(x0, 0), "adr x0, #+0x0");
COMPARE_PREFIX(adr(x1, 1), "adr x1, #+0x1");
COMPARE_PREFIX(adr(x2, -1), "adr x2, #-0x1");
COMPARE_PREFIX(adr(x3, 4), "adr x3, #+0x4");
COMPARE_PREFIX(adr(x4, -4), "adr x4, #-0x4");
COMPARE_PREFIX(adr(x5, 0x000fffff), "adr x5, #+0xfffff");
COMPARE_PREFIX(adr(x6, -0x00100000), "adr x6, #-0x100000");
COMPARE_PREFIX(adr(xzr, 0), "adr xzr, #+0x0");

CLEANUP();
}
@ -767,32 +767,32 @@ TEST(branch) {
SETUP();

#define INST_OFF(x) ((x) >> kInstructionSizeLog2)
COMPARE(b(INST_OFF(0x4)), "b #+0x4");
COMPARE(b(INST_OFF(-0x4)), "b #-0x4");
COMPARE(b(INST_OFF(0x7fffffc)), "b #+0x7fffffc");
COMPARE(b(INST_OFF(-0x8000000)), "b #-0x8000000");
COMPARE(b(INST_OFF(0xffffc), eq), "b.eq #+0xffffc");
COMPARE(b(INST_OFF(-0x100000), mi), "b.mi #-0x100000");
COMPARE(b(INST_OFF(0xffffc), al), "b.al #+0xffffc");
COMPARE(b(INST_OFF(-0x100000), nv), "b.nv #-0x100000");
COMPARE(bl(INST_OFF(0x4)), "bl #+0x4");
COMPARE(bl(INST_OFF(-0x4)), "bl #-0x4");
COMPARE(bl(INST_OFF(0xffffc)), "bl #+0xffffc");
COMPARE(bl(INST_OFF(-0x100000)), "bl #-0x100000");
COMPARE(cbz(w0, INST_OFF(0xffffc)), "cbz w0, #+0xffffc");
COMPARE(cbz(x1, INST_OFF(-0x100000)), "cbz x1, #-0x100000");
COMPARE(cbnz(w2, INST_OFF(0xffffc)), "cbnz w2, #+0xffffc");
COMPARE(cbnz(x3, INST_OFF(-0x100000)), "cbnz x3, #-0x100000");
COMPARE(tbz(w4, 0, INST_OFF(0x7ffc)), "tbz w4, #0, #+0x7ffc");
COMPARE(tbz(x5, 63, INST_OFF(-0x8000)), "tbz x5, #63, #-0x8000");
COMPARE(tbz(w6, 31, INST_OFF(0)), "tbz w6, #31, #+0x0");
COMPARE(tbz(x7, 31, INST_OFF(0x4)), "tbz w7, #31, #+0x4");
COMPARE(tbz(x8, 32, INST_OFF(0x8)), "tbz x8, #32, #+0x8");
COMPARE(tbnz(w8, 0, INST_OFF(0x7ffc)), "tbnz w8, #0, #+0x7ffc");
COMPARE(tbnz(x9, 63, INST_OFF(-0x8000)), "tbnz x9, #63, #-0x8000");
COMPARE(tbnz(w10, 31, INST_OFF(0)), "tbnz w10, #31, #+0x0");
COMPARE(tbnz(x11, 31, INST_OFF(0x4)), "tbnz w11, #31, #+0x4");
COMPARE(tbnz(x12, 32, INST_OFF(0x8)), "tbnz x12, #32, #+0x8");
COMPARE_PREFIX(b(INST_OFF(0x4)), "b #+0x4");
COMPARE_PREFIX(b(INST_OFF(-0x4)), "b #-0x4");
COMPARE_PREFIX(b(INST_OFF(0x7fffffc)), "b #+0x7fffffc");
COMPARE_PREFIX(b(INST_OFF(-0x8000000)), "b #-0x8000000");
COMPARE_PREFIX(b(INST_OFF(0xffffc), eq), "b.eq #+0xffffc");
COMPARE_PREFIX(b(INST_OFF(-0x100000), mi), "b.mi #-0x100000");
COMPARE_PREFIX(b(INST_OFF(0xffffc), al), "b.al #+0xffffc");
COMPARE_PREFIX(b(INST_OFF(-0x100000), nv), "b.nv #-0x100000");
COMPARE_PREFIX(bl(INST_OFF(0x4)), "bl #+0x4");
COMPARE_PREFIX(bl(INST_OFF(-0x4)), "bl #-0x4");
COMPARE_PREFIX(bl(INST_OFF(0xffffc)), "bl #+0xffffc");
COMPARE_PREFIX(bl(INST_OFF(-0x100000)), "bl #-0x100000");
COMPARE_PREFIX(cbz(w0, INST_OFF(0xffffc)), "cbz w0, #+0xffffc");
COMPARE_PREFIX(cbz(x1, INST_OFF(-0x100000)), "cbz x1, #-0x100000");
COMPARE_PREFIX(cbnz(w2, INST_OFF(0xffffc)), "cbnz w2, #+0xffffc");
COMPARE_PREFIX(cbnz(x3, INST_OFF(-0x100000)), "cbnz x3, #-0x100000");
COMPARE_PREFIX(tbz(w4, 0, INST_OFF(0x7ffc)), "tbz w4, #0, #+0x7ffc");
COMPARE_PREFIX(tbz(x5, 63, INST_OFF(-0x8000)), "tbz x5, #63, #-0x8000");
COMPARE_PREFIX(tbz(w6, 31, INST_OFF(0)), "tbz w6, #31, #+0x0");
COMPARE_PREFIX(tbz(x7, 31, INST_OFF(0x4)), "tbz w7, #31, #+0x4");
COMPARE_PREFIX(tbz(x8, 32, INST_OFF(0x8)), "tbz x8, #32, #+0x8");
COMPARE_PREFIX(tbnz(w8, 0, INST_OFF(0x7ffc)), "tbnz w8, #0, #+0x7ffc");
COMPARE_PREFIX(tbnz(x9, 63, INST_OFF(-0x8000)), "tbnz x9, #63, #-0x8000");
COMPARE_PREFIX(tbnz(w10, 31, INST_OFF(0)), "tbnz w10, #31, #+0x0");
COMPARE_PREFIX(tbnz(x11, 31, INST_OFF(0x4)), "tbnz w11, #31, #+0x4");
COMPARE_PREFIX(tbnz(x12, 32, INST_OFF(0x8)), "tbnz x12, #32, #+0x8");
COMPARE(br(x0), "br x0");
COMPARE(blr(x1), "blr x1");
COMPARE(ret(x2), "ret x2");
@ -1230,10 +1230,10 @@ TEST(load_store_pair_nontemp) {
TEST(load_literal) {
SETUP();

COMPARE_PREFIX(ldr(x10, 0x1234567890abcdefUL), "ldr x10, pc+8");
COMPARE_PREFIX(ldr(x10, 0x1234567890abcdef), "ldr x10, pc+8");
COMPARE_PREFIX(ldr(w20, 0xfedcba09), "ldr w20, pc+8");
COMPARE_PREFIX(ldr(d11, 1.234), "ldr d11, pc+8");
COMPARE_PREFIX(ldr(s22, 2.5), "ldr s22, pc+8");
COMPARE_PREFIX(ldr(s22, 2.5f), "ldr s22, pc+8");

CLEANUP();
}
@ -1316,8 +1316,8 @@ TEST(cond_cmp_macro) {
TEST(fmov_imm) {
SETUP();

COMPARE(fmov(s0, 1.0), "fmov s0, #0x70 (1.0000)");
COMPARE(fmov(s31, -13.0), "fmov s31, #0xaa (-13.0000)");
COMPARE(fmov(s0, 1.0f), "fmov s0, #0x70 (1.0000)");
COMPARE(fmov(s31, -13.0f), "fmov s31, #0xaa (-13.0000)");
COMPARE(fmov(d1, 1.0), "fmov d1, #0x70 (1.0000)");
COMPARE(fmov(d29, -13.0), "fmov d29, #0xaa (-13.0000)");

@ -1556,7 +1556,7 @@ TEST(unreachable) {
SETUP_CLASS(MacroAssembler);

#ifdef USE_SIMULATOR
ASSERT(kUnreachableOpcode == 0xdeb0);
VIXL_ASSERT(kUnreachableOpcode == 0xdeb0);
COMPARE(Unreachable(), "hlt #0xdeb0");
#else
COMPARE(Unreachable(), "blr xzr");
@ -1570,7 +1570,7 @@ TEST(unreachable) {
TEST(trace) {
SETUP_CLASS(MacroAssembler);

ASSERT(kTraceOpcode == 0xdeb2);
VIXL_ASSERT(kTraceOpcode == 0xdeb2);

// All Trace calls should produce the same instruction.
COMPARE(Trace(LOG_ALL, TRACE_ENABLE), "hlt #0xdeb2");
@ -1585,7 +1585,7 @@ TEST(trace) {
TEST(log) {
SETUP_CLASS(MacroAssembler);

ASSERT(kLogOpcode == 0xdeb3);
VIXL_ASSERT(kLogOpcode == 0xdeb3);

// All Log calls should produce the same instruction.
COMPARE(Log(LOG_ALL), "hlt #0xdeb3");
@ -89,7 +89,7 @@ TEST(decoder_pedantic) {
Decoder decoder;
Instruction buffer[kInstructionSize];

for (uint64_t i = 0; i < (1UL << 32); i++) {
for (uint64_t i = 0; i < (UINT64_C(1) << 32); i++) {
if ((i & 0xffffff) == 0) {
fprintf(stderr, "0x%08" PRIx32 "\n", static_cast<uint32_t>(i));
}
@ -106,7 +106,7 @@ TEST(disasm_pedantic) {
Instruction buffer[kInstructionSize];

decoder.AppendVisitor(&disasm);
for (uint64_t i = 0; i < (1UL << 32); i++) {
for (uint64_t i = 0; i < (UINT64_C(1) << 32); i++) {
if ((i & 0xffff) == 0) {
fprintf(stderr, "0x%08" PRIx32 "\n", static_cast<uint32_t>(i));
}

572
test/test-simulator-a64.cc
Normal file
@ -0,0 +1,572 @@
|
||||
// Copyright 2014, ARM Limited
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include <stdio.h>
|
||||
#include <float.h>
|
||||
|
||||
#include "cctest.h"
|
||||
#include "test-utils-a64.h"
|
||||
#include "test-simulator-inputs-a64.h"
|
||||
#include "test-simulator-traces-a64.h"
|
||||
#include "a64/macro-assembler-a64.h"
|
||||
#include "a64/simulator-a64.h"
|
||||
|
||||
namespace vixl {
|
||||
|
||||
// ==== Simulator Tests ====
|
||||
//
|
||||
// These simulator tests check instruction behaviour against a trace taken from
|
||||
// real AArch64 hardware. The same test code is used to generate the trace; the
|
||||
// results are printed to stdout when the test is run with --sim_test_trace.
|
||||
//
|
||||
// The input lists and expected results are stored in
|
||||
// test/test-simulator-traces-a64.h. The expected results can be regenerated
|
||||
// using tools/generate_simulator_traces.py.

#define __ masm.
#define TEST(name) TEST_(SIM_##name)

#define BUF_SIZE (256)

#ifdef USE_SIMULATOR

#define SETUP() \
  byte* buf = new byte[BUF_SIZE]; \
  MacroAssembler masm(buf, BUF_SIZE); \
  Decoder decoder; \
  Simulator* simulator = NULL; \
  if (Cctest::run_debugger()) { \
    simulator = new Debugger(&decoder); \
  } else { \
    simulator = new Simulator(&decoder); \
    simulator->set_disasm_trace(Cctest::trace_sim()); \
  } \
  simulator->set_coloured_trace(Cctest::coloured_trace()); \
  simulator->set_instruction_stats(Cctest::instruction_stats());

#define START() \
  masm.Reset(); \
  simulator->ResetState(); \
  __ PushCalleeSavedRegisters(); \
  if (Cctest::run_debugger()) { \
    if (Cctest::trace_reg()) { \
      __ Trace(LOG_STATE, TRACE_ENABLE); \
    } \
    if (Cctest::trace_sim()) { \
      __ Trace(LOG_DISASM, TRACE_ENABLE); \
    } \
  } \
  if (Cctest::instruction_stats()) { \
    __ EnableInstrumentation(); \
  }

#define END() \
  if (Cctest::instruction_stats()) { \
    __ DisableInstrumentation(); \
  } \
  if (Cctest::run_debugger()) { \
    __ Trace(LOG_ALL, TRACE_DISABLE); \
  } \
  __ PopCalleeSavedRegisters(); \
  __ Ret(); \
  masm.FinalizeCode()

#define RUN() \
  simulator->RunFrom(reinterpret_cast<Instruction*>(buf))

#define TEARDOWN() \
  delete simulator; \
  delete[] buf;

#else  // USE_SIMULATOR

#define SETUP() \
  byte* buf = new byte[BUF_SIZE]; \
  MacroAssembler masm(buf, BUF_SIZE); \
  CPU::SetUp()

#define START() \
  masm.Reset(); \
  __ PushCalleeSavedRegisters()

#define END() \
  __ PopCalleeSavedRegisters(); \
  __ Ret(); \
  masm.FinalizeCode()

#define RUN() \
  CPU::EnsureIAndDCacheCoherency(buf, BUF_SIZE); \
  { \
    void (*test_function)(void); \
    VIXL_ASSERT(sizeof(buf) == sizeof(test_function)); \
    memcpy(&test_function, &buf, sizeof(buf)); \
    test_function(); \
  }

#define TEARDOWN() \
  delete[] buf;

#endif  // USE_SIMULATOR
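
// With either set of definitions, a test body follows the same outline
// (a sketch of the macros above, not additional functionality):
//
//   SETUP();
//   START();
//   ...          // Emit the code under test via `masm`.
//   END();
//   RUN();
//   TEARDOWN();
//
// so the same test source can run under the VIXL simulator or natively on
// AArch64 hardware, which is how the reference traces are captured.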

// The maximum number of errors to report in detail for each test.
static const unsigned kErrorReportLimit = 8;


// MacroAssembler member function pointers to pass to the test dispatchers.
typedef void (MacroAssembler::*Test1OpFPHelper_t)(const FPRegister& fd,
                                                  const FPRegister& fn);
typedef void (MacroAssembler::*Test2OpFPHelper_t)(const FPRegister& fd,
                                                  const FPRegister& fn,
                                                  const FPRegister& fm);
typedef void (MacroAssembler::*Test3OpFPHelper_t)(const FPRegister& fd,
                                                  const FPRegister& fn,
                                                  const FPRegister& fm,
                                                  const FPRegister& fa);

// Standard test dispatchers.


static void Test2Op_Helper(Test2OpFPHelper_t helper,
                           uintptr_t inputs, unsigned inputs_length,
                           uintptr_t results, unsigned reg_size) {
  VIXL_ASSERT((reg_size == kDRegSize) || (reg_size == kSRegSize));

  SETUP();
  START();

  // Roll up the loop to keep the code size down.
  Label loop_n, loop_m, loop_a;

  Register out = x0;
  Register inputs_base = x1;
  Register length = w2;
  Register index_n = w3;
  Register index_m = w4;

  bool double_op = reg_size == kDRegSize;
  const int index_shift =
      double_op ? kDRegSizeInBytesLog2 : kSRegSizeInBytesLog2;

  FPRegister fd = double_op ? d0 : s0;
  FPRegister fn = double_op ? d1 : s1;
  FPRegister fm = double_op ? d2 : s2;

  __ Mov(out, results);
  __ Mov(inputs_base, inputs);
  __ Mov(length, inputs_length);

  __ Mov(index_n, 0);
  __ Bind(&loop_n);
  __ Ldr(fn, MemOperand(inputs_base, index_n, UXTW, index_shift));

  __ Mov(index_m, 0);
  __ Bind(&loop_m);
  __ Ldr(fm, MemOperand(inputs_base, index_m, UXTW, index_shift));

  (masm.*helper)(fd, fn, fm);
  __ Str(fd, MemOperand(out, fd.SizeInBytes(), PostIndex));

  __ Add(index_m, index_m, 1);
  __ Cmp(index_m, inputs_length);
  __ B(lo, &loop_m);

  __ Add(index_n, index_n, 1);
  __ Cmp(index_n, inputs_length);
  __ B(lo, &loop_n);

  END();
  RUN();
  TEARDOWN();
}


// Test FP instructions using doubles. The inputs[] and expected[] arrays should
// be arrays of rawbits representations of doubles. This ensures that exact bit
// comparisons can be performed.
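// (A plain floating-point comparison would treat every NaN result as a
// mismatch and would conflate +0.0 with -0.0, so results are compared as raw
// bit patterns instead.)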
static void Test2Op(const char * name, Test2OpFPHelper_t helper,
                    const uint64_t inputs[], unsigned inputs_length,
                    const uint64_t expected[], unsigned expected_length) {
  VIXL_ASSERT(inputs_length > 0);

  static unsigned results_length = inputs_length * inputs_length;
  uint64_t * results = new uint64_t[results_length];

  Test2Op_Helper(helper, reinterpret_cast<uintptr_t>(inputs), inputs_length,
                 reinterpret_cast<uintptr_t>(results), kDRegSize);

  if (Cctest::sim_test_trace()) {
    // Print the results.
    printf("const uint64_t kExpected_%s[] = {\n", name);
    for (unsigned d = 0; d < results_length; d++) {
      printf("  0x%016" PRIx64 ",\n", results[d]);
    }
    printf("};\n");
    printf("const unsigned kExpectedCount_%s =\n"
           "    sizeof(kExpected_%s) / sizeof(kExpected_%s[0]);\n",
           name, name, name);
  } else {
    // Check the results.
    VIXL_CHECK(expected_length == results_length);
    unsigned error_count = 0;
    unsigned d = 0;
    for (unsigned n = 0; n < inputs_length; n++) {
      for (unsigned m = 0; m < inputs_length; m++, d++) {
        if (results[d] != expected[d]) {
          if (++error_count > kErrorReportLimit) continue;

          printf("%s 0x%016" PRIx64 ", 0x%016" PRIx64 " (%s %g, %g):\n",
                 name, inputs[n], inputs[m],
                 name,
                 rawbits_to_double(inputs[n]),
                 rawbits_to_double(inputs[m]));
          printf("  Expected: 0x%016" PRIx64 " (%g)\n",
                 expected[d], rawbits_to_double(expected[d]));
          printf("  Found:    0x%016" PRIx64 " (%g)\n",
                 results[d], rawbits_to_double(results[d]));
          printf("\n");
        }
      }
    }
    VIXL_ASSERT(d == expected_length);
    if (error_count > kErrorReportLimit) {
      printf("%u other errors follow.\n", error_count - kErrorReportLimit);
    }
    VIXL_CHECK(error_count == 0);
  }
  delete[] results;
}


// Test FP instructions using floats. The inputs[] and expected[] arrays should
// be arrays of rawbits representations of floats. This ensures that exact bit
// comparisons can be performed.
static void Test2Op(const char * name, Test2OpFPHelper_t helper,
                    const uint32_t inputs[], unsigned inputs_length,
                    const uint32_t expected[], unsigned expected_length) {
  VIXL_ASSERT(inputs_length > 0);

  static unsigned results_length = inputs_length * inputs_length;
  uint32_t * results = new uint32_t[results_length];

  Test2Op_Helper(helper, reinterpret_cast<uintptr_t>(inputs), inputs_length,
                 reinterpret_cast<uintptr_t>(results), kSRegSize);

  if (Cctest::sim_test_trace()) {
    // Print the results.
    printf("const uint32_t kExpected_%s[] = {\n", name);
    for (unsigned d = 0; d < results_length; d++) {
      printf("  0x%08" PRIx32 ",\n", results[d]);
    }
    printf("};\n");
    printf("const unsigned kExpectedCount_%s =\n"
           "    sizeof(kExpected_%s) / sizeof(kExpected_%s[0]);\n",
           name, name, name);
  } else {
    // Check the results.
    VIXL_CHECK(expected_length == results_length);
    unsigned error_count = 0;
    unsigned d = 0;
    for (unsigned n = 0; n < inputs_length; n++) {
      for (unsigned m = 0; m < inputs_length; m++, d++) {
        if (results[d] != expected[d]) {
          if (++error_count > kErrorReportLimit) continue;

          printf("%s 0x%08" PRIx32 ", 0x%08" PRIx32 " (%s %g, %g):\n",
                 name, inputs[n], inputs[m],
                 name,
                 rawbits_to_float(inputs[n]),
                 rawbits_to_float(inputs[m]));
          printf("  Expected: 0x%08" PRIx32 " (%g)\n",
                 expected[d], rawbits_to_float(expected[d]));
          printf("  Found:    0x%08" PRIx32 " (%g)\n",
                 results[d], rawbits_to_float(results[d]));
          printf("\n");
        }
      }
    }
    VIXL_ASSERT(d == expected_length);
    if (error_count > kErrorReportLimit) {
      printf("%u other errors follow.\n", error_count - kErrorReportLimit);
    }
    VIXL_CHECK(error_count == 0);
  }
  delete[] results;
}


static void Test3Op_Helper(Test3OpFPHelper_t helper,
                           uintptr_t inputs, unsigned inputs_length,
                           uintptr_t results, unsigned reg_size) {
  VIXL_ASSERT((reg_size == kDRegSize) || (reg_size == kSRegSize));

  SETUP();
  START();

  // Roll up the loop to keep the code size down.
  Label loop_n, loop_m, loop_a;

  Register out = x0;
  Register inputs_base = x1;
  Register length = w2;
  Register index_n = w3;
  Register index_m = w4;
  Register index_a = w5;

  bool double_op = reg_size == kDRegSize;
  const int index_shift =
      double_op ? kDRegSizeInBytesLog2 : kSRegSizeInBytesLog2;

  FPRegister fd = double_op ? d0 : s0;
  FPRegister fn = double_op ? d1 : s1;
  FPRegister fm = double_op ? d2 : s2;
  FPRegister fa = double_op ? d3 : s3;

  __ Mov(out, results);
  __ Mov(inputs_base, inputs);
  __ Mov(length, inputs_length);

  __ Mov(index_n, 0);
  __ Bind(&loop_n);
  __ Ldr(fn, MemOperand(inputs_base, index_n, UXTW, index_shift));

  __ Mov(index_m, 0);
  __ Bind(&loop_m);
  __ Ldr(fm, MemOperand(inputs_base, index_m, UXTW, index_shift));

  __ Mov(index_a, 0);
  __ Bind(&loop_a);
  __ Ldr(fa, MemOperand(inputs_base, index_a, UXTW, index_shift));

  (masm.*helper)(fd, fn, fm, fa);
  __ Str(fd, MemOperand(out, fd.SizeInBytes(), PostIndex));

  __ Add(index_a, index_a, 1);
  __ Cmp(index_a, inputs_length);
  __ B(lo, &loop_a);

  __ Add(index_m, index_m, 1);
  __ Cmp(index_m, inputs_length);
  __ B(lo, &loop_m);

  __ Add(index_n, index_n, 1);
  __ Cmp(index_n, inputs_length);
  __ B(lo, &loop_n);

  END();
  RUN();
  TEARDOWN();
}


// Test FP instructions using doubles. The inputs[] and expected[] arrays should
// be arrays of rawbits representations of doubles. This ensures that exact bit
// comparisons can be performed.
static void Test3Op(const char * name, Test3OpFPHelper_t helper,
                    const uint64_t inputs[], unsigned inputs_length,
                    const uint64_t expected[], unsigned expected_length) {
  VIXL_ASSERT(inputs_length > 0);

  static unsigned results_length =
      inputs_length * inputs_length * inputs_length;
  uint64_t * results = new uint64_t[results_length];

  Test3Op_Helper(helper, reinterpret_cast<uintptr_t>(inputs), inputs_length,
                 reinterpret_cast<uintptr_t>(results), kDRegSize);

  if (Cctest::sim_test_trace()) {
    // Print the results.
    printf("const uint64_t kExpected_%s[] = {\n", name);
    for (unsigned d = 0; d < results_length; d++) {
      printf("  0x%016" PRIx64 ",\n", results[d]);
    }
    printf("};\n");
    printf("const unsigned kExpectedCount_%s =\n"
           "    sizeof(kExpected_%s) / sizeof(kExpected_%s[0]);\n",
           name, name, name);
  } else {
    // Check the results.
    VIXL_CHECK(expected_length == results_length);
    unsigned error_count = 0;
    unsigned d = 0;
    for (unsigned n = 0; n < inputs_length; n++) {
      for (unsigned m = 0; m < inputs_length; m++) {
        for (unsigned a = 0; a < inputs_length; a++, d++) {
          if (results[d] != expected[d]) {
            if (++error_count > kErrorReportLimit) continue;

            printf("%s 0x%016" PRIx64 ", 0x%016" PRIx64 ", 0x%016" PRIx64 " "
                   "(%s %g, %g, %g):\n",
                   name, inputs[n], inputs[m], inputs[a],
                   name,
                   rawbits_to_double(inputs[n]),
                   rawbits_to_double(inputs[m]),
                   rawbits_to_double(inputs[a]));
            printf("  Expected: 0x%016" PRIx64 " (%g)\n",
                   expected[d], rawbits_to_double(expected[d]));
            printf("  Found:    0x%016" PRIx64 " (%g)\n",
                   results[d], rawbits_to_double(results[d]));
            printf("\n");
          }
        }
      }
    }
    VIXL_ASSERT(d == expected_length);
    if (error_count > kErrorReportLimit) {
      printf("%u other errors follow.\n", error_count - kErrorReportLimit);
    }
    VIXL_CHECK(error_count == 0);
  }
  delete[] results;
}


// Test FP instructions using floats. The inputs[] and expected[] arrays should
// be arrays of rawbits representations of floats. This ensures that exact bit
// comparisons can be performed.
static void Test3Op(const char * name, Test3OpFPHelper_t helper,
                    const uint32_t inputs[], unsigned inputs_length,
                    const uint32_t expected[], unsigned expected_length) {
  VIXL_ASSERT(inputs_length > 0);

  static unsigned results_length =
      inputs_length * inputs_length * inputs_length;
  uint32_t * results = new uint32_t[results_length];

  Test3Op_Helper(helper, reinterpret_cast<uintptr_t>(inputs), inputs_length,
                 reinterpret_cast<uintptr_t>(results), kSRegSize);

  if (Cctest::sim_test_trace()) {
    // Print the results.
    printf("const uint32_t kExpected_%s[] = {\n", name);
    for (unsigned d = 0; d < results_length; d++) {
      printf("  0x%08" PRIx32 ",\n", results[d]);
    }
    printf("};\n");
    printf("const unsigned kExpectedCount_%s =\n"
           "    sizeof(kExpected_%s) / sizeof(kExpected_%s[0]);\n",
           name, name, name);
  } else {
    // Check the results.
    VIXL_CHECK(expected_length == results_length);
    unsigned error_count = 0;
    unsigned d = 0;
    for (unsigned n = 0; n < inputs_length; n++) {
      for (unsigned m = 0; m < inputs_length; m++) {
        for (unsigned a = 0; a < inputs_length; a++, d++) {
          if (results[d] != expected[d]) {
            if (++error_count > kErrorReportLimit) continue;

            printf("%s 0x%08" PRIx32 ", 0x%08" PRIx32 ", 0x%08" PRIx32 " "
                   "(%s %g, %g, %g):\n",
                   name, inputs[n], inputs[m], inputs[a],
                   name,
                   rawbits_to_float(inputs[n]),
                   rawbits_to_float(inputs[m]),
                   rawbits_to_float(inputs[a]));
            printf("  Expected: 0x%08" PRIx32 " (%g)\n",
                   expected[d], rawbits_to_float(expected[d]));
            printf("  Found:    0x%08" PRIx32 " (%g)\n",
                   results[d], rawbits_to_float(results[d]));
            printf("\n");
          }
        }
      }
    }
    VIXL_ASSERT(d == expected_length);
    if (error_count > kErrorReportLimit) {
      printf("%u other errors follow.\n", error_count - kErrorReportLimit);
    }
    VIXL_CHECK(error_count == 0);
  }
  delete[] results;
}


// Floating-point tests.


// Standard floating-point test expansion for both double- and single-precision
// operations.
#define STRINGIFY(s) #s
#define DEFINE_TEST_FP(mnemonic, type) \
  TEST(mnemonic##_d) { \
    Test##type(STRINGIFY(mnemonic) "_d", \
               &MacroAssembler::mnemonic, \
               kInputDouble, kInputDoubleCount, \
               kExpected_##mnemonic##_d, \
               kExpectedCount_##mnemonic##_d); \
  } \
  \
  TEST(mnemonic##_s) { \
    Test##type(STRINGIFY(mnemonic) "_s", \
               &MacroAssembler::mnemonic, \
               kInputFloat, kInputFloatCount, \
               kExpected_##mnemonic##_s, \
               kExpectedCount_##mnemonic##_s); \
  }
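//
// For example, DEFINE_TEST_FP(fadd, 2Op) expands (roughly) to:
//
//   TEST(fadd_d) {
//     Test2Op("fadd_d", &MacroAssembler::fadd,
//             kInputDouble, kInputDoubleCount,
//             kExpected_fadd_d, kExpectedCount_fadd_d);
//   }
//
// plus the equivalent single-precision test, fadd_s.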


// TODO(jbramley): Fabs

DEFINE_TEST_FP(fadd, 2Op)

// TODO(jbramley): Fccmp
// TODO(jbramley): Fcmp
// TODO(jbramley): Fcsel
// TODO(jbramley): Fcvt
// TODO(jbramley): Fcvt-to-integer
// TODO(jbramley): Fcvt-to-fixed-point

DEFINE_TEST_FP(fdiv, 2Op)
DEFINE_TEST_FP(fmadd, 3Op)
DEFINE_TEST_FP(fmax, 2Op)
DEFINE_TEST_FP(fmaxnm, 2Op)
DEFINE_TEST_FP(fmin, 2Op)
DEFINE_TEST_FP(fminnm, 2Op)

// TODO(jbramley): Fmov

DEFINE_TEST_FP(fmsub, 3Op)
DEFINE_TEST_FP(fmul, 2Op)

// TODO(jbramley): Fneg

DEFINE_TEST_FP(fnmadd, 3Op)
DEFINE_TEST_FP(fnmsub, 3Op)

// TODO(jbramley): Frint-to-integer
// TODO(jbramley): Fsqrt

DEFINE_TEST_FP(fsub, 2Op)

// TODO(jbramley): Scvtf-fixed-point
// TODO(jbramley): Scvtf-integer
// TODO(jbramley): Ucvtf-fixed-point
// TODO(jbramley): Ucvtf-integer


}  // namespace vixl

218 test/test-simulator-inputs-a64.h Normal file
@ -0,0 +1,218 @@
// Copyright 2014, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file holds inputs for the instructions tested by test-simulator-a64.
//
// If the input lists are updated, please run tools/generate_simulator_traces.py
// on a reference platform to regenerate the expected outputs. The outputs are
// stored in test-simulator-traces-a64.h.

#include <stdint.h>

// This header should only be used by test/test-simulator-a64.cc, so it
// doesn't need the usual header guard.
#ifdef VIXL_A64_TEST_SIMULATOR_INPUTS_A64_H_
#error This header should be included only once.
#endif
#define VIXL_A64_TEST_SIMULATOR_INPUTS_A64_H_

// Double values, stored as uint64_t representations. This ensures exact bit
// representation, and avoids the loss of NaNs and suchlike through C++ casts.
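//
// For example (illustration only), using the encodings listed below:
//   0x3ff0000000000000 is the rawbits form of 1.0, and
//   0x7ff8000000000000 is the rawbits form of the default quiet NaN.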
static const uint64_t kInputDouble[] = {
  // Simple values.
  0x0000000000000000,   // 0.0
  0x0010000000000000,   // The smallest normal value.
  0x3fdfffffffffffff,   // The value just below 0.5.
  0x3fe0000000000000,   // 0.5
  0x3fe0000000000001,   // The value just above 0.5.
  0x3fefffffffffffff,   // The value just below 1.0.
  0x3ff0000000000000,   // 1.0
  0x3ff0000000000001,   // The value just above 1.0.
  0x3ff8000000000000,   // 1.5
  0x4024000000000000,   // 10
  0x7fefffffffffffff,   // The largest finite value.

  // Infinity.
  0x7ff0000000000000,

  // NaNs.
  //  - Quiet NaNs
  0x7ff923456789abcd,
  0x7ff8000000000000,
  //  - Signalling NaNs
  0x7ff123456789abcd,
  0x7ff0000000000001,

  // Subnormals.
  //  - A recognisable bit pattern.
  0x000123456789abcd,
  //  - The largest subnormal value.
  0x000fffffffffffff,
  //  - The smallest subnormal value.
  0x0000000000000001,

  // The same values again, but negated.
  0x8000000000000000,
  0x8010000000000000,
  0xbfdfffffffffffff,
  0xbfe0000000000000,
  0xbfe0000000000001,
  0xbfefffffffffffff,
  0xbff0000000000000,
  0xbff0000000000001,
  0xbff8000000000000,
  0xc024000000000000,
  0xffefffffffffffff,
  0xfff0000000000000,
  0xfff923456789abcd,
  0xfff8000000000000,
  0xfff123456789abcd,
  0xfff0000000000001,
  0x800123456789abcd,
  0x800fffffffffffff,
  0x8000000000000001,


// TODO(jbramley): Add these inputs to the fcvt tests. They have little value in
// other instructions and they will massively inflate the trace size.
#if 0
  // Values relevant for conversion to single-precision floats.
  double_to_rawbits(FLT_MAX),
  //  - The smallest normalized float.
  double_to_rawbits(pow(2, -126)),
  //  - Normal floats that need (ties-to-even) rounding.
  //    For normalized numbers, bit 29 (0x0000000020000000) is the lowest-order
  //    bit which will fit in the float's mantissa.
  0x3ff0000000000000,
  0x3ff0000000000001,
  0x3ff0000010000000,
  0x3ff0000010000001,
  0x3ff0000020000000,
  0x3ff0000020000001,
  0x3ff0000030000000,
  0x3ff0000030000001,
  0x3ff0000040000000,
  0x3ff0000040000001,
  0x3ff0000050000000,
  0x3ff0000050000001,
  0x3ff0000060000000,
  //  - A mantissa that overflows into the exponent during rounding.
  0x3feffffff0000000,
  //  - The largest double that rounds to a normal float.
  0x47efffffefffffff,
  //  - The smallest exponent that's too big for a float.
  double_to_rawbits(pow(2, 128)),
  //  - This exponent is in range, but the value rounds to infinity.
  0x47effffff0000000,
  //  - The largest double which is too small for a subnormal float.
  0x3690000000000000,
  //  - The largest subnormal float.
  0x380fffffc0000000,
  //  - The smallest subnormal float.
  0x36a0000000000000,
  //  - Subnormal floats that need (ties-to-even) rounding.
  //    For these subnormals, bit 34 (0x0000000400000000) is the lowest-order
  //    bit which will fit in the float's mantissa.
  0x37c159e000000000,
  0x37c159e000000001,
  0x37c159e200000000,
  0x37c159e200000001,
  0x37c159e400000000,
  0x37c159e400000001,
  0x37c159e600000000,
  0x37c159e600000001,
  0x37c159e800000000,
  0x37c159e800000001,
  0x37c159ea00000000,
  0x37c159ea00000001,
  0x37c159ec00000000,
  //  - The smallest double which rounds up to become a subnormal float.
  0x3690000000000001,
#endif
};
static const size_t kInputDoubleCount =
    sizeof(kInputDouble) / sizeof(kInputDouble[0]);


// Float values, stored as uint32_t representations. This ensures exact bit
// representation, and avoids the loss of NaNs and suchlike through C++ casts.
static const uint32_t kInputFloat[] = {
  // Simple values.
  0x00000000,   // 0.0
  0x00800000,   // The smallest normal value.
  0x3effffff,   // The value just below 0.5.
  0x3f000000,   // 0.5
  0x3f000001,   // The value just above 0.5.
  0x3f7fffff,   // The value just below 1.0.
  0x3f800000,   // 1.0
  0x3f800001,   // The value just above 1.0.
  0x3fc00000,   // 1.5
  0x41200000,   // 10
  0x7f7fffff,   // The largest finite value.

  // Infinity.
  0x7f800000,

  // NaNs.
  //  - Quiet NaNs
  0x7fd23456,
  0x7fc00000,
  //  - Signalling NaNs
  0x7f923456,
  0x7f800001,

  // Subnormals.
  //  - A recognisable bit pattern.
  0x00123456,
  //  - The largest subnormal value.
  0x007fffff,
  //  - The smallest subnormal value.
  0x00000001,

  // The same values again, but negated.
  0x80000000,
  0x80800000,
  0xbeffffff,
  0xbf000000,
  0xbf000001,
  0xbf7fffff,
  0xbf800000,
  0xbf800001,
  0xbfc00000,
  0xc1200000,
  0xff7fffff,
  0xff800000,
  0xffd23456,
  0xffc00000,
  0xff923456,
  0xff800001,
  0x80123456,
  0x807fffff,
  0x80000001,
};
static const size_t kInputFloatCount =
    sizeof(kInputFloat) / sizeof(kInputFloat[0]);

462258 test/test-simulator-traces-a64.h Normal file
File diff suppressed because it is too large
@ -95,11 +95,11 @@ bool EqualFP64(double expected, const RegisterDump*, double result) {


bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
  ASSERT(reg.Is32Bits());
  VIXL_ASSERT(reg.Is32Bits());
  // Retrieve the corresponding X register so we can check that the upper part
  // was properly cleared.
  int64_t result_x = core->xreg(reg.code());
  if ((result_x & 0xffffffff00000000L) != 0) {
  if ((result_x & 0xffffffff00000000) != 0) {
    printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
           expected, result_x);
    return false;
@ -112,7 +112,7 @@ bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
bool Equal64(uint64_t expected,
             const RegisterDump* core,
             const Register& reg) {
  ASSERT(reg.Is64Bits());
  VIXL_ASSERT(reg.Is64Bits());
  uint64_t result = core->xreg(reg.code());
  return Equal64(expected, core, result);
}
@ -121,11 +121,11 @@ bool Equal64(uint64_t expected,
bool EqualFP32(float expected,
               const RegisterDump* core,
               const FPRegister& fpreg) {
  ASSERT(fpreg.Is32Bits());
  VIXL_ASSERT(fpreg.Is32Bits());
  // Retrieve the corresponding D register so we can check that the upper part
  // was properly cleared.
  uint64_t result_64 = core->dreg_bits(fpreg.code());
  if ((result_64 & 0xffffffff00000000L) != 0) {
  if ((result_64 & 0xffffffff00000000) != 0) {
    printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
           float_to_rawbits(expected), expected, result_64);
    return false;
@ -138,7 +138,7 @@ bool EqualFP32(float expected,
bool EqualFP64(double expected,
               const RegisterDump* core,
               const FPRegister& fpreg) {
  ASSERT(fpreg.Is64Bits());
  VIXL_ASSERT(fpreg.Is64Bits());
  return EqualFP64(expected, core, core->dreg(fpreg.code()));
}

@ -146,7 +146,7 @@ bool EqualFP64(double expected,
bool Equal64(const Register& reg0,
             const RegisterDump* core,
             const Register& reg1) {
  ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
  VIXL_ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
  int64_t expected = core->xreg(reg0.code());
  int64_t result = core->xreg(reg1.code());
  return Equal64(expected, core, result);
@ -174,8 +174,8 @@ static char FlagV(uint32_t flags) {


bool EqualNzcv(uint32_t expected, uint32_t result) {
  ASSERT((expected & ~NZCVFlag) == 0);
  ASSERT((result & ~NZCVFlag) == 0);
  VIXL_ASSERT((expected & ~NZCVFlag) == 0);
  VIXL_ASSERT((result & ~NZCVFlag) == 0);
  if (result != expected) {
    printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
           FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
@ -215,7 +215,7 @@ RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
  RegList list = 0;
  int i = 0;
  for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
    if (((1UL << n) & allowed) != 0) {
    if (((UINT64_C(1) << n) & allowed) != 0) {
      // Only assign allowed registers.
      if (r) {
        r[i] = Register(n, reg_size);
@ -226,12 +226,12 @@ RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
      if (w) {
        w[i] = Register(n, kWRegSize);
      }
      list |= (1UL << n);
      list |= (UINT64_C(1) << n);
      i++;
    }
  }
  // Check that we got enough registers.
  ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);
  VIXL_ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);

  return list;
}
@ -242,7 +242,7 @@ RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
  RegList list = 0;
  int i = 0;
  for (unsigned n = 0; (n < kNumberOfFPRegisters) && (i < reg_count); n++) {
    if (((1UL << n) & allowed) != 0) {
    if (((UINT64_C(1) << n) & allowed) != 0) {
      // Only assigned allowed registers.
      if (v) {
        v[i] = FPRegister(n, reg_size);
@ -253,12 +253,12 @@ RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
      if (s) {
        s[i] = FPRegister(n, kSRegSize);
      }
      list |= (1UL << n);
      list |= (UINT64_C(1) << n);
      i++;
    }
  }
  // Check that we got enough registers.
  ASSERT(CountSetBits(list, kNumberOfFPRegisters) == reg_count);
  VIXL_ASSERT(CountSetBits(list, kNumberOfFPRegisters) == reg_count);

  return list;
}
@ -267,10 +267,10 @@ RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
  Register first = NoReg;
  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    if (reg_list & (1UL << i)) {
    if (reg_list & (UINT64_C(1) << i)) {
      Register xn(i, kXRegSize);
      // We should never write into sp here.
      ASSERT(!xn.Is(sp));
      VIXL_ASSERT(!xn.Is(sp));
      if (!xn.IsZero()) {
        if (!first.IsValid()) {
          // This is the first register we've hit, so construct the literal.
@ -290,7 +290,7 @@ void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
  FPRegister first = NoFPReg;
  for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
    if (reg_list & (1UL << i)) {
    if (reg_list & (UINT64_C(1) << i)) {
      FPRegister dn(i, kDRegSize);
      if (!first.IsValid()) {
        // This is the first register we've hit, so construct the literal.
@ -314,20 +314,17 @@ void Clobber(MacroAssembler* masm, CPURegList reg_list) {
    // This will always clobber D registers.
    ClobberFP(masm, reg_list.list());
  } else {
    UNREACHABLE();
    VIXL_UNREACHABLE();
  }
}


void RegisterDump::Dump(MacroAssembler* masm) {
  ASSERT(__ StackPointer().Is(sp));
  VIXL_ASSERT(__ StackPointer().Is(sp));

  // Ensure that we don't unintentionally clobber any registers.
  Register old_tmp0 = __ Tmp0();
  Register old_tmp1 = __ Tmp1();
  FPRegister old_fptmp0 = __ FPTmp0();
  __ SetScratchRegisters(NoReg, NoReg);
  __ SetFPScratchRegister(NoFPReg);
  UseScratchRegisterScope temps(masm);
  temps.ExcludeAll();

  // Preserve some temporary registers.
  Register dump_base = x0;
@ -349,7 +346,7 @@ void RegisterDump::Dump(MacroAssembler* masm) {
  __ Push(xzr, dump_base, dump, tmp);

  // Load the address where we will dump the state.
  __ Mov(dump_base, reinterpret_cast<uint64_t>(&dump_));
  __ Mov(dump_base, reinterpret_cast<uintptr_t>(&dump_));

  // Dump the stack pointer (sp and wsp).
  // The stack pointer cannot be stored directly; it needs to be moved into
@ -397,7 +394,7 @@ void RegisterDump::Dump(MacroAssembler* masm) {
  // easily restore them.
  Register dump2_base = x10;
  Register dump2 = x11;
  ASSERT(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));
  VIXL_ASSERT(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));

  // Don't lose the dump_ address.
  __ Mov(dump2_base, dump_base);
@ -418,10 +415,6 @@ void RegisterDump::Dump(MacroAssembler* masm) {
  __ Ldr(dump2_base, MemOperand(dump2, dump2_base.code() * kXRegSizeInBytes));
  __ Ldr(dump2, MemOperand(dump2, dump2.code() * kXRegSizeInBytes));

  // Restore the MacroAssembler's scratch registers.
  __ SetScratchRegisters(old_tmp0, old_tmp1);
  __ SetFPScratchRegister(old_fptmp0);

  completed_ = true;
}

@ -40,12 +40,12 @@ namespace vixl {
class RegisterDump {
 public:
  RegisterDump() : completed_(false) {
    ASSERT(sizeof(dump_.d_[0]) == kDRegSizeInBytes);
    ASSERT(sizeof(dump_.s_[0]) == kSRegSizeInBytes);
    ASSERT(sizeof(dump_.d_[0]) == kXRegSizeInBytes);
    ASSERT(sizeof(dump_.s_[0]) == kWRegSizeInBytes);
    ASSERT(sizeof(dump_.x_[0]) == kXRegSizeInBytes);
    ASSERT(sizeof(dump_.w_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kDRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kSRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.d_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.s_[0]) == kWRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.x_[0]) == kXRegSizeInBytes);
    VIXL_ASSERT(sizeof(dump_.w_[0]) == kWRegSizeInBytes);
  }

  // The Dump method generates code to store a snapshot of the register values.
@ -62,7 +62,7 @@ class RegisterDump {
    if (code == kSPRegInternalCode) {
      return wspreg();
    }
    ASSERT(RegAliasesMatch(code));
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.w_[code];
  }

@ -70,13 +70,13 @@ class RegisterDump {
    if (code == kSPRegInternalCode) {
      return spreg();
    }
    ASSERT(RegAliasesMatch(code));
    VIXL_ASSERT(RegAliasesMatch(code));
    return dump_.x_[code];
  }

  // FPRegister accessors.
  inline uint32_t sreg_bits(unsigned code) const {
    ASSERT(FPRegAliasesMatch(code));
    VIXL_ASSERT(FPRegAliasesMatch(code));
    return dump_.s_[code];
  }

@ -85,7 +85,7 @@ class RegisterDump {
  }

  inline uint64_t dreg_bits(unsigned code) const {
    ASSERT(FPRegAliasesMatch(code));
    VIXL_ASSERT(FPRegAliasesMatch(code));
    return dump_.d_[code];
  }

@ -95,19 +95,19 @@ class RegisterDump {

  // Stack pointer accessors.
  inline int64_t spreg() const {
    ASSERT(SPRegAliasesMatch());
    VIXL_ASSERT(SPRegAliasesMatch());
    return dump_.sp_;
  }

  inline int64_t wspreg() const {
    ASSERT(SPRegAliasesMatch());
    VIXL_ASSERT(SPRegAliasesMatch());
    return dump_.wsp_;
  }

  // Flags accessors.
  inline uint64_t flags_nzcv() const {
    ASSERT(IsComplete());
    ASSERT((dump_.flags_ & ~Flags_mask) == 0);
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT((dump_.flags_ & ~Flags_mask) == 0);
    return dump_.flags_ & Flags_mask;
  }

@ -123,21 +123,21 @@ class RegisterDump {
  // w<code>. A failure of this test most likely represents a failure in the
  // ::Dump method, or a failure in the simulator.
  bool RegAliasesMatch(unsigned code) const {
    ASSERT(IsComplete());
    ASSERT(code < kNumberOfRegisters);
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfRegisters);
    return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
  }

  // As RegAliasesMatch, but for the stack pointer.
  bool SPRegAliasesMatch() const {
    ASSERT(IsComplete());
    VIXL_ASSERT(IsComplete());
    return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
  }

  // As RegAliasesMatch, but for floating-point registers.
  bool FPRegAliasesMatch(unsigned code) const {
    ASSERT(IsComplete());
    ASSERT(code < kNumberOfFPRegisters);
    VIXL_ASSERT(IsComplete());
    VIXL_ASSERT(code < kNumberOfFPRegisters);
    return (dump_.d_[code] & kSRegMask) == dump_.s_[code];
  }

@ -214,7 +214,7 @@ RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
// top word anyway, so clobbering the full X registers should make tests more
// rigorous.
void Clobber(MacroAssembler* masm, RegList reg_list,
             uint64_t const value = 0xfedcba9876543210UL);
             uint64_t const value = 0xfedcba9876543210);

// As Clobber, but for FP registers.
void ClobberFP(MacroAssembler* masm, RegList reg_list,

97 tools/generate_simulator_traces.py Executable file
@ -0,0 +1,97 @@
#!/usr/bin/env python2.7

# Copyright 2014, ARM Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions and the following disclaimer in the documentation
#     and/or other materials provided with the distribution.
#   * Neither the name of ARM Limited nor the names of its contributors may be
#     used to endorse or promote products derived from this software without
#     specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os
import sys
import argparse
import re
import util

def BuildOptions(root):
  result = argparse.ArgumentParser(description = 'Simulator test generator.')
  result.add_argument('--cctest', action='store', default=root+'/cctest',
                      help='The cctest executable to run.')
  result.add_argument('--out', action='store',
                      default='test/test-simulator-traces-a64.h')
  return result.parse_args()


if __name__ == '__main__':
  # $ROOT/tools/generate_simulator_traces.py
  root_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
  os.chdir(root_dir)

  args = BuildOptions(root_dir)

  # Run each simulator test (SIM_*) with the --sim_test_trace option, and use
  # the output to update the traces header (from --out). The existing traces are
  # wholly replaced, but some boilerplate code exists in the header, so we find
  # the start of the traces, truncate the file to that point, then add the new
  # (updated) trace data.
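  #
  # Typical invocation (illustrative; both arguments fall back to the defaults
  # declared in BuildOptions above):
  #   $ tools/generate_simulator_traces.py --cctest=./cctest \
  #         --out=test/test-simulator-traces-a64.h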

  # Find the output section of the traces file.
  marker = [
    '// ---------------------------------------------------------------------',
    '// Expected outputs.',
    '// Everything below this point is automatically generated.',
    '//',
    '// PLEASE DO NOT EDIT ANYTHING BELOW THIS COMMENT.',
    '// ---------------------------------------------------------------------',
  ]
  matched = 0
  f = open(args.out, 'r+')
  # Use readline (rather than for..in) so we can truncate at the right place.
  line = f.readline()
  while line:
    if line.strip() == marker[matched]:
      matched = matched + 1
      if matched == len(marker):
        f.truncate()
        break
    else:
      matched = 0
    line = f.readline()

  if matched != len(marker):
    util.abort('Failed to find output section in ' + args.out + '.')

  # Find the simulator tests.
  status, output = util.getstatusoutput(args.cctest + ' --list')
  if status != 0: util.abort('Failed to list all tests')
  tests = filter(lambda t: 'SIM_' in t, output.split())

  # Run each test.
  for test in tests:
    cmd = ' '.join([args.cctest, '--sim_test_trace', test])
    status, output = util.getstatusoutput(cmd)
    if status != 0: util.abort('Failed to run ' + cmd + '.')

    f.write('\n\n' + output)

  f.write('\n\n')
  f.close()