Enable -Wshadow and fix resulting errors

Prompted by https://github.com/Linaro/vixl/pull/9, enable -Wshadow generally, renaming the shadowing declarations it flags and locally suppressing the warning where the shadowing is deliberate.
Martyn Capewell 2022-01-07 16:38:14 +00:00
parent 9693325360
commit 6e8db23503
16 changed files with 638 additions and 352 deletions
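For illustration, the kind of declaration -Wshadow diagnoses (a hypothetical minimal example, not code from this patch); every rename below follows the same idea of giving the inner declaration a distinct name:

void Example(int pc) {
  // warning: declaration of 'pc' shadows a parameter [-Wshadow]
  for (int pc = 0; pc < 4; pc++) {
    // Here 'pc' means the loop counter and silently hides the parameter;
    // renaming either one removes the ambiguity.
  }
}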


@ -84,6 +84,7 @@ options = {
'-pedantic',
'-Wwrite-strings',
'-Wunused',
'-Wshadow',
'-Wno-missing-noreturn'],
'CPPPATH' : [config.dir_src_vixl]
},


@ -73,11 +73,11 @@ class CustomDisassembler : public PrintDisassembler {
CustomStream* GetStream() const {
return reinterpret_cast<CustomStream*>(&os());
}
virtual void PrintCodeAddress(uint32_t pc) VIXL_OVERRIDE {
virtual void PrintCodeAddress(uint32_t addr) VIXL_OVERRIDE {
// If the address matches a label, then print the label. Otherwise, print
// nothing.
std::map<Location::Offset, const char*>::iterator symbol =
GetStream()->GetSymbols().find(pc);
GetStream()->GetSymbols().find(addr);
if (symbol != GetStream()->GetSymbols().end()) {
os().os() << symbol->second << ":" << std::endl;
}
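A standalone sketch of the address-to-label lookup this override performs, with plain std::map/uint32_t standing in for the VIXL stream and Location::Offset types:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// If the address matches a known symbol, print "label:" before the
// disassembled instruction; otherwise print nothing.
void MaybePrintLabel(const std::map<uint32_t, std::string>& symbols,
                     uint32_t addr) {
  std::map<uint32_t, std::string>::const_iterator symbol = symbols.find(addr);
  if (symbol != symbols.end()) {
    std::cout << symbol->second << ":" << std::endl;
  }
}

int main() {
  std::map<uint32_t, std::string> symbols;
  symbols[0x10] = "loop_start";
  MaybePrintLabel(symbols, 0x10);  // Prints "loop_start:".
  MaybePrintLabel(symbols, 0x14);  // Prints nothing.
  return 0;
}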


@ -2557,10 +2557,11 @@ void Assembler::adr(Condition cond,
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= 0) && (offset <= 1020) &&
((offset & 0x3) == 0));
const int32_t target = offset >> 2;
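All of these EmitOp helpers share one computation: advance the encoding position by the architectural PC read-ahead, optionally word-align it, and take the distance to the target. A self-contained sketch of the T32 ADR case, assuming the usual Arm rule that the PC reads 4 bytes ahead in T32 (and 8 in A32), matching kT32PcDelta/kA32PcDelta:

#include <cassert>
#include <cstdint>

int32_t AlignDown(int32_t value, int32_t alignment) {
  return value - (value % alignment);  // Sufficient for non-negative values.
}

// Immediate encoded by T32 ADR: words from the word-aligned PC to the target.
int32_t AdrT32Immediate(int32_t target_location, int32_t assembly_offset) {
  const int32_t kT32PcDelta = 4;  // The PC reads 4 bytes ahead in T32.
  int32_t program_counter = assembly_offset + kT32PcDelta;
  int32_t offset = target_location - AlignDown(program_counter, 4);
  assert((offset >= 0) && (offset <= 1020) && ((offset & 0x3) == 0));
  return offset >> 2;  // The instruction stores the offset in words.
}

int main() {
  // An ADR at offset 8 targeting offset 16 encodes (16 - 12) / 4 = 1.
  assert(AdrT32Immediate(16, 8) == 1);
  return 0;
}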
@ -2588,10 +2589,11 @@ void Assembler::adr(Condition cond,
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
int32_t target;
if ((offset >= 0) && (offset <= 4095)) {
target = offset;
@ -2622,10 +2624,11 @@ void Assembler::adr(Condition cond,
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kA32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
int32_t target;
ImmediateA32 positive_immediate_a32(offset);
if (positive_immediate_a32.IsValid()) {
@ -3024,10 +3027,10 @@ void Assembler::b(Condition cond, EncodingSize size, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - pc;
program_counter += kT32PcDelta;
Location::Offset offset = location->GetLocation() - program_counter;
VIXL_ASSERT((offset >= -256) && (offset <= 254) &&
((offset & 0x1) == 0));
const int32_t target = offset >> 1;
@ -3051,10 +3054,10 @@ void Assembler::b(Condition cond, EncodingSize size, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - pc;
program_counter += kT32PcDelta;
Location::Offset offset = location->GetLocation() - program_counter;
VIXL_ASSERT((offset >= -2048) && (offset <= 2046) &&
((offset & 0x1) == 0));
const int32_t target = offset >> 1;
@ -3075,10 +3078,10 @@ void Assembler::b(Condition cond, EncodingSize size, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - pc;
program_counter += kT32PcDelta;
Location::Offset offset = location->GetLocation() - program_counter;
VIXL_ASSERT((offset >= -1048576) && (offset <= 1048574) &&
((offset & 0x1) == 0));
const int32_t target = offset >> 1;
@ -3104,10 +3107,10 @@ void Assembler::b(Condition cond, EncodingSize size, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - pc;
program_counter += kT32PcDelta;
Location::Offset offset = location->GetLocation() - program_counter;
VIXL_ASSERT((offset >= -16777216) && (offset <= 16777214) &&
((offset & 0x1) == 0));
int32_t target = offset >> 1;
@ -3132,10 +3135,10 @@ void Assembler::b(Condition cond, EncodingSize size, Location* location) {
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kA32PcDelta;
Location::Offset offset = location->GetLocation() - pc;
program_counter += kA32PcDelta;
Location::Offset offset = location->GetLocation() - program_counter;
VIXL_ASSERT((offset >= -33554432) && (offset <= 33554428) &&
((offset & 0x3) == 0));
const int32_t target = offset >> 2;
@ -3462,10 +3465,10 @@ void Assembler::bl(Condition cond, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - pc;
program_counter += kT32PcDelta;
Location::Offset offset = location->GetLocation() - program_counter;
VIXL_ASSERT((offset >= -16777216) && (offset <= 16777214) &&
((offset & 0x1) == 0));
int32_t target = offset >> 1;
@ -3490,10 +3493,10 @@ void Assembler::bl(Condition cond, Location* location) {
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kA32PcDelta;
Location::Offset offset = location->GetLocation() - pc;
program_counter += kA32PcDelta;
Location::Offset offset = location->GetLocation() - program_counter;
VIXL_ASSERT((offset >= -33554432) && (offset <= 33554428) &&
((offset & 0x3) == 0));
const int32_t target = offset >> 2;
@ -3549,10 +3552,11 @@ void Assembler::blx(Condition cond, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -16777216) && (offset <= 16777212) &&
((offset & 0x3) == 0));
int32_t target = offset >> 2;
@ -3577,12 +3581,12 @@ void Assembler::blx(Condition cond, Location* location) {
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const
VIXL_OVERRIDE {
pc += kA32PcDelta;
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(pc, 4);
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -33554432) && (offset <= 33554430) &&
((offset & 0x1) == 0));
const int32_t target = offset >> 1;
@ -3698,10 +3702,10 @@ void Assembler::cbnz(Register rn, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - pc;
program_counter += kT32PcDelta;
Location::Offset offset = location->GetLocation() - program_counter;
VIXL_ASSERT((offset >= 0) && (offset <= 126) &&
((offset & 0x1) == 0));
const int32_t target = offset >> 1;
@ -3748,10 +3752,10 @@ void Assembler::cbz(Register rn, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - pc;
program_counter += kT32PcDelta;
Location::Offset offset = location->GetLocation() - program_counter;
VIXL_ASSERT((offset >= 0) && (offset <= 126) &&
((offset & 0x1) == 0));
const int32_t target = offset >> 1;
@ -5208,10 +5212,11 @@ void Assembler::ldr(Condition cond,
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= 0) && (offset <= 1020) &&
((offset & 0x3) == 0));
const int32_t target = offset >> 2;
@ -5233,10 +5238,11 @@ void Assembler::ldr(Condition cond,
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 12);
@ -5259,10 +5265,11 @@ void Assembler::ldr(Condition cond,
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kA32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 12);
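The literal loads above store the offset's sign out of line: a 12-bit magnitude plus a U ("add the offset") bit. A minimal model of that scheme, illustrating the abs(offset) | (U << 12) lines (the pattern only, not a full instruction encoding):

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Encode a signed offset in [-4095, 4095] as a magnitude and a U bit.
uint32_t EncodeUOffset(int32_t offset) {
  assert((offset >= -4095) && (offset <= 4095));
  uint32_t U = (offset >= 0);
  return static_cast<uint32_t>(abs(offset)) | (U << 12);
}

int main() {
  assert(EncodeUOffset(20) == ((1u << 12) | 20));  // Positive: U = 1.
  assert(EncodeUOffset(-20) == 20);                // Negative: U = 0.
  return 0;
}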
@ -5505,10 +5512,11 @@ void Assembler::ldrb(Condition cond, Register rt, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 12);
@ -5531,10 +5539,11 @@ void Assembler::ldrb(Condition cond, Register rt, Location* location) {
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kA32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 12);
@ -5747,10 +5756,11 @@ void Assembler::ldrd(Condition cond,
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -1020) && (offset <= 1020) &&
((offset & 0x3) == 0));
int32_t target = offset >> 2;
@ -5777,10 +5787,11 @@ void Assembler::ldrd(Condition cond,
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kA32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -255) && (offset <= 255));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 8);
@ -6129,10 +6140,11 @@ void Assembler::ldrh(Condition cond, Register rt, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 12);
@ -6155,10 +6167,11 @@ void Assembler::ldrh(Condition cond, Register rt, Location* location) {
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kA32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -255) && (offset <= 255));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 8);
@ -6382,10 +6395,11 @@ void Assembler::ldrsb(Condition cond, Register rt, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 12);
@ -6408,10 +6422,11 @@ void Assembler::ldrsb(Condition cond, Register rt, Location* location) {
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kA32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -255) && (offset <= 255));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 8);
@ -6635,10 +6650,11 @@ void Assembler::ldrsh(Condition cond, Register rt, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 12);
@ -6661,10 +6677,11 @@ void Assembler::ldrsh(Condition cond, Register rt, Location* location) {
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kA32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -255) && (offset <= 255));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 8);
@ -8039,10 +8056,11 @@ void Assembler::pld(Condition cond, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 12);
@ -8062,12 +8080,12 @@ void Assembler::pld(Condition cond, Location* location) {
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const
VIXL_OVERRIDE {
pc += kA32PcDelta;
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(pc, 4);
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 12);
@ -8403,10 +8421,11 @@ void Assembler::pli(Condition cond, Location* location) {
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 12);
@ -8426,12 +8445,12 @@ void Assembler::pli(Condition cond, Location* location) {
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const
VIXL_OVERRIDE {
pc += kA32PcDelta;
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(pc, 4);
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -4095) && (offset <= 4095));
uint32_t U = (offset >= 0);
int32_t target = abs(offset) | (U << 12);
@ -19616,10 +19635,11 @@ void Assembler::vldr(Condition cond,
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -1020) && (offset <= 1020) &&
((offset & 0x3) == 0));
int32_t target = offset >> 2;
@ -19646,10 +19666,11 @@ void Assembler::vldr(Condition cond,
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kA32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -1020) && (offset <= 1020) &&
((offset & 0x3) == 0));
int32_t target = offset >> 2;
@ -19770,10 +19791,11 @@ void Assembler::vldr(Condition cond,
public:
EmitOp() : Location::EmitOperator(T32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kT32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kT32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -1020) && (offset <= 1020) &&
((offset & 0x3) == 0));
int32_t target = offset >> 2;
@ -19800,10 +19822,11 @@ void Assembler::vldr(Condition cond,
public:
EmitOp() : Location::EmitOperator(A32) {}
virtual uint32_t Encode(uint32_t instr,
Location::Offset pc,
Location::Offset program_counter,
const Location* location) const VIXL_OVERRIDE {
pc += kA32PcDelta;
Location::Offset offset = location->GetLocation() - AlignDown(pc, 4);
program_counter += kA32PcDelta;
Location::Offset offset =
location->GetLocation() - AlignDown(program_counter, 4);
VIXL_ASSERT((offset >= -1020) && (offset <= 1020) &&
((offset & 0x3) == 0));
int32_t target = offset >> 2;


@ -402,13 +402,13 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
VIXL_ASSERT(GetBuffer()->Is32bitAligned());
}
// If we need to add padding, check if we have to emit the pool.
const int32_t pc = GetCursorOffset();
if (label->Needs16BitPadding(pc)) {
const int32_t cursor = GetCursorOffset();
if (label->Needs16BitPadding(cursor)) {
const int kPaddingBytes = 2;
if (pool_manager_.MustEmit(pc, kPaddingBytes)) {
int32_t new_pc = pool_manager_.Emit(this, pc, kPaddingBytes);
USE(new_pc);
VIXL_ASSERT(new_pc == GetCursorOffset());
if (pool_manager_.MustEmit(cursor, kPaddingBytes)) {
int32_t new_cursor = pool_manager_.Emit(this, cursor, kPaddingBytes);
USE(new_cursor);
VIXL_ASSERT(new_cursor == GetCursorOffset());
}
}
pool_manager_.Bind(this, label, GetCursorOffset());
@ -430,30 +430,30 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
Location* location,
Condition* cond = NULL) {
int size = info->size;
int32_t pc = GetCursorOffset();
int32_t cursor = GetCursorOffset();
// If we need to emit a branch over the instruction, take this into account.
if ((cond != NULL) && NeedBranch(cond)) {
size += kBranchSize;
pc += kBranchSize;
cursor += kBranchSize;
}
int32_t from = pc;
int32_t from = cursor;
from += IsUsingT32() ? kT32PcDelta : kA32PcDelta;
if (info->pc_needs_aligning) from = AlignDown(from, 4);
int32_t min = from + info->min_offset;
int32_t max = from + info->max_offset;
ForwardReference<int32_t> temp_ref(pc,
ForwardReference<int32_t> temp_ref(cursor,
info->size,
min,
max,
info->alignment);
if (pool_manager_.MustEmit(GetCursorOffset(), size, &temp_ref, location)) {
int32_t new_pc = pool_manager_.Emit(this,
GetCursorOffset(),
info->size,
&temp_ref,
location);
USE(new_pc);
VIXL_ASSERT(new_pc == GetCursorOffset());
int32_t new_cursor = pool_manager_.Emit(this,
GetCursorOffset(),
info->size,
&temp_ref,
location);
USE(new_cursor);
VIXL_ASSERT(new_cursor == GetCursorOffset());
}
}
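Each renamed cursor follows the same protocol: read the current cursor, ask the pool manager whether the pending pool must be emitted before the next chunk of code, emit it if so, and check the returned cursor. A simplified standalone model of that handshake (a hypothetical ToyPoolManager with the same MustEmit/Emit shape; the real PoolManager also takes references and locations):

#include <cassert>
#include <cstdint>

// Toy pool manager: the pool must go out before the cursor passes the
// deadline imposed by the pool's earliest-expiring reference.
class ToyPoolManager {
 public:
  explicit ToyPoolManager(int32_t deadline) : deadline_(deadline) {}
  bool MustEmit(int32_t cursor, int size) const {
    return (cursor + size) > deadline_;
  }
  int32_t Emit(int32_t cursor, int pool_size) {
    deadline_ = INT32_MAX;  // Pool emitted; nothing pending any more.
    return cursor + pool_size;
  }

 private:
  int32_t deadline_;
};

int main() {
  ToyPoolManager pool_manager(/* deadline = */ 100);
  int32_t cursor = 96;
  const int kPaddingBytes = 8;
  if (pool_manager.MustEmit(cursor, kPaddingBytes)) {
    int32_t new_cursor = pool_manager.Emit(cursor, /* pool_size = */ 16);
    assert(new_cursor == 112);
  }
  return 0;
}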
@ -464,13 +464,13 @@ class MacroAssembler : public Assembler, public MacroAssemblerInterface {
// into account, as well as potential 16-bit padding needed to reach the
// minimum accessible location.
int alignment = literal->GetMaxAlignment();
int32_t pc = GetCursorOffset();
int total_size = AlignUp(pc, alignment) - pc + literal->GetSize();
if (literal->Needs16BitPadding(pc)) total_size += 2;
if (pool_manager_.MustEmit(pc, total_size)) {
int32_t new_pc = pool_manager_.Emit(this, pc, total_size);
USE(new_pc);
VIXL_ASSERT(new_pc == GetCursorOffset());
int32_t cursor = GetCursorOffset();
int total_size = AlignUp(cursor, alignment) - cursor + literal->GetSize();
if (literal->Needs16BitPadding(cursor)) total_size += 2;
if (pool_manager_.MustEmit(cursor, total_size)) {
int32_t new_cursor = pool_manager_.Emit(this, cursor, total_size);
USE(new_cursor);
VIXL_ASSERT(new_cursor == GetCursorOffset());
}
pool_manager_.Bind(this, literal, GetCursorOffset());
literal->EmitPoolObject(this);


@ -4794,9 +4794,9 @@ void Assembler::SVESt1VecScaHelper(const ZRegister& zt,
VIXL_ASSERT(zn.IsLaneSizeS() || zn.IsLaneSizeD());
VIXL_ASSERT(AreSameLaneSize(zn, zt));
uint32_t b22 = zn.IsLaneSizeS() ? (1 << 22) : 0;
uint32_t bit22 = zn.IsLaneSizeS() ? (1 << 22) : 0;
Instr op = 0xe4002000; // STNT1 with vector plus scalar addressing mode.
op |= b22 | (msize_bytes_log2 << 23);
op |= bit22 | (msize_bytes_log2 << 23);
Emit(op | Rt(zt) | PgLow8(pg) |
SVEMemOperandHelper(msize_bytes_log2, 1, addr, true));
}


@ -10317,10 +10317,10 @@ int Disassembler::SubstitutePrefetchField(const Instruction *instr,
int placeholder_length = is_sve ? 9 : 6;
static const char *stream_options[] = {"keep", "strm"};
auto get_hints = [](bool is_sve) -> std::vector<std::string> {
auto get_hints = [](bool want_sve_hint) -> std::vector<std::string> {
static const std::vector<std::string> sve_hints = {"ld", "st"};
static const std::vector<std::string> core_hints = {"ld", "li", "st"};
return (is_sve) ? sve_hints : core_hints;
return (want_sve_hint) ? sve_hints : core_hints;
};
std::vector<std::string> hints = get_hints(is_sve);
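This hunk shows the lambda flavour of the warning: a lambda parameter that reuses the name of an enclosing variable shadows it just as a nested block would. A minimal reproduction (hypothetical code in the same shape as the hunk):

#include <string>
#include <vector>

std::vector<std::string> Hints(bool is_sve) {
  // With the parameter also named 'is_sve', -Wshadow reports that the
  // lambda parameter shadows the enclosing one; renaming it is enough.
  auto get_hints = [](bool want_sve_hint) -> std::vector<std::string> {
    if (want_sve_hint) return {"ld", "st"};
    return {"ld", "li", "st"};
  };
  return get_hints(is_sve);
}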


@ -6064,14 +6064,14 @@ LogicVRegister Simulator::fcvtxn2(VectorFormat vform,
// Based on reference C function recip_sqrt_estimate from ARM ARM.
double Simulator::recip_sqrt_estimate(double a) {
int q0, q1, s;
int quot0, quot1, s;
double r;
if (a < 0.5) {
q0 = static_cast<int>(a * 512.0);
r = 1.0 / sqrt((static_cast<double>(q0) + 0.5) / 512.0);
quot0 = static_cast<int>(a * 512.0);
r = 1.0 / sqrt((static_cast<double>(quot0) + 0.5) / 512.0);
} else {
q1 = static_cast<int>(a * 256.0);
r = 1.0 / sqrt((static_cast<double>(q1) + 0.5) / 256.0);
quot1 = static_cast<int>(a * 256.0);
r = 1.0 / sqrt((static_cast<double>(quot1) + 0.5) / 256.0);
}
s = static_cast<int>(256.0 * r + 0.5);
return static_cast<double>(s) / 256.0;
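To make the quantised estimate concrete, a worked run of the a >= 0.5 branch (the renamed quot1 path) for a = 0.5:

#include <cassert>
#include <cmath>

int main() {
  // Quantise a to 1/256 steps, estimate 1/sqrt at the midpoint of the
  // step, then quantise the result to 1/256 steps.
  double a = 0.5;
  int quot1 = static_cast<int>(a * 256.0);  // 128
  double r = 1.0 / sqrt((static_cast<double>(quot1) + 0.5) / 256.0);
  int s = static_cast<int>(256.0 * r + 0.5);  // 361
  double estimate = static_cast<double>(s) / 256.0;  // 1.41015625
  assert(quot1 == 128);
  assert(s == 361);
  // The exact value is 1/sqrt(0.5) = 1.41421...; the 8-bit estimate
  // lands nearby.
  assert((estimate > 1.40) && (estimate < 1.42));
  return 0;
}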
@ -6239,7 +6239,6 @@ T Simulator::FPRecipEstimate(T op, FPRounding rounding) {
} else {
uint64_t fraction;
int exp, result_exp;
uint32_t sign;
if (IsFloat16<T>()) {
sign = Float16Sign(op);
@ -6779,8 +6778,8 @@ LogicVRegister Simulator::fscale(VectorFormat vform,
const LogicVRegister& src2) {
T two = T(2.0);
for (int i = 0; i < LaneCountFromFormat(vform); i++) {
T s1 = src1.Float<T>(i);
if (!IsNaN(s1)) {
T src1_val = src1.Float<T>(i);
if (!IsNaN(src1_val)) {
int64_t scale = src2.Int(vform, i);
// TODO: this is a low-performance implementation, but it's simple and
// less likely to be buggy. Consider replacing it with something faster.
@ -6789,19 +6788,19 @@ LogicVRegister Simulator::fscale(VectorFormat vform,
// point iterating further.
scale = std::min<int64_t>(std::max<int64_t>(scale, -2048), 2048);
// Compute s1 * 2 ^ scale. If scale is positive, multiply by two and
// Compute src1_val * 2 ^ scale. If scale is positive, multiply by two and
// decrement scale until it's zero.
while (scale-- > 0) {
s1 = FPMul(s1, two);
src1_val = FPMul(src1_val, two);
}
// If scale is negative, divide by two and increment scale until it's
// zero. Initially, scale is (src2 - 1), so we pre-increment.
while (++scale < 0) {
s1 = FPDiv(s1, two);
src1_val = FPDiv(src1_val, two);
}
}
dst.SetFloat<T>(i, s1);
dst.SetFloat<T>(i, src1_val);
}
return dst;
}
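The loop computes src1_val * 2^scale by repeated doubling or halving rather than by editing the exponent, so overflow, underflow, and denormals fall out of the ordinary floating-point rules. A standalone sketch with the same clamping and loop structure, simplified to plain double arithmetic (the simulator's FPMul/FPDiv also handle NaNs and flag state):

#include <algorithm>
#include <cassert>
#include <cstdint>

double ScaleByPowerOfTwo(double value, int64_t scale) {
  const double two = 2.0;
  // Doubles span roughly 2^-1074 to 2^1024, so clamping to +/-2048 cannot
  // change the final result once the value has saturated either way.
  scale = std::min<int64_t>(std::max<int64_t>(scale, -2048), 2048);
  // Positive scale: multiply by two and decrement until it reaches zero.
  while (scale-- > 0) value *= two;
  // Negative scale: divide by two and increment until it reaches zero.
  // After the first loop, scale is one below its logical value, so we
  // pre-increment, mirroring the simulator code.
  while (++scale < 0) value /= two;
  return value;
}

int main() {
  assert(ScaleByPowerOfTwo(1.5, 3) == 12.0);  // 1.5 * 2^3
  assert(ScaleByPowerOfTwo(8.0, -2) == 2.0);  // 8.0 * 2^-2
  return 0;
}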


@ -133,14 +133,14 @@ void LiteralPool::Emit(EmitOption option) {
masm_->SetAllowMacroInstructions(false);
#endif
if (option == kBranchRequired) {
ExactAssemblyScopeWithoutPoolsCheck guard(masm_, kInstructionSize);
ExactAssemblyScopeWithoutPoolsCheck eas_guard(masm_, kInstructionSize);
masm_->b(&end_of_pool);
}
{
// Marker indicating the size of the literal pool in 32-bit words.
VIXL_ASSERT((pool_size % kWRegSizeInBytes) == 0);
ExactAssemblyScopeWithoutPoolsCheck guard(masm_, kInstructionSize);
ExactAssemblyScopeWithoutPoolsCheck eas_guard(masm_, kInstructionSize);
masm_->ldr(xzr, static_cast<int>(pool_size / kWRegSizeInBytes));
}


@ -2207,7 +2207,7 @@ TEST(custom_literal_place_shared) {
VIXL_CHECK(!after.IsBound());
// Load the entries several times to test that literals can be shared.
for (int i = 0; i < 20; i++) {
for (int j = 0; j < 20; j++) {
(masm.*test_case.instruction)(r0, &before);
(masm.*test_case.instruction)(r1, &after);
}
@ -5160,7 +5160,7 @@ TEST_T32(veneer_and_literal5) {
int first_test = 2000;
// Test on both sides of the Adr range, which is 4095.
for (int test = 0; test < kTestCount; test++) {
for (int test_num = 0; test_num < kTestCount; test_num++) {
const int string_size = 1000; // A lot more than the cbz range.
std::string test_string(string_size, 'x');
StringLiteral big_literal(test_string.c_str());
@ -5168,7 +5168,7 @@ TEST_T32(veneer_and_literal5) {
__ Adr(r11, &big_literal);
{
int num_nops = first_test + test;
int num_nops = first_test + test_num;
ExactAssemblyScope aas(&masm,
2 * num_nops,
CodeBufferCheckScope::kMaximumSize);
@ -5177,15 +5177,15 @@ TEST_T32(veneer_and_literal5) {
}
}
__ Cbz(r1, &labels[test]);
__ Cbz(r1, &labels[test_num]);
{
ExactAssemblyScope aas(&masm, 4, CodeBufferCheckScope::kMaximumSize);
__ add(r1, r1, 3);
}
__ Bind(&labels[test]);
__ Bind(&labels[test_num]);
// Emit the literal pool if it has not been emitted (it's the case for
// the lower values of test).
// the lower values of test_num).
__ EmitLiteralPool(PoolManager<int32_t>::kBranchRequired);
}
@ -6476,61 +6476,65 @@ TEST_T32(assembler_bind_label) {
POSITIVE_TEST_FORWARD_REFERENCE_INFO(INST, INFO, ASM)
#endif
#define POSITIVE_TEST_FORWARD_REFERENCE_INFO(INST, INFO, ASM) \
can_encode = masm.INFO; \
VIXL_CHECK(can_encode); \
{ \
ExactAssemblyScope scope(&masm, \
info->size, \
ExactAssemblyScope::kExactSize); \
int32_t pc = masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
pc = AlignDown(pc, 4); \
} \
Label label(pc + info->min_offset); \
masm.ASM; \
} \
{ \
ExactAssemblyScope scope(&masm, \
info->size, \
ExactAssemblyScope::kExactSize); \
int32_t pc = masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
pc = AlignDown(pc, 4); \
} \
Label label(pc + info->max_offset); \
masm.ASM; \
#define POSITIVE_TEST_FORWARD_REFERENCE_INFO(INST, INFO, ASM) \
can_encode = masm.INFO; \
VIXL_CHECK(can_encode); \
{ \
ExactAssemblyScope scope(&masm, \
info->size, \
ExactAssemblyScope::kExactSize); \
int32_t program_counter = \
masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
program_counter = AlignDown(program_counter, 4); \
} \
Label label(program_counter + info->min_offset); \
masm.ASM; \
} \
{ \
ExactAssemblyScope scope(&masm, \
info->size, \
ExactAssemblyScope::kExactSize); \
int32_t program_counter = \
masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
program_counter = AlignDown(program_counter, 4); \
} \
Label label(program_counter + info->max_offset); \
masm.ASM; \
}
#ifdef VIXL_NEGATIVE_TESTING
#define NEGATIVE_TEST_FORWARD_REFERENCE_INFO(INST, ASM) \
try { \
ExactAssemblyScope scope(&masm, \
info->size, \
ExactAssemblyScope::kMaximumSize); \
int32_t pc = masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
pc = AlignDown(pc, 4); \
} \
Label label(pc + info->max_offset + info->alignment); \
masm.ASM; \
printf("Negative test for forward reference failed for %s.\n", INST); \
abort(); \
} catch (const std::runtime_error&) { \
} \
try { \
ExactAssemblyScope scope(&masm, \
info->size, \
ExactAssemblyScope::kMaximumSize); \
int32_t pc = masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
pc = AlignDown(pc, 4); \
} \
Label label(pc + info->min_offset - info->alignment); \
masm.ASM; \
printf("Negative test for forward reference failed for %s.\n", INST); \
abort(); \
} catch (const std::runtime_error&) { \
#define NEGATIVE_TEST_FORWARD_REFERENCE_INFO(INST, ASM) \
try { \
ExactAssemblyScope scope(&masm, \
info->size, \
ExactAssemblyScope::kMaximumSize); \
int32_t program_counter = \
masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
program_counter = AlignDown(program_counter, 4); \
} \
Label label(program_counter + info->max_offset + info->alignment); \
masm.ASM; \
printf("Negative test for forward reference failed for %s.\n", INST); \
abort(); \
} catch (const std::runtime_error&) { \
} \
try { \
ExactAssemblyScope scope(&masm, \
info->size, \
ExactAssemblyScope::kMaximumSize); \
int32_t program_counter = \
masm.GetCursorOffset() + __ GetArchitectureStatePCOffset(); \
if (info->pc_needs_aligning == ReferenceInfo::kAlignPc) { \
program_counter = AlignDown(program_counter, 4); \
} \
Label label(program_counter + info->min_offset - info->alignment); \
masm.ASM; \
printf("Negative test for forward reference failed for %s.\n", INST); \
abort(); \
} catch (const std::runtime_error&) { \
}
#else
#define NEGATIVE_TEST_FORWARD_REFERENCE_INFO(INST, ASM)


@ -348,8 +348,9 @@ namespace aarch32 {
class TestDisassembler : public PrintDisassembler {
public:
TestDisassembler(std::ostream& os, uint32_t pc) // NOLINT(runtime/references)
: PrintDisassembler(os, pc) {}
TestDisassembler(std::ostream& os,
uint32_t program_counter) // NOLINT(runtime/references)
: PrintDisassembler(os, program_counter) {}
virtual void PrintCodeAddress(uint32_t code_address) VIXL_OVERRIDE {
USE(code_address);


@ -164,7 +164,7 @@ namespace aarch64 {
{ \
/* We expect the test to use all of the features it requested, plus the */ \
/* features that the infrastructure code requires. */ \
CPUFeatures const& expected = \
CPUFeatures const& expected_features = \
simulator.GetCPUFeatures()->With(CPUFeatures::kNEON); \
CPUFeatures const& seen = simulator.GetSeenFeatures(); \
/* This gives three broad categories of features that we care about: */ \
@ -172,13 +172,13 @@ namespace aarch64 {
/* 2. Things seen, but not expected. The simulator catches these. */ \
/* 3. Things expected, but not seen. We check these here. */ \
/* In a valid, passing test, categories 2 and 3 should be empty. */ \
if (seen != expected) { \
if (seen != expected_features) { \
/* The Simulator should have caught anything in category 2 already. */ \
VIXL_ASSERT(expected.Has(seen)); \
VIXL_ASSERT(expected_features.Has(seen)); \
/* Anything left is category 3: things expected, but not seen. This */ \
/* is not necessarily a bug in VIXL itself, but indicates that the */ \
/* test is less strict than it could be. */ \
CPUFeatures missing = expected.Without(seen); \
CPUFeatures missing = expected_features.Without(seen); \
VIXL_ASSERT(missing.Count() > 0); \
std::cout << "Error: expected to see CPUFeatures { " << missing \
<< " }\n"; \
@ -265,15 +265,15 @@ namespace aarch64 {
if (Test::disassemble()) { \
PrintDisassembler disasm(stdout); \
CodeBuffer* buffer = masm.GetBuffer(); \
Instruction* start = buffer->GetOffsetAddress<Instruction*>( \
Instruction* test_start = buffer->GetOffsetAddress<Instruction*>( \
offset_after_infrastructure_start); \
Instruction* end = buffer->GetOffsetAddress<Instruction*>( \
Instruction* test_end = buffer->GetOffsetAddress<Instruction*>( \
offset_before_infrastructure_end); \
\
if (Test::disassemble_infrastructure()) { \
Instruction* infra_start = buffer->GetStartAddress<Instruction*>(); \
printf("# Infrastructure code (prologue)\n"); \
disasm.DisassembleBuffer(infra_start, start); \
disasm.DisassembleBuffer(infra_start, test_start); \
printf("# Test code\n"); \
} else { \
printf( \
@ -281,12 +281,12 @@ namespace aarch64 {
"Use --disassemble to see it.\n"); \
} \
\
disasm.DisassembleBuffer(start, end); \
disasm.DisassembleBuffer(test_start, test_end); \
\
if (Test::disassemble_infrastructure()) { \
printf("# Infrastructure code (epilogue)\n"); \
Instruction* infra_end = buffer->GetEndAddress<Instruction*>(); \
disasm.DisassembleBuffer(end, infra_end); \
disasm.DisassembleBuffer(test_end, infra_end); \
} \
}


@ -905,95 +905,209 @@ TEST(fmadd_fmsub_float) {
TEST(fmadd_fmsub_double_nans) {
// Make sure that NaN propagation works correctly.
double s1 = RawbitsToDouble(0x7ff5555511111111);
double s2 = RawbitsToDouble(0x7ff5555522222222);
double sa = RawbitsToDouble(0x7ff55555aaaaaaaa);
double q1 = RawbitsToDouble(0x7ffaaaaa11111111);
double q2 = RawbitsToDouble(0x7ffaaaaa22222222);
double qa = RawbitsToDouble(0x7ffaaaaaaaaaaaaa);
VIXL_ASSERT(IsSignallingNaN(s1));
VIXL_ASSERT(IsSignallingNaN(s2));
VIXL_ASSERT(IsSignallingNaN(sa));
VIXL_ASSERT(IsQuietNaN(q1));
VIXL_ASSERT(IsQuietNaN(q2));
VIXL_ASSERT(IsQuietNaN(qa));
double sig1 = RawbitsToDouble(0x7ff5555511111111);
double sig2 = RawbitsToDouble(0x7ff5555522222222);
double siga = RawbitsToDouble(0x7ff55555aaaaaaaa);
double qui1 = RawbitsToDouble(0x7ffaaaaa11111111);
double qui2 = RawbitsToDouble(0x7ffaaaaa22222222);
double quia = RawbitsToDouble(0x7ffaaaaaaaaaaaaa);
VIXL_ASSERT(IsSignallingNaN(sig1));
VIXL_ASSERT(IsSignallingNaN(sig2));
VIXL_ASSERT(IsSignallingNaN(siga));
VIXL_ASSERT(IsQuietNaN(qui1));
VIXL_ASSERT(IsQuietNaN(qui2));
VIXL_ASSERT(IsQuietNaN(quia));
// The input NaNs after passing through ProcessNaN.
double s1_proc = RawbitsToDouble(0x7ffd555511111111);
double s2_proc = RawbitsToDouble(0x7ffd555522222222);
double sa_proc = RawbitsToDouble(0x7ffd5555aaaaaaaa);
double q1_proc = q1;
double q2_proc = q2;
double qa_proc = qa;
VIXL_ASSERT(IsQuietNaN(s1_proc));
VIXL_ASSERT(IsQuietNaN(s2_proc));
VIXL_ASSERT(IsQuietNaN(sa_proc));
VIXL_ASSERT(IsQuietNaN(q1_proc));
VIXL_ASSERT(IsQuietNaN(q2_proc));
VIXL_ASSERT(IsQuietNaN(qa_proc));
double sig1_proc = RawbitsToDouble(0x7ffd555511111111);
double sig2_proc = RawbitsToDouble(0x7ffd555522222222);
double siga_proc = RawbitsToDouble(0x7ffd5555aaaaaaaa);
double qui1_proc = qui1;
double qui2_proc = qui2;
double quia_proc = quia;
VIXL_ASSERT(IsQuietNaN(sig1_proc));
VIXL_ASSERT(IsQuietNaN(sig2_proc));
VIXL_ASSERT(IsQuietNaN(siga_proc));
VIXL_ASSERT(IsQuietNaN(qui1_proc));
VIXL_ASSERT(IsQuietNaN(qui2_proc));
VIXL_ASSERT(IsQuietNaN(quia_proc));
// Negated NaNs, as they would be produced on ARMv8 hardware.
double s1_proc_neg = RawbitsToDouble(0xfffd555511111111);
double sa_proc_neg = RawbitsToDouble(0xfffd5555aaaaaaaa);
double q1_proc_neg = RawbitsToDouble(0xfffaaaaa11111111);
double qa_proc_neg = RawbitsToDouble(0xfffaaaaaaaaaaaaa);
VIXL_ASSERT(IsQuietNaN(s1_proc_neg));
VIXL_ASSERT(IsQuietNaN(sa_proc_neg));
VIXL_ASSERT(IsQuietNaN(q1_proc_neg));
VIXL_ASSERT(IsQuietNaN(qa_proc_neg));
double sig1_proc_neg = RawbitsToDouble(0xfffd555511111111);
double siga_proc_neg = RawbitsToDouble(0xfffd5555aaaaaaaa);
double qui1_proc_neg = RawbitsToDouble(0xfffaaaaa11111111);
double quia_proc_neg = RawbitsToDouble(0xfffaaaaaaaaaaaaa);
VIXL_ASSERT(IsQuietNaN(sig1_proc_neg));
VIXL_ASSERT(IsQuietNaN(siga_proc_neg));
VIXL_ASSERT(IsQuietNaN(qui1_proc_neg));
VIXL_ASSERT(IsQuietNaN(quia_proc_neg));
// Quiet NaNs are propagated.
FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
FmaddFmsubHelper(qui1,
0,
0,
qui1_proc,
qui1_proc_neg,
qui1_proc_neg,
qui1_proc);
FmaddFmsubHelper(0, qui2, 0, qui2_proc, qui2_proc, qui2_proc, qui2_proc);
FmaddFmsubHelper(0,
0,
quia,
quia_proc,
quia_proc,
quia_proc_neg,
quia_proc_neg);
FmaddFmsubHelper(qui1,
qui2,
0,
qui1_proc,
qui1_proc_neg,
qui1_proc_neg,
qui1_proc);
FmaddFmsubHelper(0,
qui2,
quia,
quia_proc,
quia_proc,
quia_proc_neg,
quia_proc_neg);
FmaddFmsubHelper(qui1,
0,
quia,
quia_proc,
quia_proc,
quia_proc_neg,
quia_proc_neg);
FmaddFmsubHelper(qui1,
qui2,
quia,
quia_proc,
quia_proc,
quia_proc_neg,
quia_proc_neg);
// Signalling NaNs are propagated, and made quiet.
FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(sig1,
0,
0,
sig1_proc,
sig1_proc_neg,
sig1_proc_neg,
sig1_proc);
FmaddFmsubHelper(0, sig2, 0, sig2_proc, sig2_proc, sig2_proc, sig2_proc);
FmaddFmsubHelper(0,
0,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
sig2,
0,
sig1_proc,
sig1_proc_neg,
sig1_proc_neg,
sig1_proc);
FmaddFmsubHelper(0,
sig2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
0,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
sig2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
// Signalling NaNs take precedence over quiet NaNs.
FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(sig1,
qui2,
quia,
sig1_proc,
sig1_proc_neg,
sig1_proc_neg,
sig1_proc);
FmaddFmsubHelper(qui1,
sig2,
quia,
sig2_proc,
sig2_proc,
sig2_proc,
sig2_proc);
FmaddFmsubHelper(qui1,
qui2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
sig2,
quia,
sig1_proc,
sig1_proc_neg,
sig1_proc_neg,
sig1_proc);
FmaddFmsubHelper(qui1,
sig2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
qui2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
sig2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
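The sig*/qui* constants above differ only in bit 51, the top mantissa bit, which is what separates the two NaN classes; "processing" a signalling NaN sets that bit, giving the *_proc values. A small check of that rule for the double-precision bit patterns used here (standard IEEE 754 layout; VIXL's RawbitsToDouble modelled with memcpy):

#include <cassert>
#include <cstdint>
#include <cstring>

double RawbitsToDouble(uint64_t bits) {
  double d;
  memcpy(&d, &bits, sizeof(d));
  return d;
}

// A double NaN has an all-ones exponent and a non-zero mantissa; bit 51
// is 1 for a quiet NaN and 0 for a signalling NaN.
bool IsQuietNaNBits(uint64_t bits) {
  return ((bits & 0x7ff0000000000000ull) == 0x7ff0000000000000ull) &&
         ((bits & 0x000fffffffffffffull) != 0) &&
         ((bits & 0x0008000000000000ull) != 0);
}

int main() {
  assert(!IsQuietNaNBits(0x7ff5555511111111ull));  // sig1: signalling.
  assert(IsQuietNaNBits(0x7ffaaaaa11111111ull));   // qui1: quiet.
  // Quieting sig1 sets bit 51, which yields exactly sig1_proc.
  assert((0x7ff5555511111111ull | 0x0008000000000000ull) ==
         0x7ffd555511111111ull);
  return 0;
}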
// A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
FmaddFmsubHelper(0,
kFP64PositiveInfinity,
qa,
quia,
kFP64DefaultNaN,
kFP64DefaultNaN,
kFP64DefaultNaN,
kFP64DefaultNaN);
FmaddFmsubHelper(kFP64PositiveInfinity,
0,
qa,
quia,
kFP64DefaultNaN,
kFP64DefaultNaN,
kFP64DefaultNaN,
kFP64DefaultNaN);
FmaddFmsubHelper(0,
kFP64NegativeInfinity,
qa,
quia,
kFP64DefaultNaN,
kFP64DefaultNaN,
kFP64DefaultNaN,
kFP64DefaultNaN);
FmaddFmsubHelper(kFP64NegativeInfinity,
0,
qa,
quia,
kFP64DefaultNaN,
kFP64DefaultNaN,
kFP64DefaultNaN,
@ -1003,95 +1117,209 @@ TEST(fmadd_fmsub_double_nans) {
TEST(fmadd_fmsub_float_nans) {
// Make sure that NaN propagation works correctly.
float s1 = RawbitsToFloat(0x7f951111);
float s2 = RawbitsToFloat(0x7f952222);
float sa = RawbitsToFloat(0x7f95aaaa);
float q1 = RawbitsToFloat(0x7fea1111);
float q2 = RawbitsToFloat(0x7fea2222);
float qa = RawbitsToFloat(0x7feaaaaa);
VIXL_ASSERT(IsSignallingNaN(s1));
VIXL_ASSERT(IsSignallingNaN(s2));
VIXL_ASSERT(IsSignallingNaN(sa));
VIXL_ASSERT(IsQuietNaN(q1));
VIXL_ASSERT(IsQuietNaN(q2));
VIXL_ASSERT(IsQuietNaN(qa));
float sig1 = RawbitsToFloat(0x7f951111);
float sig2 = RawbitsToFloat(0x7f952222);
float siga = RawbitsToFloat(0x7f95aaaa);
float qui1 = RawbitsToFloat(0x7fea1111);
float qui2 = RawbitsToFloat(0x7fea2222);
float quia = RawbitsToFloat(0x7feaaaaa);
VIXL_ASSERT(IsSignallingNaN(sig1));
VIXL_ASSERT(IsSignallingNaN(sig2));
VIXL_ASSERT(IsSignallingNaN(siga));
VIXL_ASSERT(IsQuietNaN(qui1));
VIXL_ASSERT(IsQuietNaN(qui2));
VIXL_ASSERT(IsQuietNaN(quia));
// The input NaNs after passing through ProcessNaN.
float s1_proc = RawbitsToFloat(0x7fd51111);
float s2_proc = RawbitsToFloat(0x7fd52222);
float sa_proc = RawbitsToFloat(0x7fd5aaaa);
float q1_proc = q1;
float q2_proc = q2;
float qa_proc = qa;
VIXL_ASSERT(IsQuietNaN(s1_proc));
VIXL_ASSERT(IsQuietNaN(s2_proc));
VIXL_ASSERT(IsQuietNaN(sa_proc));
VIXL_ASSERT(IsQuietNaN(q1_proc));
VIXL_ASSERT(IsQuietNaN(q2_proc));
VIXL_ASSERT(IsQuietNaN(qa_proc));
float sig1_proc = RawbitsToFloat(0x7fd51111);
float sig2_proc = RawbitsToFloat(0x7fd52222);
float siga_proc = RawbitsToFloat(0x7fd5aaaa);
float qui1_proc = qui1;
float qui2_proc = qui2;
float quia_proc = quia;
VIXL_ASSERT(IsQuietNaN(sig1_proc));
VIXL_ASSERT(IsQuietNaN(sig2_proc));
VIXL_ASSERT(IsQuietNaN(siga_proc));
VIXL_ASSERT(IsQuietNaN(qui1_proc));
VIXL_ASSERT(IsQuietNaN(qui2_proc));
VIXL_ASSERT(IsQuietNaN(quia_proc));
// Negated NaNs, as they would be produced on ARMv8 hardware.
float s1_proc_neg = RawbitsToFloat(0xffd51111);
float sa_proc_neg = RawbitsToFloat(0xffd5aaaa);
float q1_proc_neg = RawbitsToFloat(0xffea1111);
float qa_proc_neg = RawbitsToFloat(0xffeaaaaa);
VIXL_ASSERT(IsQuietNaN(s1_proc_neg));
VIXL_ASSERT(IsQuietNaN(sa_proc_neg));
VIXL_ASSERT(IsQuietNaN(q1_proc_neg));
VIXL_ASSERT(IsQuietNaN(qa_proc_neg));
float sig1_proc_neg = RawbitsToFloat(0xffd51111);
float siga_proc_neg = RawbitsToFloat(0xffd5aaaa);
float qui1_proc_neg = RawbitsToFloat(0xffea1111);
float quia_proc_neg = RawbitsToFloat(0xffeaaaaa);
VIXL_ASSERT(IsQuietNaN(sig1_proc_neg));
VIXL_ASSERT(IsQuietNaN(siga_proc_neg));
VIXL_ASSERT(IsQuietNaN(qui1_proc_neg));
VIXL_ASSERT(IsQuietNaN(quia_proc_neg));
// Quiet NaNs are propagated.
FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
FmaddFmsubHelper(qui1,
0,
0,
qui1_proc,
qui1_proc_neg,
qui1_proc_neg,
qui1_proc);
FmaddFmsubHelper(0, qui2, 0, qui2_proc, qui2_proc, qui2_proc, qui2_proc);
FmaddFmsubHelper(0,
0,
quia,
quia_proc,
quia_proc,
quia_proc_neg,
quia_proc_neg);
FmaddFmsubHelper(qui1,
qui2,
0,
qui1_proc,
qui1_proc_neg,
qui1_proc_neg,
qui1_proc);
FmaddFmsubHelper(0,
qui2,
quia,
quia_proc,
quia_proc,
quia_proc_neg,
quia_proc_neg);
FmaddFmsubHelper(qui1,
0,
quia,
quia_proc,
quia_proc,
quia_proc_neg,
quia_proc_neg);
FmaddFmsubHelper(qui1,
qui2,
quia,
quia_proc,
quia_proc,
quia_proc_neg,
quia_proc_neg);
// Signalling NaNs are propagated, and made quiet.
FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(sig1,
0,
0,
sig1_proc,
sig1_proc_neg,
sig1_proc_neg,
sig1_proc);
FmaddFmsubHelper(0, sig2, 0, sig2_proc, sig2_proc, sig2_proc, sig2_proc);
FmaddFmsubHelper(0,
0,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
sig2,
0,
sig1_proc,
sig1_proc_neg,
sig1_proc_neg,
sig1_proc);
FmaddFmsubHelper(0,
sig2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
0,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
sig2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
// Signalling NaNs take precedence over quiet NaNs.
FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
FmaddFmsubHelper(sig1,
qui2,
quia,
sig1_proc,
sig1_proc_neg,
sig1_proc_neg,
sig1_proc);
FmaddFmsubHelper(qui1,
sig2,
quia,
sig2_proc,
sig2_proc,
sig2_proc,
sig2_proc);
FmaddFmsubHelper(qui1,
qui2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
sig2,
quia,
sig1_proc,
sig1_proc_neg,
sig1_proc_neg,
sig1_proc);
FmaddFmsubHelper(qui1,
sig2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
qui2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
FmaddFmsubHelper(sig1,
sig2,
siga,
siga_proc,
siga_proc,
siga_proc_neg,
siga_proc_neg);
// A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
FmaddFmsubHelper(0,
kFP32PositiveInfinity,
qa,
quia,
kFP32DefaultNaN,
kFP32DefaultNaN,
kFP32DefaultNaN,
kFP32DefaultNaN);
FmaddFmsubHelper(kFP32PositiveInfinity,
0,
qa,
quia,
kFP32DefaultNaN,
kFP32DefaultNaN,
kFP32DefaultNaN,
kFP32DefaultNaN);
FmaddFmsubHelper(0,
kFP32NegativeInfinity,
qa,
quia,
kFP32DefaultNaN,
kFP32DefaultNaN,
kFP32DefaultNaN,
kFP32DefaultNaN);
FmaddFmsubHelper(kFP32NegativeInfinity,
0,
qa,
quia,
kFP32DefaultNaN,
kFP32DefaultNaN,
kFP32DefaultNaN,


@ -5472,6 +5472,9 @@ TEST_SVE(sve_addpl) {
}
TEST_SVE(sve_calculate_sve_address) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
// Shadow the `MacroAssembler` type so that the test macros work without
// modification.
typedef CalculateSVEAddressMacroAssembler MacroAssembler;
@ -5581,6 +5584,7 @@ TEST_SVE(sve_calculate_sve_address) {
ASSERT_EQUAL_64(0xabcd404400000000 - 48, x28);
ASSERT_EQUAL_64(0xabcd505500000000 - (48 << 4), x29);
}
#pragma GCC diagnostic pop
}
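Where a shadow is deliberate, as with the MacroAssembler typedef in this test, the commit suppresses the warning locally instead of renaming. The general GCC/Clang pattern, shown on a hypothetical variable for brevity:

int Example(int value) {
  int result = value;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
  {
    int value = 42;  // Deliberate shadow: no warning inside the region.
    result += value;
  }
#pragma GCC diagnostic pop
  return result;
}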
TEST_SVE(sve_permute_vector_unpredicated) {
@ -11620,19 +11624,19 @@ static void SdotUdotHelper(Test* config,
const ZRegister& za,
const ZRegister& zn,
const ZRegister& zm,
bool is_signed,
int index) {
if (is_signed) {
if (index < 0) {
bool is_signed_fn,
int index_fn) {
if (is_signed_fn) {
if (index_fn < 0) {
__ Sdot(zd, za, zn, zm);
} else {
__ Sdot(zd, za, zn, zm, index);
__ Sdot(zd, za, zn, zm, index_fn);
}
} else {
if (index < 0) {
if (index_fn < 0) {
__ Udot(zd, za, zn, zm);
} else {
__ Udot(zd, za, zn, zm, index);
__ Udot(zd, za, zn, zm, index_fn);
}
}
};


@ -54,9 +54,10 @@
do { \
printf("----\n"); \
PrintDisassembler print_disasm(stdout); \
Instruction* start = masm.GetBuffer()->GetStartAddress<Instruction*>(); \
Instruction* end = masm.GetBuffer()->GetEndAddress<Instruction*>(); \
print_disasm.DisassembleBuffer(start, end); \
Instruction* dis_start = \
masm.GetBuffer()->GetStartAddress<Instruction*>(); \
Instruction* dis_end = masm.GetBuffer()->GetEndAddress<Instruction*>(); \
print_disasm.DisassembleBuffer(dis_start, dis_end); \
} while (0)
#define COMPARE(ASM, EXP) \


@ -164,6 +164,9 @@ TEST(sve_address_generation) {
}
TEST(sve_calculate_sve_address) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
// Shadow the `MacroAssembler` type so that the test macros work without
// modification.
typedef CalculateSVEAddressMacroAssembler MacroAssembler;
@ -208,6 +211,8 @@ TEST(sve_calculate_sve_address) {
"add x22, sp, x3, lsl #2");
CLEANUP();
#pragma GCC diagnostic pop
}
TEST(sve_bitwise_imm) {
@ -1134,6 +1139,9 @@ TEST(sve_fp_arithmetic_predicated) {
}
TEST(sve_fp_arithmetic_predicated_macro_fast_nan_propagation) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
// Shadow the `MacroAssembler` type so that the test macros work without
// modification.
typedef FastNaNPropagationMacroAssembler MacroAssembler;
@ -1183,9 +1191,14 @@ TEST(sve_fp_arithmetic_predicated_macro_fast_nan_propagation) {
"fmin z15.d, p6/m, z15.d, z8.d");
CLEANUP();
#pragma GCC diagnostic pop
}
TEST(sve_fp_arithmetic_predicated_macro_strict_nan_propagation) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
// Shadow the `MacroAssembler` type so that the test macros work without
// modification.
typedef StrictNaNPropagationMacroAssembler MacroAssembler;
@ -1239,6 +1252,8 @@ TEST(sve_fp_arithmetic_predicated_macro_strict_nan_propagation) {
"fmin z15.d, p6/m, z15.d, z8.d");
CLEANUP();
#pragma GCC diagnostic pop
}
TEST(sve_fp_arithmetic_unpredicated) {
@ -1623,6 +1638,9 @@ TEST(sve_fp_mul_add) {
}
TEST(sve_fp_mul_add_macro_strict_nan_propagation) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
// Shadow the `MacroAssembler` type so that the test macros work without
// modification.
typedef StrictNaNPropagationMacroAssembler MacroAssembler;
@ -1678,9 +1696,14 @@ TEST(sve_fp_mul_add_macro_strict_nan_propagation) {
"fnmls z15.d, p0/m, z17.d, z18.d");
CLEANUP();
#pragma GCC diagnostic pop
}
TEST(sve_fp_mul_add_macro_fast_nan_propagation) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
// Shadow the `MacroAssembler` type so that the test macros work without
// modification.
typedef FastNaNPropagationMacroAssembler MacroAssembler;
@ -1728,6 +1751,8 @@ TEST(sve_fp_mul_add_macro_fast_nan_propagation) {
"fnmls z15.d, p0/m, z17.d, z18.d");
CLEANUP();
#pragma GCC diagnostic pop
}
TEST(sve_fp_mul_add_index) {


@ -421,9 +421,9 @@ TEST(FuzzObjectDeletedWhenPlaced) {
// Remove bound objects.
for (std::vector<TestObject *>::iterator iter = objects.begin();
iter != objects.end();) {
TestObject *object = *iter;
if (object->IsBound()) {
delete object;
TestObject *obj = *iter;
if (obj->IsBound()) {
delete obj;
iter = objects.erase(iter);
} else {
++iter;
@ -494,7 +494,7 @@ TEST(FuzzObjectUpdatedWhenPlaced) {
// Pick another random label to bind.
const int kProbabilityToBind = 20;
if ((Random() % 100) < kProbabilityToBind) {
TestBranchObject *object = objects[RandomObjectID(objects.size())];
TestBranchObject *object2 = objects[RandomObjectID(objects.size())];
// Binding can cause the pool emission, so check if we need to emit
// the pools. The actual backends will know the max alignment we
// might need here, so can simplify the check (won't need to check
@ -503,15 +503,15 @@ TEST(FuzzObjectUpdatedWhenPlaced) {
if (pool_manager.MustEmit(pc, max_padding)) {
pc = pool_manager.Emit(&masm, pc, max_padding);
}
pc = pool_manager.Bind(&masm, object, pc);
pc = pool_manager.Bind(&masm, object2, pc);
}
// Remove bound objects.
for (std::vector<TestBranchObject *>::iterator iter = objects.begin();
iter != objects.end();) {
TestBranchObject *object = *iter;
if (object->IsBound()) {
delete object;
TestBranchObject *obj = *iter;
if (obj->IsBound()) {
delete obj;
iter = objects.erase(iter);
} else {
++iter;
@ -818,9 +818,9 @@ TEST(MustEmitNewReferenceDueToSizeOfObject) {
{
// If the object is smaller, we can emit the reference.
TestObject smaller_object(kBigObjectSize - 4, 1);
ForwardReference<int32_t> temp_ref(pc, kBranchSize, pc, pc + kPoolSize);
ForwardReference<int32_t> temp_ref2(pc, kBranchSize, pc, pc + kPoolSize);
VIXL_ASSERT(
!pool_manager.MustEmit(pc, kBranchSize, &temp_ref, &smaller_object));
!pool_manager.MustEmit(pc, kBranchSize, &temp_ref2, &smaller_object));
// If the reference is going to be added after the current objects in the
// pool, we can still emit it.