mirror of
https://github.com/WinampDesktop/winamp.git
synced 2025-06-19 05:15:46 -04:00
dep: Add vixl (AArch32/64 assembler)
This commit is contained in:
6084
dep/vixl/src/aarch64/assembler-aarch64.cc
Normal file
6084
dep/vixl/src/aarch64/assembler-aarch64.cc
Normal file
File diff suppressed because it is too large
Load Diff
178
dep/vixl/src/aarch64/cpu-aarch64.cc
Normal file
178
dep/vixl/src/aarch64/cpu-aarch64.cc
Normal file
@ -0,0 +1,178 @@
|
||||
// Copyright 2015, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include "../utils-vixl.h"
|
||||
|
||||
#include "cpu-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
// Initialise to smallest possible cache size.
|
||||
unsigned CPU::dcache_line_size_ = 1;
|
||||
unsigned CPU::icache_line_size_ = 1;
|
||||
|
||||
|
||||
// Currently computes I and D cache line size.
|
||||
void CPU::SetUp() {
|
||||
uint32_t cache_type_register = GetCacheType();
|
||||
|
||||
// The cache type register holds information about the caches, including I
|
||||
// D caches line size.
|
||||
static const int kDCacheLineSizeShift = 16;
|
||||
static const int kICacheLineSizeShift = 0;
|
||||
static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
|
||||
static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
|
||||
|
||||
// The cache type register holds the size of the I and D caches in words as
|
||||
// a power of two.
|
||||
uint32_t dcache_line_size_power_of_two =
|
||||
(cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
|
||||
uint32_t icache_line_size_power_of_two =
|
||||
(cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
|
||||
|
||||
dcache_line_size_ = 4 << dcache_line_size_power_of_two;
|
||||
icache_line_size_ = 4 << icache_line_size_power_of_two;
|
||||
}
|
||||
|
||||
|
||||
uint32_t CPU::GetCacheType() {
|
||||
#ifdef __aarch64__
|
||||
uint64_t cache_type_register;
|
||||
// Copy the content of the cache type register to a core register.
|
||||
__asm__ __volatile__("mrs %[ctr], ctr_el0" // NOLINT(runtime/references)
|
||||
: [ctr] "=r"(cache_type_register));
|
||||
VIXL_ASSERT(IsUint32(cache_type_register));
|
||||
return static_cast<uint32_t>(cache_type_register);
|
||||
#else
|
||||
// This will lead to a cache with 1 byte long lines, which is fine since
|
||||
// neither EnsureIAndDCacheCoherency nor the simulator will need this
|
||||
// information.
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
void CPU::EnsureIAndDCacheCoherency(void *address, size_t length) {
|
||||
#ifdef __aarch64__
|
||||
// Implement the cache synchronisation for all targets where AArch64 is the
|
||||
// host, even if we're building the simulator for an AAarch64 host. This
|
||||
// allows for cases where the user wants to simulate code as well as run it
|
||||
// natively.
|
||||
|
||||
if (length == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
// The code below assumes user space cache operations are allowed.
|
||||
|
||||
// Work out the line sizes for each cache, and use them to determine the
|
||||
// start addresses.
|
||||
uintptr_t start = reinterpret_cast<uintptr_t>(address);
|
||||
uintptr_t dsize = static_cast<uintptr_t>(dcache_line_size_);
|
||||
uintptr_t isize = static_cast<uintptr_t>(icache_line_size_);
|
||||
uintptr_t dline = start & ~(dsize - 1);
|
||||
uintptr_t iline = start & ~(isize - 1);
|
||||
|
||||
// Cache line sizes are always a power of 2.
|
||||
VIXL_ASSERT(IsPowerOf2(dsize));
|
||||
VIXL_ASSERT(IsPowerOf2(isize));
|
||||
uintptr_t end = start + length;
|
||||
|
||||
do {
|
||||
__asm__ __volatile__(
|
||||
// Clean each line of the D cache containing the target data.
|
||||
//
|
||||
// dc : Data Cache maintenance
|
||||
// c : Clean
|
||||
// va : by (Virtual) Address
|
||||
// u : to the point of Unification
|
||||
// The point of unification for a processor is the point by which the
|
||||
// instruction and data caches are guaranteed to see the same copy of a
|
||||
// memory location. See ARM DDI 0406B page B2-12 for more information.
|
||||
" dc cvau, %[dline]\n"
|
||||
:
|
||||
: [dline] "r"(dline)
|
||||
// This code does not write to memory, but the "memory" dependency
|
||||
// prevents GCC from reordering the code.
|
||||
: "memory");
|
||||
dline += dsize;
|
||||
} while (dline < end);
|
||||
|
||||
__asm__ __volatile__(
|
||||
// Make sure that the data cache operations (above) complete before the
|
||||
// instruction cache operations (below).
|
||||
//
|
||||
// dsb : Data Synchronisation Barrier
|
||||
// ish : Inner SHareable domain
|
||||
//
|
||||
// The point of unification for an Inner Shareable shareability domain is
|
||||
// the point by which the instruction and data caches of all the
|
||||
// processors
|
||||
// in that Inner Shareable shareability domain are guaranteed to see the
|
||||
// same copy of a memory location. See ARM DDI 0406B page B2-12 for more
|
||||
// information.
|
||||
" dsb ish\n"
|
||||
:
|
||||
:
|
||||
: "memory");
|
||||
|
||||
do {
|
||||
__asm__ __volatile__(
|
||||
// Invalidate each line of the I cache containing the target data.
|
||||
//
|
||||
// ic : Instruction Cache maintenance
|
||||
// i : Invalidate
|
||||
// va : by Address
|
||||
// u : to the point of Unification
|
||||
" ic ivau, %[iline]\n"
|
||||
:
|
||||
: [iline] "r"(iline)
|
||||
: "memory");
|
||||
iline += isize;
|
||||
} while (iline < end);
|
||||
|
||||
__asm__ __volatile__(
|
||||
// Make sure that the instruction cache operations (above) take effect
|
||||
// before the isb (below).
|
||||
" dsb ish\n"
|
||||
|
||||
// Ensure that any instructions already in the pipeline are discarded and
|
||||
// reloaded from the new data.
|
||||
// isb : Instruction Synchronisation Barrier
|
||||
" isb\n"
|
||||
:
|
||||
:
|
||||
: "memory");
|
||||
#else
|
||||
// If the host isn't AArch64, we must be using the simulator, so this function
|
||||
// doesn't have to do anything.
|
||||
USE(address, length);
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
1059
dep/vixl/src/aarch64/cpu-features-auditor-aarch64.cc
Normal file
1059
dep/vixl/src/aarch64/cpu-features-auditor-aarch64.cc
Normal file
File diff suppressed because it is too large
Load Diff
1029
dep/vixl/src/aarch64/decoder-aarch64.cc
Normal file
1029
dep/vixl/src/aarch64/decoder-aarch64.cc
Normal file
File diff suppressed because it is too large
Load Diff
5817
dep/vixl/src/aarch64/disasm-aarch64.cc
Normal file
5817
dep/vixl/src/aarch64/disasm-aarch64.cc
Normal file
File diff suppressed because it is too large
Load Diff
713
dep/vixl/src/aarch64/instructions-aarch64.cc
Normal file
713
dep/vixl/src/aarch64/instructions-aarch64.cc
Normal file
@ -0,0 +1,713 @@
|
||||
// Copyright 2015, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include "instructions-aarch64.h"
|
||||
#include "assembler-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
|
||||
uint64_t value,
|
||||
unsigned width) {
|
||||
VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
|
||||
(width == 32));
|
||||
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
|
||||
uint64_t result = value & ((UINT64_C(1) << width) - 1);
|
||||
for (unsigned i = width; i < reg_size; i *= 2) {
|
||||
result |= (result << i);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsLoad() const {
|
||||
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
|
||||
return Mask(LoadStorePairLBit) != 0;
|
||||
} else {
|
||||
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
|
||||
switch (op) {
|
||||
case LDRB_w:
|
||||
case LDRH_w:
|
||||
case LDR_w:
|
||||
case LDR_x:
|
||||
case LDRSB_w:
|
||||
case LDRSB_x:
|
||||
case LDRSH_w:
|
||||
case LDRSH_x:
|
||||
case LDRSW_x:
|
||||
case LDR_b:
|
||||
case LDR_h:
|
||||
case LDR_s:
|
||||
case LDR_d:
|
||||
case LDR_q:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsStore() const {
|
||||
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
|
||||
return Mask(LoadStorePairLBit) == 0;
|
||||
} else {
|
||||
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
|
||||
switch (op) {
|
||||
case STRB_w:
|
||||
case STRH_w:
|
||||
case STR_w:
|
||||
case STR_x:
|
||||
case STR_b:
|
||||
case STR_h:
|
||||
case STR_s:
|
||||
case STR_d:
|
||||
case STR_q:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Logical immediates can't encode zero, so a return value of zero is used to
|
||||
// indicate a failure case. Specifically, where the constraints on imm_s are
|
||||
// not met.
|
||||
uint64_t Instruction::GetImmLogical() const {
|
||||
unsigned reg_size = GetSixtyFourBits() ? kXRegSize : kWRegSize;
|
||||
int32_t n = GetBitN();
|
||||
int32_t imm_s = GetImmSetBits();
|
||||
int32_t imm_r = GetImmRotate();
|
||||
|
||||
// An integer is constructed from the n, imm_s and imm_r bits according to
|
||||
// the following table:
|
||||
//
|
||||
// N imms immr size S R
|
||||
// 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
|
||||
// 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
|
||||
// 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
|
||||
// 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
|
||||
// 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
|
||||
// 0 11110s xxxxxr 2 UInt(s) UInt(r)
|
||||
// (s bits must not be all set)
|
||||
//
|
||||
// A pattern is constructed of size bits, where the least significant S+1
|
||||
// bits are set. The pattern is rotated right by R, and repeated across a
|
||||
// 32 or 64-bit value, depending on destination register width.
|
||||
//
|
||||
|
||||
if (n == 1) {
|
||||
if (imm_s == 0x3f) {
|
||||
return 0;
|
||||
}
|
||||
uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
|
||||
return RotateRight(bits, imm_r, 64);
|
||||
} else {
|
||||
if ((imm_s >> 1) == 0x1f) {
|
||||
return 0;
|
||||
}
|
||||
for (int width = 0x20; width >= 0x2; width >>= 1) {
|
||||
if ((imm_s & width) == 0) {
|
||||
int mask = width - 1;
|
||||
if ((imm_s & mask) == mask) {
|
||||
return 0;
|
||||
}
|
||||
uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
|
||||
return RepeatBitsAcrossReg(reg_size,
|
||||
RotateRight(bits, imm_r & mask, width),
|
||||
width);
|
||||
}
|
||||
}
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
uint32_t Instruction::GetImmNEONabcdefgh() const {
|
||||
return GetImmNEONabc() << 5 | GetImmNEONdefgh();
|
||||
}
|
||||
|
||||
|
||||
Float16 Instruction::Imm8ToFloat16(uint32_t imm8) {
|
||||
// Imm8: abcdefgh (8 bits)
|
||||
// Half: aBbb.cdef.gh00.0000 (16 bits)
|
||||
// where B is b ^ 1
|
||||
uint32_t bits = imm8;
|
||||
uint16_t bit7 = (bits >> 7) & 0x1;
|
||||
uint16_t bit6 = (bits >> 6) & 0x1;
|
||||
uint16_t bit5_to_0 = bits & 0x3f;
|
||||
uint16_t result = (bit7 << 15) | ((4 - bit6) << 12) | (bit5_to_0 << 6);
|
||||
return RawbitsToFloat16(result);
|
||||
}
|
||||
|
||||
|
||||
float Instruction::Imm8ToFP32(uint32_t imm8) {
|
||||
// Imm8: abcdefgh (8 bits)
|
||||
// Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
|
||||
// where B is b ^ 1
|
||||
uint32_t bits = imm8;
|
||||
uint32_t bit7 = (bits >> 7) & 0x1;
|
||||
uint32_t bit6 = (bits >> 6) & 0x1;
|
||||
uint32_t bit5_to_0 = bits & 0x3f;
|
||||
uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
|
||||
|
||||
return RawbitsToFloat(result);
|
||||
}
|
||||
|
||||
|
||||
Float16 Instruction::GetImmFP16() const { return Imm8ToFloat16(GetImmFP()); }
|
||||
|
||||
|
||||
float Instruction::GetImmFP32() const { return Imm8ToFP32(GetImmFP()); }
|
||||
|
||||
|
||||
double Instruction::Imm8ToFP64(uint32_t imm8) {
|
||||
// Imm8: abcdefgh (8 bits)
|
||||
// Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
|
||||
// 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
|
||||
// where B is b ^ 1
|
||||
uint32_t bits = imm8;
|
||||
uint64_t bit7 = (bits >> 7) & 0x1;
|
||||
uint64_t bit6 = (bits >> 6) & 0x1;
|
||||
uint64_t bit5_to_0 = bits & 0x3f;
|
||||
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
|
||||
|
||||
return RawbitsToDouble(result);
|
||||
}
|
||||
|
||||
|
||||
double Instruction::GetImmFP64() const { return Imm8ToFP64(GetImmFP()); }
|
||||
|
||||
|
||||
Float16 Instruction::GetImmNEONFP16() const {
|
||||
return Imm8ToFloat16(GetImmNEONabcdefgh());
|
||||
}
|
||||
|
||||
|
||||
float Instruction::GetImmNEONFP32() const {
|
||||
return Imm8ToFP32(GetImmNEONabcdefgh());
|
||||
}
|
||||
|
||||
|
||||
double Instruction::GetImmNEONFP64() const {
|
||||
return Imm8ToFP64(GetImmNEONabcdefgh());
|
||||
}
|
||||
|
||||
|
||||
unsigned CalcLSDataSize(LoadStoreOp op) {
|
||||
VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
|
||||
unsigned size = static_cast<Instr>(op) >> LSSize_offset;
|
||||
if ((op & LSVector_mask) != 0) {
|
||||
// Vector register memory operations encode the access size in the "size"
|
||||
// and "opc" fields.
|
||||
if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
|
||||
size = kQRegSizeInBytesLog2;
|
||||
}
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
|
||||
unsigned CalcLSPairDataSize(LoadStorePairOp op) {
|
||||
VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
|
||||
VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
|
||||
switch (op) {
|
||||
case STP_q:
|
||||
case LDP_q:
|
||||
return kQRegSizeInBytesLog2;
|
||||
case STP_x:
|
||||
case LDP_x:
|
||||
case STP_d:
|
||||
case LDP_d:
|
||||
return kXRegSizeInBytesLog2;
|
||||
default:
|
||||
return kWRegSizeInBytesLog2;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int Instruction::GetImmBranchRangeBitwidth(ImmBranchType branch_type) {
|
||||
switch (branch_type) {
|
||||
case UncondBranchType:
|
||||
return ImmUncondBranch_width;
|
||||
case CondBranchType:
|
||||
return ImmCondBranch_width;
|
||||
case CompareBranchType:
|
||||
return ImmCmpBranch_width;
|
||||
case TestBranchType:
|
||||
return ImmTestBranch_width;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int32_t Instruction::GetImmBranchForwardRange(ImmBranchType branch_type) {
|
||||
int32_t encoded_max = 1 << (GetImmBranchRangeBitwidth(branch_type) - 1);
|
||||
return encoded_max * kInstructionSize;
|
||||
}
|
||||
|
||||
|
||||
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
|
||||
int64_t offset) {
|
||||
return IsIntN(GetImmBranchRangeBitwidth(branch_type), offset);
|
||||
}
|
||||
|
||||
|
||||
const Instruction* Instruction::GetImmPCOffsetTarget() const {
|
||||
const Instruction* base = this;
|
||||
ptrdiff_t offset;
|
||||
if (IsPCRelAddressing()) {
|
||||
// ADR and ADRP.
|
||||
offset = GetImmPCRel();
|
||||
if (Mask(PCRelAddressingMask) == ADRP) {
|
||||
base = AlignDown(base, kPageSize);
|
||||
offset *= kPageSize;
|
||||
} else {
|
||||
VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
|
||||
}
|
||||
} else {
|
||||
// All PC-relative branches.
|
||||
VIXL_ASSERT(GetBranchType() != UnknownBranchType);
|
||||
// Relative branch offsets are instruction-size-aligned.
|
||||
offset = GetImmBranch() * static_cast<int>(kInstructionSize);
|
||||
}
|
||||
return base + offset;
|
||||
}
|
||||
|
||||
|
||||
int Instruction::GetImmBranch() const {
|
||||
switch (GetBranchType()) {
|
||||
case CondBranchType:
|
||||
return GetImmCondBranch();
|
||||
case UncondBranchType:
|
||||
return GetImmUncondBranch();
|
||||
case CompareBranchType:
|
||||
return GetImmCmpBranch();
|
||||
case TestBranchType:
|
||||
return GetImmTestBranch();
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
|
||||
if (IsPCRelAddressing()) {
|
||||
SetPCRelImmTarget(target);
|
||||
} else {
|
||||
SetBranchImmTarget(target);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Instruction::SetPCRelImmTarget(const Instruction* target) {
|
||||
ptrdiff_t imm21;
|
||||
if ((Mask(PCRelAddressingMask) == ADR)) {
|
||||
imm21 = target - this;
|
||||
} else {
|
||||
VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
|
||||
uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
|
||||
uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
|
||||
imm21 = target_page - this_page;
|
||||
}
|
||||
Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));
|
||||
|
||||
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
|
||||
}
|
||||
|
||||
|
||||
void Instruction::SetBranchImmTarget(const Instruction* target) {
|
||||
VIXL_ASSERT(((target - this) & 3) == 0);
|
||||
Instr branch_imm = 0;
|
||||
uint32_t imm_mask = 0;
|
||||
int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
|
||||
switch (GetBranchType()) {
|
||||
case CondBranchType: {
|
||||
branch_imm = Assembler::ImmCondBranch(offset);
|
||||
imm_mask = ImmCondBranch_mask;
|
||||
break;
|
||||
}
|
||||
case UncondBranchType: {
|
||||
branch_imm = Assembler::ImmUncondBranch(offset);
|
||||
imm_mask = ImmUncondBranch_mask;
|
||||
break;
|
||||
}
|
||||
case CompareBranchType: {
|
||||
branch_imm = Assembler::ImmCmpBranch(offset);
|
||||
imm_mask = ImmCmpBranch_mask;
|
||||
break;
|
||||
}
|
||||
case TestBranchType: {
|
||||
branch_imm = Assembler::ImmTestBranch(offset);
|
||||
imm_mask = ImmTestBranch_mask;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
}
|
||||
SetInstructionBits(Mask(~imm_mask) | branch_imm);
|
||||
}
|
||||
|
||||
|
||||
void Instruction::SetImmLLiteral(const Instruction* source) {
|
||||
VIXL_ASSERT(IsWordAligned(source));
|
||||
ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
|
||||
Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
|
||||
Instr mask = ImmLLiteral_mask;
|
||||
|
||||
SetInstructionBits(Mask(~mask) | imm);
|
||||
}
|
||||
|
||||
|
||||
VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
|
||||
VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
|
||||
vform == kFormatH || vform == kFormatS || vform == kFormatD);
|
||||
switch (vform) {
|
||||
case kFormat8H:
|
||||
return kFormat8B;
|
||||
case kFormat4S:
|
||||
return kFormat4H;
|
||||
case kFormat2D:
|
||||
return kFormat2S;
|
||||
case kFormatH:
|
||||
return kFormatB;
|
||||
case kFormatS:
|
||||
return kFormatH;
|
||||
case kFormatD:
|
||||
return kFormatS;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return kFormatUndefined;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
|
||||
VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
|
||||
vform == kFormatB || vform == kFormatH || vform == kFormatS);
|
||||
switch (vform) {
|
||||
case kFormat8B:
|
||||
return kFormat8H;
|
||||
case kFormat4H:
|
||||
return kFormat4S;
|
||||
case kFormat2S:
|
||||
return kFormat2D;
|
||||
case kFormatB:
|
||||
return kFormatH;
|
||||
case kFormatH:
|
||||
return kFormatS;
|
||||
case kFormatS:
|
||||
return kFormatD;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return kFormatUndefined;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
VectorFormat VectorFormatFillQ(VectorFormat vform) {
|
||||
switch (vform) {
|
||||
case kFormatB:
|
||||
case kFormat8B:
|
||||
case kFormat16B:
|
||||
return kFormat16B;
|
||||
case kFormatH:
|
||||
case kFormat4H:
|
||||
case kFormat8H:
|
||||
return kFormat8H;
|
||||
case kFormatS:
|
||||
case kFormat2S:
|
||||
case kFormat4S:
|
||||
return kFormat4S;
|
||||
case kFormatD:
|
||||
case kFormat1D:
|
||||
case kFormat2D:
|
||||
return kFormat2D;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return kFormatUndefined;
|
||||
}
|
||||
}
|
||||
|
||||
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
|
||||
switch (vform) {
|
||||
case kFormat4H:
|
||||
return kFormat8B;
|
||||
case kFormat8H:
|
||||
return kFormat16B;
|
||||
case kFormat2S:
|
||||
return kFormat4H;
|
||||
case kFormat4S:
|
||||
return kFormat8H;
|
||||
case kFormat1D:
|
||||
return kFormat2S;
|
||||
case kFormat2D:
|
||||
return kFormat4S;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return kFormatUndefined;
|
||||
}
|
||||
}
|
||||
|
||||
VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
|
||||
VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
|
||||
switch (vform) {
|
||||
case kFormat8B:
|
||||
return kFormat16B;
|
||||
case kFormat4H:
|
||||
return kFormat8H;
|
||||
case kFormat2S:
|
||||
return kFormat4S;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return kFormatUndefined;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
|
||||
VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
|
||||
switch (vform) {
|
||||
case kFormat16B:
|
||||
return kFormat8B;
|
||||
case kFormat8H:
|
||||
return kFormat4H;
|
||||
case kFormat4S:
|
||||
return kFormat2S;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return kFormatUndefined;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
VectorFormat ScalarFormatFromLaneSize(int laneSize) {
|
||||
switch (laneSize) {
|
||||
case 8:
|
||||
return kFormatB;
|
||||
case 16:
|
||||
return kFormatH;
|
||||
case 32:
|
||||
return kFormatS;
|
||||
case 64:
|
||||
return kFormatD;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return kFormatUndefined;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
|
||||
return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
|
||||
}
|
||||
|
||||
|
||||
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
|
||||
VIXL_ASSERT(vform != kFormatUndefined);
|
||||
switch (vform) {
|
||||
case kFormatB:
|
||||
return kBRegSize;
|
||||
case kFormatH:
|
||||
return kHRegSize;
|
||||
case kFormatS:
|
||||
case kFormat2H:
|
||||
return kSRegSize;
|
||||
case kFormatD:
|
||||
return kDRegSize;
|
||||
case kFormat8B:
|
||||
case kFormat4H:
|
||||
case kFormat2S:
|
||||
case kFormat1D:
|
||||
return kDRegSize;
|
||||
default:
|
||||
return kQRegSize;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
|
||||
return RegisterSizeInBitsFromFormat(vform) / 8;
|
||||
}
|
||||
|
||||
|
||||
unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
|
||||
VIXL_ASSERT(vform != kFormatUndefined);
|
||||
switch (vform) {
|
||||
case kFormatB:
|
||||
case kFormat8B:
|
||||
case kFormat16B:
|
||||
return 8;
|
||||
case kFormatH:
|
||||
case kFormat2H:
|
||||
case kFormat4H:
|
||||
case kFormat8H:
|
||||
return 16;
|
||||
case kFormatS:
|
||||
case kFormat2S:
|
||||
case kFormat4S:
|
||||
return 32;
|
||||
case kFormatD:
|
||||
case kFormat1D:
|
||||
case kFormat2D:
|
||||
return 64;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int LaneSizeInBytesFromFormat(VectorFormat vform) {
|
||||
return LaneSizeInBitsFromFormat(vform) / 8;
|
||||
}
|
||||
|
||||
|
||||
int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
|
||||
VIXL_ASSERT(vform != kFormatUndefined);
|
||||
switch (vform) {
|
||||
case kFormatB:
|
||||
case kFormat8B:
|
||||
case kFormat16B:
|
||||
return 0;
|
||||
case kFormatH:
|
||||
case kFormat2H:
|
||||
case kFormat4H:
|
||||
case kFormat8H:
|
||||
return 1;
|
||||
case kFormatS:
|
||||
case kFormat2S:
|
||||
case kFormat4S:
|
||||
return 2;
|
||||
case kFormatD:
|
||||
case kFormat1D:
|
||||
case kFormat2D:
|
||||
return 3;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int LaneCountFromFormat(VectorFormat vform) {
|
||||
VIXL_ASSERT(vform != kFormatUndefined);
|
||||
switch (vform) {
|
||||
case kFormat16B:
|
||||
return 16;
|
||||
case kFormat8B:
|
||||
case kFormat8H:
|
||||
return 8;
|
||||
case kFormat4H:
|
||||
case kFormat4S:
|
||||
return 4;
|
||||
case kFormat2H:
|
||||
case kFormat2S:
|
||||
case kFormat2D:
|
||||
return 2;
|
||||
case kFormat1D:
|
||||
case kFormatB:
|
||||
case kFormatH:
|
||||
case kFormatS:
|
||||
case kFormatD:
|
||||
return 1;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int MaxLaneCountFromFormat(VectorFormat vform) {
|
||||
VIXL_ASSERT(vform != kFormatUndefined);
|
||||
switch (vform) {
|
||||
case kFormatB:
|
||||
case kFormat8B:
|
||||
case kFormat16B:
|
||||
return 16;
|
||||
case kFormatH:
|
||||
case kFormat4H:
|
||||
case kFormat8H:
|
||||
return 8;
|
||||
case kFormatS:
|
||||
case kFormat2S:
|
||||
case kFormat4S:
|
||||
return 4;
|
||||
case kFormatD:
|
||||
case kFormat1D:
|
||||
case kFormat2D:
|
||||
return 2;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Does 'vform' indicate a vector format or a scalar format?
|
||||
bool IsVectorFormat(VectorFormat vform) {
|
||||
VIXL_ASSERT(vform != kFormatUndefined);
|
||||
switch (vform) {
|
||||
case kFormatB:
|
||||
case kFormatH:
|
||||
case kFormatS:
|
||||
case kFormatD:
|
||||
return false;
|
||||
default:
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int64_t MaxIntFromFormat(VectorFormat vform) {
|
||||
return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
|
||||
}
|
||||
|
||||
|
||||
int64_t MinIntFromFormat(VectorFormat vform) {
|
||||
return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
|
||||
}
|
||||
|
||||
|
||||
uint64_t MaxUintFromFormat(VectorFormat vform) {
|
||||
return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
|
||||
}
|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
916
dep/vixl/src/aarch64/instrument-aarch64.cc
Normal file
916
dep/vixl/src/aarch64/instrument-aarch64.cc
Normal file
@ -0,0 +1,916 @@
|
||||
// Copyright 2014, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include "instrument-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
Counter::Counter(const char* name, CounterType type)
|
||||
: count_(0), enabled_(false), type_(type) {
|
||||
VIXL_ASSERT(name != NULL);
|
||||
strncpy(name_, name, kCounterNameMaxLength);
|
||||
// Make sure `name_` is always NULL-terminated, even if the source's length is
|
||||
// higher.
|
||||
name_[kCounterNameMaxLength - 1] = '\0';
|
||||
}
|
||||
|
||||
|
||||
void Counter::Enable() { enabled_ = true; }
|
||||
|
||||
|
||||
void Counter::Disable() { enabled_ = false; }
|
||||
|
||||
|
||||
bool Counter::IsEnabled() { return enabled_; }
|
||||
|
||||
|
||||
void Counter::Increment() {
|
||||
if (enabled_) {
|
||||
count_++;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
uint64_t Counter::GetCount() {
|
||||
uint64_t result = count_;
|
||||
if (type_ == Gauge) {
|
||||
// If the counter is a Gauge, reset the count after reading.
|
||||
count_ = 0;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
// Name used for this counter's column in the output file.
const char* Counter::GetName() { return name_; }


// Cumulative counters accumulate forever; Gauge counters reset on read.
CounterType Counter::GetType() { return type_; }
|
||||
|
||||
|
||||
// Static description of one counter: its display name and whether it is
// cumulative or reset-on-read (see kCounterList below).
struct CounterDescriptor {
  const char* name;
  CounterType type;
};
|
||||
|
||||
|
||||
// The full set of counters instantiated by Instrument. "Instruction" is the
// only Cumulative counter and drives the sampling period; all others are
// Gauges reporting per-sample activity, grouped by instruction category.
static const CounterDescriptor kCounterList[] =
    {{"Instruction", Cumulative},

     // Data processing.
     {"Move Immediate", Gauge},
     {"Add/Sub DP", Gauge},
     {"Logical DP", Gauge},
     {"Other Int DP", Gauge},
     {"FP DP", Gauge},

     {"Conditional Select", Gauge},
     {"Conditional Compare", Gauge},

     // Branches.
     {"Unconditional Branch", Gauge},
     {"Compare and Branch", Gauge},
     {"Test and Branch", Gauge},
     {"Conditional Branch", Gauge},

     // Loads.
     {"Load Integer", Gauge},
     {"Load FP", Gauge},
     {"Load Pair", Gauge},
     {"Load Literal", Gauge},

     // Stores.
     {"Store Integer", Gauge},
     {"Store FP", Gauge},
     {"Store Pair", Gauge},

     // Everything else.
     {"PC Addressing", Gauge},
     {"Other", Gauge},
     {"NEON", Gauge},
     {"Crypto", Gauge}};
|
||||
|
||||
|
||||
// Create the instrumentation sink. `datafile` names the CSV output file
// (NULL, or an unopenable path, falls back to stdout); `sample_period` is the
// number of instructions between counter dumps (0 disables periodic dumps).
Instrument::Instrument(const char* datafile, uint64_t sample_period)
    : output_stream_(stdout), sample_period_(sample_period) {
  // Set up the output stream. If datafile is non-NULL, use that file. If it
  // can't be opened, or datafile is NULL, use stdout.
  if (datafile != NULL) {
    output_stream_ = fopen(datafile, "w");
    if (output_stream_ == NULL) {
      printf("Can't open output file %s. Using stdout.\n", datafile);
      output_stream_ = stdout;
    }
  }

  static const int num_counters =
      sizeof(kCounterList) / sizeof(CounterDescriptor);

  // Dump an instrumentation description comment at the top of the file.
  fprintf(output_stream_, "# counters=%d\n", num_counters);
  fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);

  // Construct Counter objects from counter description array. They are
  // heap-allocated and released by the destructor.
  for (int i = 0; i < num_counters; i++) {
    Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
    counters_.push_back(counter);
  }

  // Write the CSV header line naming each counter column.
  DumpCounterNames();
}
|
||||
|
||||
|
||||
Instrument::~Instrument() {
  // Flush any counter data gathered since the last sample to the output file.
  DumpCounters();

  // Release the heap-allocated Counter objects.
  while (!counters_.empty()) {
    delete counters_.front();
    counters_.pop_front();
  }

  // Close the output file unless we fell back to stdout.
  if (output_stream_ != stdout) {
    fclose(output_stream_);
  }
}
|
||||
|
||||
|
||||
// Called once per visited instruction, before category counting.
void Instrument::Update() {
  // Increment the instruction counter, and dump all counters if a sample period
  // has elapsed.
  static Counter* counter = GetCounter("Instruction");
  VIXL_ASSERT(counter->GetType() == Cumulative);
  counter->Increment();

  // GetCount() does not reset a Cumulative counter, so reading it here has no
  // side effect. A sample_period_ of zero disables periodic dumping.
  if ((sample_period_ != 0) && counter->IsEnabled() &&
      (counter->GetCount() % sample_period_) == 0) {
    DumpCounters();
  }
}
|
||||
|
||||
|
||||
// Write one CSV row with the current value of every counter.
void Instrument::DumpCounters() {
  // Iterate through the counter objects, dumping their values to the output
  // stream. Note that GetCount() resets each Gauge counter, so every row
  // reports activity since the previous row.
  std::list<Counter*>::const_iterator it;
  for (it = counters_.begin(); it != counters_.end(); it++) {
    fprintf(output_stream_, "%" PRIu64 ",", (*it)->GetCount());
  }
  fprintf(output_stream_, "\n");
  fflush(output_stream_);
}
|
||||
|
||||
|
||||
void Instrument::DumpCounterNames() {
|
||||
// Iterate through the counter objects, dumping the counter names to the
|
||||
// output stream.
|
||||
std::list<Counter*>::const_iterator it;
|
||||
for (it = counters_.begin(); it != counters_.end(); it++) {
|
||||
fprintf(output_stream_, "%s,", (*it)->GetName());
|
||||
}
|
||||
fprintf(output_stream_, "\n");
|
||||
fflush(output_stream_);
|
||||
}
|
||||
|
||||
|
||||
void Instrument::HandleInstrumentationEvent(unsigned event) {
|
||||
switch (event) {
|
||||
case InstrumentStateEnable:
|
||||
Enable();
|
||||
break;
|
||||
case InstrumentStateDisable:
|
||||
Disable();
|
||||
break;
|
||||
default:
|
||||
DumpEventMarker(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Instrument::DumpEventMarker(unsigned marker) {
|
||||
// Dumpan event marker to the output stream as a specially formatted comment
|
||||
// line.
|
||||
static Counter* counter = GetCounter("Instruction");
|
||||
|
||||
fprintf(output_stream_,
|
||||
"# %c%c @ %" PRId64 "\n",
|
||||
marker & 0xff,
|
||||
(marker >> 8) & 0xff,
|
||||
counter->GetCount());
|
||||
}
|
||||
|
||||
|
||||
// Look up a counter by name. The set of names is fixed at construction time
// (see kCounterList); asking for an unknown name is a programming error and
// terminates the process.
Counter* Instrument::GetCounter(const char* name) {
  typedef std::list<Counter*>::const_iterator Iterator;
  for (Iterator iter = counters_.begin(); iter != counters_.end(); ++iter) {
    Counter* candidate = *iter;
    if (strcmp(candidate->GetName(), name) == 0) {
      return candidate;
    }
  }

  // A Counter by that name does not exist: print an error message to stderr
  // and the output file, and exit.
  static const char* error_message =
      "# Error: Unknown counter \"%s\". Exiting.\n";
  fprintf(stderr, error_message, name);
  fprintf(output_stream_, error_message, name);
  exit(1);
}
|
||||
|
||||
|
||||
void Instrument::Enable() {
|
||||
std::list<Counter*>::iterator it;
|
||||
for (it = counters_.begin(); it != counters_.end(); it++) {
|
||||
(*it)->Enable();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Instrument::Disable() {
|
||||
std::list<Counter*>::iterator it;
|
||||
for (it = counters_.begin(); it != counters_.end(); it++) {
|
||||
(*it)->Disable();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Decoder visitors. Each visitor ticks the global "Instruction" counter via
// Update() and then increments the Gauge counter for its instruction
// category. The `static` counter lookups are resolved once, on first call.

void Instrument::VisitPCRelAddressing(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("PC Addressing");
  counter->Increment();
}


void Instrument::VisitAddSubImmediate(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Add/Sub DP");
  counter->Increment();
}


void Instrument::VisitLogicalImmediate(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Logical DP");
  counter->Increment();
}


void Instrument::VisitMoveWideImmediate(const Instruction* instr) {
  Update();
  static Counter* counter = GetCounter("Move Immediate");

  // A MOVN writing the zero register is not counted as a move: it is the
  // encoding used to signal instrumentation events, with the event id carried
  // in the immediate field (see HandleInstrumentationEvent).
  if (instr->IsMovn() && (instr->GetRd() == kZeroRegCode)) {
    unsigned imm = instr->GetImmMoveWide();
    HandleInstrumentationEvent(imm);
  } else {
    counter->Increment();
  }
}


void Instrument::VisitBitfield(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Other Int DP");
  counter->Increment();
}


void Instrument::VisitExtract(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Other Int DP");
  counter->Increment();
}
|
||||
|
||||
|
||||
// Branch, system and load/store-pair visitors. Same pattern as above: tick
// the instruction counter, then the category counter.

void Instrument::VisitUnconditionalBranch(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Unconditional Branch");
  counter->Increment();
}


void Instrument::VisitUnconditionalBranchToRegister(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Unconditional Branch");
  counter->Increment();
}


void Instrument::VisitCompareBranch(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Compare and Branch");
  counter->Increment();
}


void Instrument::VisitTestBranch(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Test and Branch");
  counter->Increment();
}


void Instrument::VisitConditionalBranch(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Conditional Branch");
  counter->Increment();
}


void Instrument::VisitSystem(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Other");
  counter->Increment();
}


void Instrument::VisitException(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Other");
  counter->Increment();
}


// Shared helper for all load/store-pair visitors: classify the instruction as
// a load pair or a store pair using its L bit.
void Instrument::InstrumentLoadStorePair(const Instruction* instr) {
  static Counter* load_pair_counter = GetCounter("Load Pair");
  static Counter* store_pair_counter = GetCounter("Store Pair");

  if (instr->Mask(LoadStorePairLBit) != 0) {
    load_pair_counter->Increment();
  } else {
    store_pair_counter->Increment();
  }
}


void Instrument::VisitLoadStorePairPostIndex(const Instruction* instr) {
  Update();
  InstrumentLoadStorePair(instr);
}


void Instrument::VisitLoadStorePairOffset(const Instruction* instr) {
  Update();
  InstrumentLoadStorePair(instr);
}


void Instrument::VisitLoadStorePairPreIndex(const Instruction* instr) {
  Update();
  InstrumentLoadStorePair(instr);
}


void Instrument::VisitLoadStorePairNonTemporal(const Instruction* instr) {
  Update();
  InstrumentLoadStorePair(instr);
}


void Instrument::VisitLoadStoreExclusive(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Other");
  counter->Increment();
}


void Instrument::VisitAtomicMemory(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Other");
  counter->Increment();
}


void Instrument::VisitLoadLiteral(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Load Literal");
  counter->Increment();
}
|
||||
|
||||
|
||||
// Shared helper for single-register load/store visitors: classify the
// instruction as an integer or FP load or store from its opcode. Encodings
// not listed in the switch are deliberately left uncounted.
void Instrument::InstrumentLoadStore(const Instruction* instr) {
  static Counter* load_int_counter = GetCounter("Load Integer");
  static Counter* store_int_counter = GetCounter("Store Integer");
  static Counter* load_fp_counter = GetCounter("Load FP");
  static Counter* store_fp_counter = GetCounter("Store FP");

  switch (instr->Mask(LoadStoreMask)) {
    // All integer-store widths share one counter.
    case STRB_w:
    case STRH_w:
    case STR_w:
      VIXL_FALLTHROUGH();
    case STR_x:
      store_int_counter->Increment();
      break;
    case STR_s:
      VIXL_FALLTHROUGH();
    case STR_d:
      store_fp_counter->Increment();
      break;
    // All integer loads, including sign-extending forms.
    case LDRB_w:
    case LDRH_w:
    case LDR_w:
    case LDR_x:
    case LDRSB_x:
    case LDRSH_x:
    case LDRSW_x:
    case LDRSB_w:
      VIXL_FALLTHROUGH();
    case LDRSH_w:
      load_int_counter->Increment();
      break;
    case LDR_s:
      VIXL_FALLTHROUGH();
    case LDR_d:
      load_fp_counter->Increment();
      break;
  }
}
|
||||
|
||||
|
||||
// Single-register load/store, data-processing, FP and crypto visitors. Same
// tick-then-count pattern throughout.

void Instrument::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
  Update();
  InstrumentLoadStore(instr);
}


void Instrument::VisitLoadStorePostIndex(const Instruction* instr) {
  // NOTE(review): the USE(instr) here looks redundant since `instr` is passed
  // on below; siblings of this visitor omit it.
  USE(instr);
  Update();
  InstrumentLoadStore(instr);
}


void Instrument::VisitLoadStorePreIndex(const Instruction* instr) {
  Update();
  InstrumentLoadStore(instr);
}


void Instrument::VisitLoadStoreRegisterOffset(const Instruction* instr) {
  Update();
  InstrumentLoadStore(instr);
}


void Instrument::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
  Update();
  InstrumentLoadStore(instr);
}


void Instrument::VisitLogicalShifted(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Logical DP");
  counter->Increment();
}


void Instrument::VisitAddSubShifted(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Add/Sub DP");
  counter->Increment();
}


void Instrument::VisitAddSubExtended(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Add/Sub DP");
  counter->Increment();
}


void Instrument::VisitAddSubWithCarry(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Add/Sub DP");
  counter->Increment();
}


void Instrument::VisitConditionalCompareRegister(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Conditional Compare");
  counter->Increment();
}


void Instrument::VisitConditionalCompareImmediate(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Conditional Compare");
  counter->Increment();
}


void Instrument::VisitConditionalSelect(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Conditional Select");
  counter->Increment();
}


void Instrument::VisitDataProcessing1Source(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Other Int DP");
  counter->Increment();
}


void Instrument::VisitDataProcessing2Source(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Other Int DP");
  counter->Increment();
}


void Instrument::VisitDataProcessing3Source(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Other Int DP");
  counter->Increment();
}


void Instrument::VisitFPCompare(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPConditionalCompare(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Conditional Compare");
  counter->Increment();
}


void Instrument::VisitFPConditionalSelect(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Conditional Select");
  counter->Increment();
}


void Instrument::VisitFPImmediate(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPDataProcessing1Source(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPDataProcessing2Source(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPDataProcessing3Source(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPIntegerConvert(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitFPFixedPointConvert(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("FP DP");
  counter->Increment();
}


void Instrument::VisitCrypto2RegSHA(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Crypto");
  counter->Increment();
}


void Instrument::VisitCrypto3RegSHA(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Crypto");
  counter->Increment();
}


void Instrument::VisitCryptoAES(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Crypto");
  counter->Increment();
}
|
||||
|
||||
|
||||
// NEON visitors: every NEON instruction class maps to the single "NEON"
// counter. Unallocated/unimplemented encodings fall into "Other".

void Instrument::VisitNEON2RegMisc(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEON2RegMiscFP16(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEON3Same(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEON3SameFP16(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEON3SameExtra(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEON3Different(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONAcrossLanes(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONByIndexedElement(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONCopy(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONExtract(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONLoadStoreMultiStructPostIndex(
    const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONLoadStoreSingleStructPostIndex(
    const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONModifiedImmediate(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONScalar2RegMisc(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONScalar2RegMiscFP16(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONScalar3Diff(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONScalar3Same(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONScalar3SameFP16(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONScalar3SameExtra(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONScalarByIndexedElement(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONScalarCopy(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONScalarPairwise(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONScalarShiftImmediate(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONShiftImmediate(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONTable(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitNEONPerm(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("NEON");
  counter->Increment();
}


void Instrument::VisitUnallocated(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Other");
  counter->Increment();
}


void Instrument::VisitUnimplemented(const Instruction* instr) {
  USE(instr);
  Update();
  static Counter* counter = GetCounter("Other");
  counter->Increment();
}
|
||||
|
||||
|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
5340
dep/vixl/src/aarch64/logic-aarch64.cc
Normal file
5340
dep/vixl/src/aarch64/logic-aarch64.cc
Normal file
File diff suppressed because it is too large
Load Diff
3024
dep/vixl/src/aarch64/macro-assembler-aarch64.cc
Normal file
3024
dep/vixl/src/aarch64/macro-assembler-aarch64.cc
Normal file
File diff suppressed because it is too large
Load Diff
528
dep/vixl/src/aarch64/operands-aarch64.cc
Normal file
528
dep/vixl/src/aarch64/operands-aarch64.cc
Normal file
@ -0,0 +1,528 @@
|
||||
// Copyright 2016, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include "operands-aarch64.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
// CPURegList utilities.
|
||||
// Remove and return the register with the lowest code in the list, or
// NoCPUReg if the list is empty.
CPURegister CPURegList::PopLowestIndex() {
  // Validate first, for consistency with PopHighestIndex().
  VIXL_ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountTrailingZeros(list_);
  // Shift a 64-bit one: `1 << index` is undefined behaviour for index >= 31
  // even though the register list is kRegListSizeInBits (64) bits wide.
  VIXL_ASSERT((static_cast<uint64_t>(1) << index) & list_);
  Remove(index);
  return CPURegister(index, size_, type_);
}
|
||||
|
||||
|
||||
// Remove and return the register with the highest code in the list, or
// NoCPUReg if the list is empty.
CPURegister CPURegList::PopHighestIndex() {
  VIXL_ASSERT(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountLeadingZeros(list_);
  index = kRegListSizeInBits - 1 - index;
  // Shift a 64-bit one: `1 << index` is undefined behaviour for index >= 31,
  // and the highest set bit can be as high as bit 63.
  VIXL_ASSERT((static_cast<uint64_t>(1) << index) & list_);
  Remove(index);
  return CPURegister(index, size_, type_);
}
|
||||
|
||||
|
||||
// A list is valid if every register it contains can be constructed with the
// list's size and bank, or if it is an empty kNoRegister list.
bool CPURegList::IsValid() const {
  if ((type_ == CPURegister::kRegister) || (type_ == CPURegister::kVRegister)) {
    bool is_valid = true;
    // Try to create a CPURegister for each element in the list.
    for (int i = 0; i < kRegListSizeInBits; i++) {
      if (((list_ >> i) & 1) != 0) {
        is_valid &= CPURegister(i, size_, type_).IsValid();
      }
    }
    return is_valid;
  } else if (type_ == CPURegister::kNoRegister) {
    // We can't use IsEmpty here because that asserts IsValid().
    return list_ == 0;
  } else {
    return false;
  }
}
|
||||
|
||||
|
||||
void CPURegList::RemoveCalleeSaved() {
|
||||
if (GetType() == CPURegister::kRegister) {
|
||||
Remove(GetCalleeSaved(GetRegisterSizeInBits()));
|
||||
} else if (GetType() == CPURegister::kVRegister) {
|
||||
Remove(GetCalleeSavedV(GetRegisterSizeInBits()));
|
||||
} else {
|
||||
VIXL_ASSERT(GetType() == CPURegister::kNoRegister);
|
||||
VIXL_ASSERT(IsEmpty());
|
||||
// The list must already be empty, so do nothing.
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Three- and four-way unions, built on the two-list overload declared
// elsewhere.
CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3) {
  return Union(list_1, Union(list_2, list_3));
}


CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3,
                             const CPURegList& list_4) {
  return Union(Union(list_1, list_2), Union(list_3, list_4));
}
|
||||
|
||||
|
||||
// Three- and four-way intersections, built on the two-list overload declared
// elsewhere.
CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3) {
  return Intersection(list_1, Intersection(list_2, list_3));
}


CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3,
                                    const CPURegList& list_4) {
  return Intersection(Intersection(list_1, list_2),
                      Intersection(list_3, list_4));
}
|
||||
|
||||
|
||||
// Callee-saved integer registers: x19-x29.
CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


// Callee-saved vector registers: v8-v15, at the requested register size.
CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
  return CPURegList(CPURegister::kVRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(unsigned size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  // Do not use lr directly to avoid initialisation order fiasco bugs for users.
  list.Combine(Register(30, kXRegSize));
  return list;
}


CPURegList CPURegList::GetCallerSavedV(unsigned size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
  return list;
}
|
||||
|
||||
|
||||
// Convenience global instances, built with the factories' default size
// argument.
const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();
|
||||
|
||||
|
||||
// Registers.
// Canonical register object tables, one per view (W/X for the integer bank,
// B/H/S/D/Q/V for the vector bank), indexed by register code and expanded
// from AARCH64_REGISTER_CODE_LIST.
#define WREG(n) w##n,
const Register Register::wregisters[] = {AARCH64_REGISTER_CODE_LIST(WREG)};
#undef WREG

#define XREG(n) x##n,
const Register Register::xregisters[] = {AARCH64_REGISTER_CODE_LIST(XREG)};
#undef XREG

#define BREG(n) b##n,
const VRegister VRegister::bregisters[] = {AARCH64_REGISTER_CODE_LIST(BREG)};
#undef BREG

#define HREG(n) h##n,
const VRegister VRegister::hregisters[] = {AARCH64_REGISTER_CODE_LIST(HREG)};
#undef HREG

#define SREG(n) s##n,
const VRegister VRegister::sregisters[] = {AARCH64_REGISTER_CODE_LIST(SREG)};
#undef SREG

#define DREG(n) d##n,
const VRegister VRegister::dregisters[] = {AARCH64_REGISTER_CODE_LIST(DREG)};
#undef DREG

#define QREG(n) q##n,
const VRegister VRegister::qregisters[] = {AARCH64_REGISTER_CODE_LIST(QREG)};
#undef QREG

#define VREG(n) v##n,
const VRegister VRegister::vregisters[] = {AARCH64_REGISTER_CODE_LIST(VREG)};
#undef VREG
|
||||
|
||||
|
||||
// Map a register code to the canonical W register object; the internal SP
// code maps to wsp.
const Register& Register::GetWRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) return wsp;
  VIXL_ASSERT(code < kNumberOfRegisters);
  return wregisters[code];
}


// Map a register code to the canonical X register object; the internal SP
// code maps to sp.
const Register& Register::GetXRegFromCode(unsigned code) {
  if (code == kSPRegInternalCode) return sp;
  VIXL_ASSERT(code < kNumberOfRegisters);
  return xregisters[code];
}
|
||||
|
||||
|
||||
// Returns the canonical B-sized (8-bit) vector register view for `code`.
const VRegister& VRegister::GetBRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return bregisters[code];
}
|
||||
|
||||
|
||||
// Returns the canonical H-sized (16-bit) vector register view for `code`.
const VRegister& VRegister::GetHRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return hregisters[code];
}
|
||||
|
||||
|
||||
// Returns the canonical S-sized (32-bit) vector register view for `code`.
const VRegister& VRegister::GetSRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return sregisters[code];
}
|
||||
|
||||
|
||||
// Returns the canonical D-sized (64-bit) vector register view for `code`.
const VRegister& VRegister::GetDRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return dregisters[code];
}
|
||||
|
||||
|
||||
// Returns the canonical Q-sized (128-bit) vector register view for `code`.
const VRegister& VRegister::GetQRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return qregisters[code];
}
|
||||
|
||||
|
||||
// Returns the canonical V (generic vector) register for `code`.
const VRegister& VRegister::GetVRegFromCode(unsigned code) {
  VIXL_ASSERT(code < kNumberOfVRegisters);
  return vregisters[code];
}
|
||||
|
||||
|
||||
// Returns this register's 32-bit (W) view; valid only for general-purpose
// registers.
const Register& CPURegister::W() const {
  VIXL_ASSERT(IsValidRegister());
  return Register::GetWRegFromCode(code_);
}
|
||||
|
||||
|
||||
// Returns this register's 64-bit (X) view; valid only for general-purpose
// registers.
const Register& CPURegister::X() const {
  VIXL_ASSERT(IsValidRegister());
  return Register::GetXRegFromCode(code_);
}
|
||||
|
||||
|
||||
// Returns this register's B-sized (8-bit) view; valid only for vector
// registers.
const VRegister& CPURegister::B() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetBRegFromCode(code_);
}
|
||||
|
||||
|
||||
// Returns this register's H-sized (16-bit) view; valid only for vector
// registers.
const VRegister& CPURegister::H() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetHRegFromCode(code_);
}
|
||||
|
||||
|
||||
// Returns this register's S-sized (32-bit) view; valid only for vector
// registers.
const VRegister& CPURegister::S() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetSRegFromCode(code_);
}
|
||||
|
||||
|
||||
// Returns this register's D-sized (64-bit) view; valid only for vector
// registers.
const VRegister& CPURegister::D() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetDRegFromCode(code_);
}
|
||||
|
||||
|
||||
// Returns this register's Q-sized (128-bit) view; valid only for vector
// registers.
const VRegister& CPURegister::Q() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetQRegFromCode(code_);
}
|
||||
|
||||
|
||||
// Returns this register's generic vector (V) view; valid only for vector
// registers.
const VRegister& CPURegister::V() const {
  VIXL_ASSERT(IsValidVRegister());
  return VRegister::GetVRegFromCode(code_);
}
|
||||
|
||||
|
||||
// Operand.
// Immediate operand: no register, shift or extend is involved.
Operand::Operand(int64_t immediate)
    : immediate_(immediate),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}
|
||||
|
||||
|
||||
// Shifted-register operand, e.g. {x1, LSL #3}.
Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  // MSL is only meaningful for vector immediates, never for register
  // operands.
  VIXL_ASSERT(shift != MSL);
  // The shift amount must be strictly smaller than the register width.
  VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
  VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
  VIXL_ASSERT(!reg.IsSP());
}
|
||||
|
||||
|
||||
// Extended-register operand, e.g. {w1, UXTW #2}.
Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(reg.IsValid());
  // Extended operands allow at most a 4-bit left shift.
  VIXL_ASSERT(shift_amount <= 4);
  VIXL_ASSERT(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}
|
||||
|
||||
|
||||
// True if this operand holds an immediate (i.e. no register is attached).
bool Operand::IsImmediate() const { return reg_.Is(NoReg); }
|
||||
|
||||
|
||||
// True if this operand is equivalent to a bare register: either it carries no
// shift/extend at all, or the shift/extend it carries is a guaranteed no-op.
bool Operand::IsPlainRegister() const {
  return reg_.IsValid() &&
         (((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) ||
          // No-op shifts.
          ((shift_ != NO_SHIFT) && (shift_amount_ == 0)) ||
          // No-op extend operations.
          // We can't include [US]XTW here without knowing more about the
          // context; they are only no-ops for 32-bit operations.
          //
          // For example, this operand could be replaced with w1:
          //   __ Add(w0, w0, Operand(w1, UXTW));
          // However, no plain register can replace it in this context:
          //   __ Add(x0, x0, Operand(w1, UXTW));
          (((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0)));
}
|
||||
|
||||
|
||||
// True if this operand is a register with a (possibly zero-amount) shift.
bool Operand::IsShiftedRegister() const {
  return reg_.IsValid() && (shift_ != NO_SHIFT);
}
|
||||
|
||||
|
||||
// True if this operand is a register with an extend mode.
bool Operand::IsExtendedRegister() const {
  return reg_.IsValid() && (extend_ != NO_EXTEND);
}
|
||||
|
||||
|
||||
bool Operand::IsZero() const {
|
||||
if (IsImmediate()) {
|
||||
return GetImmediate() == 0;
|
||||
} else {
|
||||
return GetRegister().IsZero();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Convert a plain LSL shifted-register operand into the equivalent
// extended-register form (UXTX for 64-bit registers, UXTW for 32-bit), for
// instructions that only accept the extended encoding.
Operand Operand::ToExtendedRegister() const {
  VIXL_ASSERT(IsShiftedRegister());
  VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
|
||||
|
||||
|
||||
// MemOperand
// Default constructor: an empty memory operand with no base register.
MemOperand::MemOperand()
    : base_(NoReg),
      regoffset_(NoReg),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND) {}
|
||||
|
||||
|
||||
// Immediate-offset addressing, e.g. [base, #offset], with optional
// pre-/post-index address modes.
MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      offset_(offset),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  // The base must be a 64-bit register and must not be the zero register.
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
}
|
||||
|
||||
|
||||
// Register-offset addressing with an extend, e.g. [base, woffset, SXTW #2].
MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(!regoffset.IsSP());
  // Only these extend modes are valid for register offsets.
  VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}
|
||||
|
||||
|
||||
// Register-offset addressing with a shift, e.g. [base, xoffset, LSL #3].
MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  // A shifted register offset must be a 64-bit register, and only LSL is
  // permitted.
  VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  VIXL_ASSERT(shift == LSL);
}
|
||||
|
||||
|
||||
// Build a MemOperand from a generic Operand offset, dispatching on whether
// the offset is an immediate, a shifted register or an extended register.
MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.GetImmediate();
  } else if (offset.IsShiftedRegister()) {
    // Shifted register offsets are valid for plain and post-indexed
    // addressing only.
    VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));

    regoffset_ = offset.GetRegister();
    shift_ = offset.GetShift();
    shift_amount_ = offset.GetShiftAmount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
    VIXL_ASSERT(shift_ == LSL);
  } else {
    VIXL_ASSERT(offset.IsExtendedRegister());
    // Extended register offsets are valid for plain addressing only.
    VIXL_ASSERT(addrmode == Offset);

    regoffset_ = offset.GetRegister();
    extend_ = offset.GetExtend();
    shift_amount_ = offset.GetShiftAmount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    VIXL_ASSERT(!regoffset_.IsSP());
    VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}
|
||||
|
||||
|
||||
// True for plain [base, #imm] addressing (no register offset).
bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}
|
||||
|
||||
|
||||
// True for plain [base, reg{, shift/extend}] addressing.
bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}
|
||||
|
||||
|
||||
// True for pre-indexed addressing, e.g. [base, #imm]!.
bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
|
||||
|
||||
|
||||
// True for post-indexed addressing, e.g. [base], #imm.
bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }
|
||||
|
||||
|
||||
// Add `offset` to the current immediate offset. Only valid for
// immediate-offset addressing.
void MemOperand::AddOffset(int64_t offset) {
  VIXL_ASSERT(IsImmediateOffset());
  offset_ += offset;
}
|
||||
|
||||
|
||||
// Wrap a CPU register as a generic operand. 128-bit (Q) registers are
// rejected because generic operands only support up to X-register sizes.
GenericOperand::GenericOperand(const CPURegister& reg)
    : cpu_register_(reg), mem_op_size_(0) {
  if (reg.IsQ()) {
    VIXL_ASSERT(reg.GetSizeInBits() > static_cast<int>(kXRegSize));
    // Support for Q registers is not implemented yet.
    VIXL_UNIMPLEMENTED();
  }
}
|
||||
|
||||
|
||||
// Wrap a memory location (with its access size in bytes) as a generic
// operand.
GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size)
    : cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) {
  if (mem_op_size_ > kXRegSizeInBytes) {
    // We only support generic operands up to the size of X registers.
    VIXL_UNIMPLEMENTED();
  }
}
|
||||
|
||||
bool GenericOperand::Equals(const GenericOperand& other) const {
|
||||
if (!IsValid() || !other.IsValid()) {
|
||||
// Two invalid generic operands are considered equal.
|
||||
return !IsValid() && !other.IsValid();
|
||||
}
|
||||
if (IsCPURegister() && other.IsCPURegister()) {
|
||||
return GetCPURegister().Is(other.GetCPURegister());
|
||||
} else if (IsMemOperand() && other.IsMemOperand()) {
|
||||
return GetMemOperand().Equals(other.GetMemOperand()) &&
|
||||
(GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} // namespace vixl::aarch64
|
197
dep/vixl/src/aarch64/pointer-auth-aarch64.cc
Normal file
197
dep/vixl/src/aarch64/pointer-auth-aarch64.cc
Normal file
@ -0,0 +1,197 @@
|
||||
// Copyright 2018, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
|
||||
|
||||
#include "simulator-aarch64.h"
|
||||
|
||||
#include "utils-vixl.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch64 {
|
||||
|
||||
// Randomly generated example keys for simulating only.
// Each initializer supplies the key's two 64-bit halves followed by its key
// number (0 or 1), which AuthPAC uses to build the failure error code.
const Simulator::PACKey Simulator::kPACKeyIA = {0xc31718727de20f71,
                                                0xab9fd4e14b2fec51,
                                                0};
const Simulator::PACKey Simulator::kPACKeyIB = {0xeebb163b474e04c8,
                                                0x5267ac6fc280fb7c,
                                                1};
const Simulator::PACKey Simulator::kPACKeyDA = {0x5caef808deb8b1e2,
                                                0xd347cbc06b7b0f77,
                                                0};
const Simulator::PACKey Simulator::kPACKeyDB = {0xe06aa1a949ba8cc7,
                                                0xcfde69e3db6d0432,
                                                1};

// The general PAC key isn't intended to be used with AuthPAC so we ensure the
// key number is invalid and asserts if used incorrectly.
const Simulator::PACKey Simulator::kPACKeyGA = {0xfcd98a44d564b3d5,
                                                0x6c56df1904bf0ddc,
                                                -1};
|
||||
|
||||
// Extract the 4-bit field of `in_data` that starts at bit `position`.
static uint64_t GetNibble(uint64_t in_data, int position) {
  return (in_data >> position) & UINT64_C(0xf);
}
|
||||
|
||||
// Permute the sixteen nibbles of `in_data` with a fixed shuffle: output
// nibble i is the input nibble that starts at bit kSourceBit[i].
static uint64_t ShuffleNibbles(uint64_t in_data) {
  static const int kSourceBit[16] =
      {4, 36, 52, 40, 44, 0, 24, 12, 56, 60, 8, 32, 16, 28, 20, 48};
  uint64_t shuffled = 0;
  for (int i = 0; i < 16; i++) {
    shuffled |= ((in_data >> kSourceBit[i]) & UINT64_C(0xf)) << (4 * i);
  }
  return shuffled;
}
|
||||
|
||||
// Apply a fixed 4-bit substitution box independently to each of the sixteen
// nibbles of `in_data`.
static uint64_t SubstituteNibbles(uint64_t in_data) {
  // Randomly chosen substitutes.
  static const uint64_t kSBox[16] =
      {4, 7, 3, 9, 10, 14, 0, 1, 15, 2, 8, 6, 12, 5, 11, 13};
  uint64_t substituted = 0;
  for (int i = 0; i < 16; i++) {
    substituted |= kSBox[(in_data >> (4 * i)) & 0xf] << (4 * i);
  }
  return substituted;
}
|
||||
|
||||
// Rotate nibble to the left by the amount specified.
|
||||
static uint64_t RotNibble(uint64_t in_cell, int amount) {
|
||||
VIXL_ASSERT((amount >= 0) && (amount <= 3));
|
||||
|
||||
in_cell &= 0xf;
|
||||
uint64_t temp = (in_cell << 4) | in_cell;
|
||||
return (temp >> (4 - amount)) & 0xf;
|
||||
}
|
||||
|
||||
// Diffusion step: treats the 16 nibbles as four columns (nibbles i, i+4,
// i+8, i+12 for i in 0..3) and replaces each nibble with the XOR of rotated
// copies of the other three nibbles in its column.
static uint64_t BigShuffle(uint64_t in_data) {
  uint64_t out_data = 0;
  for (int i = 0; i < 4; i++) {
    uint64_t n12 = GetNibble(in_data, 4 * (i + 12));
    uint64_t n8 = GetNibble(in_data, 4 * (i + 8));
    uint64_t n4 = GetNibble(in_data, 4 * (i + 4));
    uint64_t n0 = GetNibble(in_data, 4 * (i + 0));

    uint64_t t0 = RotNibble(n8, 2) ^ RotNibble(n4, 1) ^ RotNibble(n0, 1);
    uint64_t t1 = RotNibble(n12, 1) ^ RotNibble(n4, 2) ^ RotNibble(n0, 1);
    uint64_t t2 = RotNibble(n12, 2) ^ RotNibble(n8, 1) ^ RotNibble(n0, 1);
    uint64_t t3 = RotNibble(n12, 1) ^ RotNibble(n8, 1) ^ RotNibble(n4, 2);

    // Note the reversal: t3 lands in the lowest row, t0 in the highest.
    out_data |= t3 << (4 * (i + 0));
    out_data |= t2 << (4 * (i + 4));
    out_data |= t1 << (4 * (i + 8));
    out_data |= t0 << (4 * (i + 12));
  }
  return out_data;
}
|
||||
|
||||
// A simple, non-standard hash function invented for simulating. It mixes
// reasonably well, however it is unlikely to be cryptographically secure and
// may have a higher collision chance than other hashing algorithms.
//
// The value is built from `data`, `context` and both halves of `key` through
// alternating shuffle/substitution rounds; the caller masks the result down
// to the pointer's PAC bitfield.
uint64_t Simulator::ComputePAC(uint64_t data, uint64_t context, PACKey key) {
  uint64_t working_value = data ^ key.high;
  working_value = BigShuffle(working_value);
  working_value = ShuffleNibbles(working_value);
  working_value ^= key.low;
  working_value = ShuffleNibbles(working_value);
  working_value = BigShuffle(working_value);
  working_value ^= context;
  working_value = SubstituteNibbles(working_value);
  working_value = BigShuffle(working_value);
  working_value = SubstituteNibbles(working_value);

  return working_value;
}
|
||||
|
||||
// The TTBR is selected by bit 63 or 55 depending on TBI for pointers without
// codes, but is always 55 once a PAC code is added to a pointer. For this
// reason, it must be calculated at the call site.
//
// Returns a mask covering the bits of `ptr` that hold the PAC, i.e. the
// bits between the bottom and top PAC bit, excluding the TTBR selection bit.
uint64_t Simulator::CalculatePACMask(uint64_t ptr, PointerType type, int ttbr) {
  int bottom_pac_bit = GetBottomPACBit(ptr, ttbr);
  int top_pac_bit = GetTopPACBit(ptr, type);
  return ExtractUnsignedBitfield64(top_pac_bit,
                                   bottom_pac_bit,
                                   0xffffffffffffffff & ~kTTBRMask)
         << bottom_pac_bit;
}
|
||||
|
||||
// Authenticate `ptr` against `context` using `key`: recompute the PAC over
// the stripped pointer and compare it with the code embedded in `ptr`.
// On success the original (stripped) pointer is returned; on failure two
// bits below the top PAC bit are replaced with a key-dependent error code so
// that a later dereference faults.
uint64_t Simulator::AuthPAC(uint64_t ptr,
                            uint64_t context,
                            PACKey key,
                            PointerType type) {
  VIXL_ASSERT((key.number == 0) || (key.number == 1));

  uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1);
  // Strip the PAC: fill the PAC bitfield with the TTBR extension bits.
  uint64_t original_ptr =
      ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);

  uint64_t pac = ComputePAC(original_ptr, context, key);

  uint64_t error_code = 1 << key.number;
  if ((pac & pac_mask) == (ptr & pac_mask)) {
    return original_ptr;
  } else {
    int error_lsb = GetTopPACBit(ptr, type) - 2;
    uint64_t error_mask = UINT64_C(0x3) << error_lsb;
    return (original_ptr & ~error_mask) | (error_code << error_lsb);
  }
}
|
||||
|
||||
// Insert a pointer authentication code into the PAC bitfield of `ptr`,
// computed over the extension-corrected pointer, `context` and `key`.
uint64_t Simulator::AddPAC(uint64_t ptr,
                           uint64_t context,
                           PACKey key,
                           PointerType type) {
  int top_pac_bit = GetTopPACBit(ptr, type);

  // TODO: Properly handle the case where extension bits are bad and TBI is
  // turned off, and also test me.
  VIXL_ASSERT(HasTBI(ptr, type));
  int ttbr = (ptr >> 55) & 1;
  uint64_t pac_mask = CalculatePACMask(ptr, type, ttbr);
  // Canonicalize the PAC bitfield to the TTBR extension bits before hashing.
  uint64_t ext_ptr = (ttbr == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);

  uint64_t pac = ComputePAC(ext_ptr, context, key);

  // If the pointer isn't all zeroes or all ones in the PAC bitfield, corrupt
  // the resulting code.
  if (((ptr & (pac_mask | kTTBRMask)) != 0x0) &&
      ((~ptr & (pac_mask | kTTBRMask)) != 0x0)) {
    pac ^= UINT64_C(1) << (top_pac_bit - 1);
  }

  // Keep the non-PAC bits of the pointer and force bit 55 to the TTBR.
  uint64_t ttbr_shifted = static_cast<uint64_t>(ttbr) << 55;
  return (pac & pac_mask) | ttbr_shifted | (ptr & ~pac_mask);
}
|
||||
|
||||
// Remove the PAC from `ptr` by refilling the PAC bitfield with the TTBR
// extension bits (zeros for TTBR0 pointers, ones for TTBR1 pointers).
uint64_t Simulator::StripPAC(uint64_t ptr, PointerType type) {
  uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1);
  return ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
}
|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
||||
|
||||
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
|
6658
dep/vixl/src/aarch64/simulator-aarch64.cc
Normal file
6658
dep/vixl/src/aarch64/simulator-aarch64.cc
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user