dep: Add vixl (AArch32/64 assembler)

Connor McLaughlin
2019-12-04 20:11:06 +10:00
parent baaa94d4c1
commit d520ca35eb
61 changed files with 178153 additions and 1 deletion

File diff suppressed because it is too large


@@ -0,0 +1,541 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CONSTANTS_AARCH32_H_
#define VIXL_CONSTANTS_AARCH32_H_
extern "C" {
#include <stdint.h>
}
#include "globals-vixl.h"
namespace vixl {
namespace aarch32 {
enum InstructionSet { A32, T32 };
#ifdef VIXL_INCLUDE_TARGET_T32_ONLY
const InstructionSet kDefaultISA = T32;
#else
const InstructionSet kDefaultISA = A32;
#endif
const unsigned kRegSizeInBits = 32;
const unsigned kRegSizeInBytes = kRegSizeInBits / 8;
const unsigned kSRegSizeInBits = 32;
const unsigned kSRegSizeInBytes = kSRegSizeInBits / 8;
const unsigned kDRegSizeInBits = 64;
const unsigned kDRegSizeInBytes = kDRegSizeInBits / 8;
const unsigned kQRegSizeInBits = 128;
const unsigned kQRegSizeInBytes = kQRegSizeInBits / 8;
const unsigned kNumberOfRegisters = 16;
const unsigned kNumberOfSRegisters = 32;
const unsigned kMaxNumberOfDRegisters = 32;
const unsigned kNumberOfQRegisters = 16;
const unsigned kNumberOfT32LowRegisters = 8;
const unsigned kIpCode = 12;
const unsigned kSpCode = 13;
const unsigned kLrCode = 14;
const unsigned kPcCode = 15;
const unsigned kT32PcDelta = 4;
const unsigned kA32PcDelta = 8;
const unsigned kRRXEncodedValue = 3;
const unsigned kCoprocMask = 0xe;
const unsigned kInvalidCoprocMask = 0xa;
const unsigned kLowestT32_32Opcode = 0xe8000000;
const uint32_t kUnknownValue = 0xdeadbeef;
const uint32_t kMaxInstructionSizeInBytes = 4;
const uint32_t kA32InstructionSizeInBytes = 4;
const uint32_t k32BitT32InstructionSizeInBytes = 4;
const uint32_t k16BitT32InstructionSizeInBytes = 2;
// Maximum size emitted by a single T32 unconditional macro-instruction.
const uint32_t kMaxT32MacroInstructionSizeInBytes = 32;
const uint32_t kCallerSavedRegistersMask = 0x500f;
const uint16_t k16BitT32NopOpcode = 0xbf00;
const uint16_t kCbzCbnzMask = 0xf500;
const uint16_t kCbzCbnzValue = 0xb100;
const int32_t kCbzCbnzRange = 126;
const int32_t kBConditionalNarrowRange = 254;
const int32_t kBNarrowRange = 2046;
const int32_t kNearLabelRange = kBNarrowRange;
enum SystemFunctionsOpcodes { kPrintfCode };
enum BranchHint { kNear, kFar, kBranchWithoutHint };
// Start of generated code.
// AArch32 version implemented by the library (v8.0).
// The encoding for vX.Y is: (X << 8) | Y.
#define AARCH32_VERSION 0x0800
enum InstructionAttribute {
kNoAttribute = 0,
kArithmetic = 0x1,
kBitwise = 0x2,
kShift = 0x4,
kAddress = 0x8,
kBranch = 0x10,
kSystem = 0x20,
kFpNeon = 0x40,
kLoadStore = 0x80,
kLoadStoreMultiple = 0x100
};
enum InstructionType {
kUndefInstructionType,
kAdc,
kAdcs,
kAdd,
kAdds,
kAddw,
kAdr,
kAnd,
kAnds,
kAsr,
kAsrs,
kB,
kBfc,
kBfi,
kBic,
kBics,
kBkpt,
kBl,
kBlx,
kBx,
kBxj,
kCbnz,
kCbz,
kClrex,
kClz,
kCmn,
kCmp,
kCrc32b,
kCrc32cb,
kCrc32ch,
kCrc32cw,
kCrc32h,
kCrc32w,
kDmb,
kDsb,
kEor,
kEors,
kFldmdbx,
kFldmiax,
kFstmdbx,
kFstmiax,
kHlt,
kHvc,
kIsb,
kIt,
kLda,
kLdab,
kLdaex,
kLdaexb,
kLdaexd,
kLdaexh,
kLdah,
kLdm,
kLdmda,
kLdmdb,
kLdmea,
kLdmed,
kLdmfa,
kLdmfd,
kLdmib,
kLdr,
kLdrb,
kLdrd,
kLdrex,
kLdrexb,
kLdrexd,
kLdrexh,
kLdrh,
kLdrsb,
kLdrsh,
kLsl,
kLsls,
kLsr,
kLsrs,
kMla,
kMlas,
kMls,
kMov,
kMovs,
kMovt,
kMovw,
kMrs,
kMsr,
kMul,
kMuls,
kMvn,
kMvns,
kNop,
kOrn,
kOrns,
kOrr,
kOrrs,
kPkhbt,
kPkhtb,
kPld,
kPldw,
kPli,
kPop,
kPush,
kQadd,
kQadd16,
kQadd8,
kQasx,
kQdadd,
kQdsub,
kQsax,
kQsub,
kQsub16,
kQsub8,
kRbit,
kRev,
kRev16,
kRevsh,
kRor,
kRors,
kRrx,
kRrxs,
kRsb,
kRsbs,
kRsc,
kRscs,
kSadd16,
kSadd8,
kSasx,
kSbc,
kSbcs,
kSbfx,
kSdiv,
kSel,
kShadd16,
kShadd8,
kShasx,
kShsax,
kShsub16,
kShsub8,
kSmlabb,
kSmlabt,
kSmlad,
kSmladx,
kSmlal,
kSmlalbb,
kSmlalbt,
kSmlald,
kSmlaldx,
kSmlals,
kSmlaltb,
kSmlaltt,
kSmlatb,
kSmlatt,
kSmlawb,
kSmlawt,
kSmlsd,
kSmlsdx,
kSmlsld,
kSmlsldx,
kSmmla,
kSmmlar,
kSmmls,
kSmmlsr,
kSmmul,
kSmmulr,
kSmuad,
kSmuadx,
kSmulbb,
kSmulbt,
kSmull,
kSmulls,
kSmultb,
kSmultt,
kSmulwb,
kSmulwt,
kSmusd,
kSmusdx,
kSsat,
kSsat16,
kSsax,
kSsub16,
kSsub8,
kStl,
kStlb,
kStlex,
kStlexb,
kStlexd,
kStlexh,
kStlh,
kStm,
kStmda,
kStmdb,
kStmea,
kStmed,
kStmfa,
kStmfd,
kStmib,
kStr,
kStrb,
kStrd,
kStrex,
kStrexb,
kStrexd,
kStrexh,
kStrh,
kSub,
kSubs,
kSubw,
kSvc,
kSxtab,
kSxtab16,
kSxtah,
kSxtb,
kSxtb16,
kSxth,
kTbb,
kTbh,
kTeq,
kTst,
kUadd16,
kUadd8,
kUasx,
kUbfx,
kUdf,
kUdiv,
kUhadd16,
kUhadd8,
kUhasx,
kUhsax,
kUhsub16,
kUhsub8,
kUmaal,
kUmlal,
kUmlals,
kUmull,
kUmulls,
kUqadd16,
kUqadd8,
kUqasx,
kUqsax,
kUqsub16,
kUqsub8,
kUsad8,
kUsada8,
kUsat,
kUsat16,
kUsax,
kUsub16,
kUsub8,
kUxtab,
kUxtab16,
kUxtah,
kUxtb,
kUxtb16,
kUxth,
kVaba,
kVabal,
kVabd,
kVabdl,
kVabs,
kVacge,
kVacgt,
kVacle,
kVaclt,
kVadd,
kVaddhn,
kVaddl,
kVaddw,
kVand,
kVbic,
kVbif,
kVbit,
kVbsl,
kVceq,
kVcge,
kVcgt,
kVcle,
kVcls,
kVclt,
kVclz,
kVcmp,
kVcmpe,
kVcnt,
kVcvt,
kVcvta,
kVcvtb,
kVcvtm,
kVcvtn,
kVcvtp,
kVcvtr,
kVcvtt,
kVdiv,
kVdup,
kVeor,
kVext,
kVfma,
kVfms,
kVfnma,
kVfnms,
kVhadd,
kVhsub,
kVld1,
kVld2,
kVld3,
kVld4,
kVldm,
kVldmdb,
kVldmia,
kVldr,
kVmax,
kVmaxnm,
kVmin,
kVminnm,
kVmla,
kVmlal,
kVmls,
kVmlsl,
kVmov,
kVmovl,
kVmovn,
kVmrs,
kVmsr,
kVmul,
kVmull,
kVmvn,
kVneg,
kVnmla,
kVnmls,
kVnmul,
kVorn,
kVorr,
kVpadal,
kVpadd,
kVpaddl,
kVpmax,
kVpmin,
kVpop,
kVpush,
kVqabs,
kVqadd,
kVqdmlal,
kVqdmlsl,
kVqdmulh,
kVqdmull,
kVqmovn,
kVqmovun,
kVqneg,
kVqrdmulh,
kVqrshl,
kVqrshrn,
kVqrshrun,
kVqshl,
kVqshlu,
kVqshrn,
kVqshrun,
kVqsub,
kVraddhn,
kVrecpe,
kVrecps,
kVrev16,
kVrev32,
kVrev64,
kVrhadd,
kVrinta,
kVrintm,
kVrintn,
kVrintp,
kVrintr,
kVrintx,
kVrintz,
kVrshl,
kVrshr,
kVrshrn,
kVrsqrte,
kVrsqrts,
kVrsra,
kVrsubhn,
kVseleq,
kVselge,
kVselgt,
kVselvs,
kVshl,
kVshll,
kVshr,
kVshrn,
kVsli,
kVsqrt,
kVsra,
kVsri,
kVst1,
kVst2,
kVst3,
kVst4,
kVstm,
kVstmdb,
kVstmia,
kVstr,
kVsub,
kVsubhn,
kVsubl,
kVsubw,
kVswp,
kVtbl,
kVtbx,
kVtrn,
kVtst,
kVuzp,
kVzip,
kYield
};
const char* ToCString(InstructionType type);
// End of generated code.
inline InstructionAttribute operator|(InstructionAttribute left,
InstructionAttribute right) {
return static_cast<InstructionAttribute>(static_cast<uint32_t>(left) |
static_cast<uint32_t>(right));
}
} // namespace aarch32
} // namespace vixl
#endif // VIXL_CONSTANTS_AARCH32_H_
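For reference, a minimal usage sketch (not part of the commit) of the declarations above; the include path is an assumption, and ToCString() is defined in the library's generated .cc files, so this needs to link against vixl:

#include <cstdio>
#include "aarch32/constants-aarch32.h"  // Assumed include path.

int main() {
  using namespace vixl::aarch32;
  // operator| (defined above) merges InstructionAttribute bit flags.
  InstructionAttribute attrs = kLoadStore | kLoadStoreMultiple;
  std::printf("attrs=0x%x, default ISA=%s, mnemonic=%s\n",
              static_cast<unsigned>(attrs),
              (kDefaultISA == T32) ? "T32" : "A32",
              ToCString(kLdm));  // Mnemonic lookup for an InstructionType.
  return 0;
}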

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,411 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH32_LABEL_AARCH32_H_
#define VIXL_AARCH32_LABEL_AARCH32_H_
extern "C" {
#include <stdint.h>
}
#include <algorithm>
#include <cstddef>
#include <iomanip>
#include <list>
#include "invalset-vixl.h"
#include "pool-manager.h"
#include "utils-vixl.h"
#include "constants-aarch32.h"
#include "instructions-aarch32.h"
namespace vixl {
namespace aarch32 {
class MacroAssembler;
class Location : public LocationBase<int32_t> {
friend class Assembler;
friend class MacroAssembler;
public:
// Unbound location that can be used with the assembler bind() method and
// with the assembler methods for generating instructions, but will never
// be handled by the pool manager.
Location()
: LocationBase<int32_t>(kRawLocation, 1 /* dummy size*/),
referenced_(false) {}
typedef int32_t Offset;
~Location() {
#ifdef VIXL_DEBUG
if (IsReferenced() && !IsBound()) {
VIXL_ABORT_WITH_MSG("Location, label or literal used but not bound.\n");
}
#endif
}
bool IsReferenced() const { return referenced_; }
private:
class EmitOperator {
public:
explicit EmitOperator(InstructionSet isa) : isa_(isa) {
#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
USE(isa_);
VIXL_ASSERT(isa == A32);
#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
USE(isa_);
VIXL_ASSERT(isa == T32);
#endif
}
virtual ~EmitOperator() {}
virtual uint32_t Encode(uint32_t /*instr*/,
Location::Offset /*pc*/,
const Location* /*label*/) const {
return 0;
}
#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
bool IsUsingT32() const { return false; }
#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
bool IsUsingT32() const { return true; }
#else
bool IsUsingT32() const { return isa_ == T32; }
#endif
private:
InstructionSet isa_;
};
protected:
class ForwardRef : public ForwardReference<int32_t> {
public:
// Default constructor for InvalSet.
ForwardRef() : ForwardReference<int32_t>(0, 0, 0, 0, 1), op_(NULL) {}
ForwardRef(const Location::EmitOperator* op,
int32_t location,
int size,
int32_t min_object_location,
int32_t max_object_location,
int object_alignment = 1)
: ForwardReference<int32_t>(location,
size,
min_object_location,
max_object_location,
object_alignment),
op_(op) {}
const Location::EmitOperator* op() const { return op_; }
// We must provide comparison operators to work with InvalSet.
bool operator==(const ForwardRef& other) const {
return GetLocation() == other.GetLocation();
}
bool operator<(const ForwardRef& other) const {
return GetLocation() < other.GetLocation();
}
bool operator<=(const ForwardRef& other) const {
return GetLocation() <= other.GetLocation();
}
bool operator>(const ForwardRef& other) const {
return GetLocation() > other.GetLocation();
}
private:
const Location::EmitOperator* op_;
};
static const int kNPreallocatedElements = 4;
// The following parameters will not affect ForwardRefList in practice, as we
// resolve all references at once and clear the list, so we do not need to
// remove individual elements by invalidating them.
static const int32_t kInvalidLinkKey = INT32_MAX;
static const size_t kReclaimFrom = 512;
static const size_t kReclaimFactor = 2;
typedef InvalSet<ForwardRef,
kNPreallocatedElements,
int32_t,
kInvalidLinkKey,
kReclaimFrom,
kReclaimFactor>
ForwardRefListBase;
typedef InvalSetIterator<ForwardRefListBase> ForwardRefListIteratorBase;
class ForwardRefList : public ForwardRefListBase {
public:
ForwardRefList() : ForwardRefListBase() {}
using ForwardRefListBase::Back;
using ForwardRefListBase::Front;
};
class ForwardRefListIterator : public ForwardRefListIteratorBase {
public:
explicit ForwardRefListIterator(Location* location)
: ForwardRefListIteratorBase(&location->forward_) {}
// TODO: Remove these and use the STL-like interface instead. We'll need a
// const_iterator implemented for this.
using ForwardRefListIteratorBase::Advance;
using ForwardRefListIteratorBase::Current;
};
// For InvalSet::GetKey() and InvalSet::SetKey().
friend class InvalSet<ForwardRef,
kNPreallocatedElements,
int32_t,
kInvalidLinkKey,
kReclaimFrom,
kReclaimFactor>;
private:
virtual void ResolveReferences(internal::AssemblerBase* assembler)
VIXL_OVERRIDE;
void SetReferenced() { referenced_ = true; }
bool HasForwardReferences() const { return !forward_.empty(); }
ForwardRef GetLastForwardReference() const {
VIXL_ASSERT(HasForwardReferences());
return forward_.Back();
}
// Add forward reference to this object. Called from the assembler.
void AddForwardRef(int32_t instr_location,
const EmitOperator& op,
const ReferenceInfo* info);
// Check if we need to add padding when binding this object, in order to
// meet the minimum location requirement.
bool Needs16BitPadding(int location) const;
void EncodeLocationFor(internal::AssemblerBase* assembler,
int32_t from,
const Location::EmitOperator* encoder);
// True if the label has been used at least once.
bool referenced_;
protected:
// Types passed to LocationBase. Must be distinct for unbound Locations (not
// relevant for bound locations, as they don't have a corresponding
// PoolObject).
static const int kRawLocation = 0; // Will not be used by the pool manager.
static const int kVeneerType = 1;
static const int kLiteralType = 2;
// Contains the references to the unbound label
ForwardRefList forward_;
// To be used only by derived classes.
Location(uint32_t type, int size, int alignment)
: LocationBase<int32_t>(type, size, alignment), referenced_(false) {}
// To be used only by derived classes.
explicit Location(Offset location)
: LocationBase<int32_t>(location), referenced_(false) {}
virtual int GetMaxAlignment() const VIXL_OVERRIDE;
virtual int GetMinLocation() const VIXL_OVERRIDE;
private:
// Included to make the class concrete, however should never be called.
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE {
USE(masm);
VIXL_UNREACHABLE();
}
};
class Label : public Location {
static const int kVeneerSize = 4;
// Use an alignment of 1 for all architectures. Even though we can bind an
// unused label, because of the way the MacroAssembler works we can always be
// sure to have the correct buffer alignment for the instruction set we are
// using, so we do not need to enforce additional alignment requirements
// here.
// TODO: Consider modifying the interface of the pool manager to pass an
// optional additional alignment to Bind() in order to handle cases where the
// buffer could be unaligned.
static const int kVeneerAlignment = 1;
public:
Label() : Location(kVeneerType, kVeneerSize, kVeneerAlignment) {}
explicit Label(Offset location) : Location(location) {}
private:
virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
return false;
}
virtual bool ShouldDeletePoolObjectOnPlacement() const VIXL_OVERRIDE {
return false;
}
virtual void UpdatePoolObject(PoolObject<int32_t>* object) VIXL_OVERRIDE;
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
virtual bool UsePoolObjectEmissionMargin() const VIXL_OVERRIDE {
return true;
}
virtual int32_t GetPoolObjectEmissionMargin() const VIXL_OVERRIDE {
VIXL_ASSERT(UsePoolObjectEmissionMargin() == true);
return 1 * KBytes;
}
};
class RawLiteral : public Location {
// Some load instructions require alignment to 4 bytes. Since we do
// not know what instructions will reference a literal after we place
// it, we enforce a 4 byte alignment for literals that are 4 bytes or
// larger.
static const int kLiteralAlignment = 4;
public:
enum PlacementPolicy { kPlacedWhenUsed, kManuallyPlaced };
enum DeletionPolicy {
kDeletedOnPlacementByPool,
kDeletedOnPoolDestruction,
kManuallyDeleted
};
RawLiteral(const void* addr,
int size,
PlacementPolicy placement_policy = kPlacedWhenUsed,
DeletionPolicy deletion_policy = kManuallyDeleted)
: Location(kLiteralType,
size,
(size < kLiteralAlignment) ? size : kLiteralAlignment),
addr_(addr),
manually_placed_(placement_policy == kManuallyPlaced),
deletion_policy_(deletion_policy) {
// We can't have manually placed literals that are not manually deleted.
VIXL_ASSERT(!IsManuallyPlaced() ||
(GetDeletionPolicy() == kManuallyDeleted));
}
RawLiteral(const void* addr, int size, DeletionPolicy deletion_policy)
: Location(kLiteralType,
size,
(size < kLiteralAlignment) ? size : kLiteralAlignment),
addr_(addr),
manually_placed_(false),
deletion_policy_(deletion_policy) {}
const void* GetDataAddress() const { return addr_; }
int GetSize() const { return GetPoolObjectSizeInBytes(); }
bool IsManuallyPlaced() const { return manually_placed_; }
private:
DeletionPolicy GetDeletionPolicy() const { return deletion_policy_; }
virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
return GetDeletionPolicy() == kDeletedOnPlacementByPool;
}
virtual bool ShouldBeDeletedOnPoolManagerDestruction() const VIXL_OVERRIDE {
return GetDeletionPolicy() == kDeletedOnPoolDestruction;
}
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
// Data address before it's moved into the code buffer.
const void* const addr_;
// When this flag is true, the label will be placed manually.
bool manually_placed_;
// Specifies when the literal is removed from memory. It can be delete'd when:
// moved into the code buffer: kDeletedOnPlacementByPool
// the pool is delete'd: kDeletedOnPoolDestruction
// or left to the application: kManuallyDeleted.
DeletionPolicy deletion_policy_;
friend class MacroAssembler;
};
template <typename T>
class Literal : public RawLiteral {
public:
explicit Literal(const T& value,
PlacementPolicy placement_policy = kPlacedWhenUsed,
DeletionPolicy deletion_policy = kManuallyDeleted)
: RawLiteral(&value_, sizeof(T), placement_policy, deletion_policy),
value_(value) {}
explicit Literal(const T& value, DeletionPolicy deletion_policy)
: RawLiteral(&value_, sizeof(T), deletion_policy), value_(value) {}
void UpdateValue(const T& value, CodeBuffer* buffer) {
value_ = value;
if (IsBound()) {
buffer->UpdateData(GetLocation(), GetDataAddress(), GetSize());
}
}
private:
T value_;
};
class StringLiteral : public RawLiteral {
public:
explicit StringLiteral(const char* str,
PlacementPolicy placement_policy = kPlacedWhenUsed,
DeletionPolicy deletion_policy = kManuallyDeleted)
: RawLiteral(str,
static_cast<int>(strlen(str) + 1),
placement_policy,
deletion_policy) {
VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
}
explicit StringLiteral(const char* str, DeletionPolicy deletion_policy)
: RawLiteral(str, static_cast<int>(strlen(str) + 1), deletion_policy) {
VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
}
};
} // namespace aarch32
// Required InvalSet template specialisations.
#define INVAL_SET_TEMPLATE_PARAMETERS \
aarch32::Location::ForwardRef, aarch32::Location::kNPreallocatedElements, \
int32_t, aarch32::Location::kInvalidLinkKey, \
aarch32::Location::kReclaimFrom, aarch32::Location::kReclaimFactor
template <>
inline int32_t InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::GetKey(
const aarch32::Location::ForwardRef& element) {
return element.GetLocation();
}
template <>
inline void InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::SetKey(
aarch32::Location::ForwardRef* element, int32_t key) {
element->SetLocationToInvalidateOnly(key);
}
#undef INVAL_SET_TEMPLATE_PARAMETERS
} // namespace vixl
#endif // VIXL_AARCH32_LABEL_AARCH32_H_
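A minimal sketch, not part of this commit, of how Label and Literal are typically driven through the aarch32 MacroAssembler added alongside this header; treat the exact calls below as illustrative of that interface rather than definitive:

#include "aarch32/macro-assembler-aarch32.h"  // Assumed include path.

void EmitCountdown(vixl::aarch32::MacroAssembler* masm) {
  using namespace vixl::aarch32;
  Label loop;                   // Unbound until Bind() is called.
  Literal<uint32_t> start(42);  // Placed into a literal pool when referenced.

  masm->Ldr(r0, &start);        // Forward reference, resolved by the pool manager.
  masm->Bind(&loop);            // Binds the label to the current buffer location.
  masm->Subs(r0, r0, 1);
  masm->B(ne, &loop);           // Backward branch to the bound label.
}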

File diff suppressed because it is too large


@@ -0,0 +1,927 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH32_OPERANDS_AARCH32_H_
#define VIXL_AARCH32_OPERANDS_AARCH32_H_
#include "aarch32/instructions-aarch32.h"
namespace vixl {
namespace aarch32 {
// Operand represents generic set of arguments to pass to an instruction.
//
// Usage: <instr> <Rd> , <Operand>
//
// where <instr> is the instruction to use (e.g., Mov(), Rsb(), etc.)
// <Rd> is the destination register
// <Operand> is the rest of the arguments to the instruction
//
// <Operand> can be one of:
//
// #<imm> - an unsigned 32-bit immediate value
// <Rm>, <shift> <#amount> - immediate shifted register
// <Rm>, <shift> <Rs> - register shifted register
//
class Operand {
public:
// { #<immediate> }
// where <immediate> is uint32_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(uint32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoReg),
shift_(LSL),
amount_(0),
rs_(NoReg) {}
Operand(int32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoReg),
shift_(LSL),
amount_(0),
rs_(NoReg) {}
// rm
// where rm is the base register
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(Register rm) // NOLINT(runtime/explicit)
: imm_(0),
rm_(rm),
shift_(LSL),
amount_(0),
rs_(NoReg) {
VIXL_ASSERT(rm_.IsValid());
}
// rm, <shift>
// where rm is the base register, and
// <shift> is RRX
Operand(Register rm, Shift shift)
: imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(NoReg) {
VIXL_ASSERT(rm_.IsValid());
VIXL_ASSERT(shift_.IsRRX());
}
// rm, <shift> #<amount>
// where rm is the base register, and
// <shift> is one of {LSL, LSR, ASR, ROR}, and
// <amount> is uint6_t.
Operand(Register rm, Shift shift, uint32_t amount)
: imm_(0), rm_(rm), shift_(shift), amount_(amount), rs_(NoReg) {
VIXL_ASSERT(rm_.IsValid());
VIXL_ASSERT(!shift_.IsRRX());
#ifdef VIXL_DEBUG
switch (shift_.GetType()) {
case LSL:
VIXL_ASSERT(amount_ <= 31);
break;
case ROR:
VIXL_ASSERT(amount_ <= 31);
break;
case LSR:
case ASR:
VIXL_ASSERT(amount_ <= 32);
break;
case RRX:
default:
VIXL_UNREACHABLE();
break;
}
#endif
}
// rm, <shift> rs
// where rm is the base register, and
// <shift> is one of {LSL, LSR, ASR, ROR}, and
// rs is the shifted register
Operand(Register rm, Shift shift, Register rs)
: imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(rs) {
VIXL_ASSERT(rm_.IsValid() && rs_.IsValid());
VIXL_ASSERT(!shift_.IsRRX());
}
// Factory methods creating operands from any integral or pointer type. The
// source must fit into 32 bits.
template <typename T>
static Operand From(T immediate) {
#if __cplusplus >= 201103L
VIXL_STATIC_ASSERT_MESSAGE(std::is_integral<T>::value,
"An integral type is required to build an "
"immediate operand.");
#endif
// Allow either a signed or an unsigned 32-bit integer to be passed, but store it
// as a uint32_t. The signedness information will be lost. We have to add a
// static_cast to make sure the compiler does not complain about implicit 64
// to 32 narrowing. It's perfectly acceptable for the user to pass a 64-bit
// value, as long as it can be encoded in 32 bits.
VIXL_ASSERT(IsInt32(immediate) || IsUint32(immediate));
return Operand(static_cast<uint32_t>(immediate));
}
template <typename T>
static Operand From(T* address) {
uintptr_t address_as_integral = reinterpret_cast<uintptr_t>(address);
VIXL_ASSERT(IsUint32(address_as_integral));
return Operand(static_cast<uint32_t>(address_as_integral));
}
bool IsImmediate() const { return !rm_.IsValid(); }
bool IsPlainRegister() const {
return rm_.IsValid() && !shift_.IsRRX() && !rs_.IsValid() && (amount_ == 0);
}
bool IsImmediateShiftedRegister() const {
return rm_.IsValid() && !rs_.IsValid();
}
bool IsRegisterShiftedRegister() const {
return rm_.IsValid() && rs_.IsValid();
}
uint32_t GetImmediate() const {
VIXL_ASSERT(IsImmediate());
return imm_;
}
int32_t GetSignedImmediate() const {
VIXL_ASSERT(IsImmediate());
int32_t result;
memcpy(&result, &imm_, sizeof(result));
return result;
}
Register GetBaseRegister() const {
VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister());
return rm_;
}
Shift GetShift() const {
VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister());
return shift_;
}
uint32_t GetShiftAmount() const {
VIXL_ASSERT(IsImmediateShiftedRegister());
return amount_;
}
Register GetShiftRegister() const {
VIXL_ASSERT(IsRegisterShiftedRegister());
return rs_;
}
uint32_t GetTypeEncodingValue() const {
return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue();
}
private:
// Forbid implicitly creating operands around types that cannot be encoded
// into a uint32_t without loss.
#if __cplusplus >= 201103L
Operand(int64_t) = delete; // NOLINT(runtime/explicit)
Operand(uint64_t) = delete; // NOLINT(runtime/explicit)
Operand(float) = delete; // NOLINT(runtime/explicit)
Operand(double) = delete; // NOLINT(runtime/explicit)
#else
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(int64_t) { // NOLINT(runtime/explicit)
VIXL_UNREACHABLE();
}
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(uint64_t) { // NOLINT(runtime/explicit)
VIXL_UNREACHABLE();
}
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(float) { // NOLINT
VIXL_UNREACHABLE();
}
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(double) { // NOLINT
VIXL_UNREACHABLE();
}
#endif
uint32_t imm_;
Register rm_;
Shift shift_;
uint32_t amount_;
Register rs_;
};
std::ostream& operator<<(std::ostream& os, const Operand& operand);
class NeonImmediate {
template <typename T>
struct DataTypeIdentity {
T data_type_;
};
public:
// { #<immediate> }
// where <immediate> is 32 bit number.
// This is allowed to be an implicit constructor because NeonImmediate is
// a wrapper class that doesn't normally perform any type conversion.
NeonImmediate(uint32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I32) {}
NeonImmediate(int immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I32) {}
// { #<immediate> }
// where <immediate> is a 64 bit number
// This is allowed to be an implicit constructor because NeonImmediate is
// a wrapper class that doesn't normally perform any type conversion.
NeonImmediate(int64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I64) {}
NeonImmediate(uint64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I64) {}
// { #<immediate> }
// where <immediate> is a non zero floating point number which can be encoded
// as an 8 bit floating point (checked by the constructor).
// This is allowed to be an implicit constructor because NeonImmediate is
// a wrapper class that doesn't normally perform any type conversion.
NeonImmediate(float immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(F32) {}
NeonImmediate(double immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(F64) {}
NeonImmediate(const NeonImmediate& src)
: imm_(src.imm_), immediate_type_(src.immediate_type_) {}
template <typename T>
T GetImmediate() const {
return GetImmediate(DataTypeIdentity<T>());
}
template <typename T>
T GetImmediate(const DataTypeIdentity<T>&) const {
VIXL_ASSERT(sizeof(T) <= sizeof(uint32_t));
VIXL_ASSERT(CanConvert<T>());
if (immediate_type_.Is(I64))
return static_cast<T>(imm_.u64_ & static_cast<T>(-1));
if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0;
return static_cast<T>(imm_.u32_ & static_cast<T>(-1));
}
uint64_t GetImmediate(const DataTypeIdentity<uint64_t>&) const {
VIXL_ASSERT(CanConvert<uint64_t>());
if (immediate_type_.Is(I32)) return imm_.u32_;
if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0;
return imm_.u64_;
}
float GetImmediate(const DataTypeIdentity<float>&) const {
VIXL_ASSERT(CanConvert<float>());
if (immediate_type_.Is(F64)) return static_cast<float>(imm_.d_);
return imm_.f_;
}
double GetImmediate(const DataTypeIdentity<double>&) const {
VIXL_ASSERT(CanConvert<double>());
if (immediate_type_.Is(F32)) return static_cast<double>(imm_.f_);
return imm_.d_;
}
bool IsInteger32() const { return immediate_type_.Is(I32); }
bool IsInteger64() const { return immediate_type_.Is(I64); }
bool IsInteger() const { return IsInteger32() | IsInteger64(); }
bool IsFloat() const { return immediate_type_.Is(F32); }
bool IsDouble() const { return immediate_type_.Is(F64); }
bool IsFloatZero() const {
if (immediate_type_.Is(F32)) return imm_.f_ == 0.0f;
if (immediate_type_.Is(F64)) return imm_.d_ == 0.0;
return false;
}
template <typename T>
bool CanConvert() const {
return CanConvert(DataTypeIdentity<T>());
}
template <typename T>
bool CanConvert(const DataTypeIdentity<T>&) const {
VIXL_ASSERT(sizeof(T) < sizeof(uint32_t));
return (immediate_type_.Is(I32) && ((imm_.u32_ >> (8 * sizeof(T))) == 0)) ||
(immediate_type_.Is(I64) && ((imm_.u64_ >> (8 * sizeof(T))) == 0)) ||
(immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) ||
(immediate_type_.Is(F64) && (imm_.d_ == 0.0));
}
bool CanConvert(const DataTypeIdentity<uint32_t>&) const {
return immediate_type_.Is(I32) ||
(immediate_type_.Is(I64) && ((imm_.u64_ >> 32) == 0)) ||
(immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) ||
(immediate_type_.Is(F64) && (imm_.d_ == 0.0));
}
bool CanConvert(const DataTypeIdentity<uint64_t>&) const {
return IsInteger() || CanConvert<uint32_t>();
}
bool CanConvert(const DataTypeIdentity<float>&) const {
return IsFloat() || IsDouble();
}
bool CanConvert(const DataTypeIdentity<double>&) const {
return IsFloat() || IsDouble();
}
friend std::ostream& operator<<(std::ostream& os,
const NeonImmediate& operand);
private:
union NeonImmediateType {
uint64_t u64_;
double d_;
uint32_t u32_;
float f_;
NeonImmediateType(uint64_t u) : u64_(u) {}
NeonImmediateType(int64_t u) : u64_(u) {}
NeonImmediateType(uint32_t u) : u32_(u) {}
NeonImmediateType(int32_t u) : u32_(u) {}
NeonImmediateType(double d) : d_(d) {}
NeonImmediateType(float f) : f_(f) {}
NeonImmediateType(const NeonImmediateType& ref) : u64_(ref.u64_) {}
} imm_;
DataType immediate_type_;
};
std::ostream& operator<<(std::ostream& os, const NeonImmediate& operand);
class NeonOperand {
public:
NeonOperand(int32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(int64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(uint64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(float immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(double immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: imm_(imm),
rm_(NoDReg) {}
NeonOperand(const VRegister& rm) // NOLINT(runtime/explicit)
: imm_(0),
rm_(rm) {
VIXL_ASSERT(rm_.IsValid());
}
bool IsImmediate() const { return !rm_.IsValid(); }
bool IsRegister() const { return rm_.IsValid(); }
bool IsFloatZero() const {
VIXL_ASSERT(IsImmediate());
return imm_.IsFloatZero();
}
const NeonImmediate& GetNeonImmediate() const { return imm_; }
VRegister GetRegister() const {
VIXL_ASSERT(IsRegister());
return rm_;
}
protected:
NeonImmediate imm_;
VRegister rm_;
};
std::ostream& operator<<(std::ostream& os, const NeonOperand& operand);
// SOperand represents either an immediate or a SRegister.
class SOperand : public NeonOperand {
public:
// #<immediate>
// where <immediate> is 32bit int
// This is allowed to be an implicit constructor because SOperand is
// a wrapper class that doesn't normally perform any type conversion.
SOperand(int32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
SOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
// #<immediate>
// where <immediate> is 32bit float
SOperand(float immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
// where <immediate> is 64bit float
SOperand(double immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
SOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: NeonOperand(imm) {}
// rm
// This is allowed to be an implicit constructor because SOperand is
// a wrapper class that doesn't normally perform any type conversion.
SOperand(SRegister rm) // NOLINT(runtime/explicit)
: NeonOperand(rm) {}
SRegister GetRegister() const {
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kSRegister));
return SRegister(rm_.GetCode());
}
};
// DOperand represents either an immediate or a DRegister.
std::ostream& operator<<(std::ostream& os, const SOperand& operand);
class DOperand : public NeonOperand {
public:
// #<immediate>
// where <immediate> is uint32_t.
// This is allowed to be an implicit constructor because DOperand is
// a wrapper class that doesn't normally perform any type conversion.
DOperand(int32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(int64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(uint64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
// #<immediate>
// where <immediate> is a non zero floating point number which can be encoded
// as an 8 bit floating point (checked by the constructor).
// This is allowed to be an implicit constructor because DOperand is
// a wrapper class that doesn't normally perform any type conversion.
DOperand(float immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(double immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: NeonOperand(imm) {}
// rm
// This is allowed to be an implicit constructor because DOperand is
// a wrapper class that doesn't normally perform any type conversion.
DOperand(DRegister rm) // NOLINT(runtime/explicit)
: NeonOperand(rm) {}
DRegister GetRegister() const {
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kDRegister));
return DRegister(rm_.GetCode());
}
};
std::ostream& operator<<(std::ostream& os, const DOperand& operand);
// QOperand represents either an immediate or a QRegister.
class QOperand : public NeonOperand {
public:
// #<immediate>
// where <immediate> is uint32_t.
// This is allowed to be an implicit constructor because QOperand is
// a wrapper class that doesn't normally perform any type conversion.
QOperand(int32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(int64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(uint64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(float immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(double immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: NeonOperand(imm) {}
// rm
// This is allowed to be an implicit constructor because QOperand is
// a wrapper class that doesn't normally perform any type conversion.
QOperand(QRegister rm) // NOLINT(runtime/explicit)
: NeonOperand(rm) {
VIXL_ASSERT(rm_.IsValid());
}
QRegister GetRegister() const {
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kQRegister));
return QRegister(rm_.GetCode());
}
};
std::ostream& operator<<(std::ostream& os, const QOperand& operand);
class ImmediateVFP : public EncodingValue {
template <typename T>
struct FloatType {
typedef T base_type;
};
public:
explicit ImmediateVFP(const NeonImmediate& neon_imm) {
if (neon_imm.IsFloat()) {
const float imm = neon_imm.GetImmediate<float>();
if (VFP::IsImmFP32(imm)) {
SetEncodingValue(VFP::FP32ToImm8(imm));
}
} else if (neon_imm.IsDouble()) {
const double imm = neon_imm.GetImmediate<double>();
if (VFP::IsImmFP64(imm)) {
SetEncodingValue(VFP::FP64ToImm8(imm));
}
}
}
template <typename T>
static T Decode(uint32_t v) {
return Decode(v, FloatType<T>());
}
static float Decode(uint32_t imm8, const FloatType<float>&) {
return VFP::Imm8ToFP32(imm8);
}
static double Decode(uint32_t imm8, const FloatType<double>&) {
return VFP::Imm8ToFP64(imm8);
}
};
class ImmediateVbic : public EncodingValueAndImmediate {
public:
ImmediateVbic(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVand : public ImmediateVbic {
public:
ImmediateVand(DataType dt, const NeonImmediate neon_imm)
: ImmediateVbic(dt, neon_imm) {
if (IsValid()) {
SetEncodedImmediate(~GetEncodedImmediate() & 0xff);
}
}
};
class ImmediateVmov : public EncodingValueAndImmediate {
public:
ImmediateVmov(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVmvn : public EncodingValueAndImmediate {
public:
ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVorr : public EncodingValueAndImmediate {
public:
ImmediateVorr(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVorn : public ImmediateVorr {
public:
ImmediateVorn(DataType dt, const NeonImmediate& neon_imm)
: ImmediateVorr(dt, neon_imm) {
if (IsValid()) {
SetEncodedImmediate(~GetEncodedImmediate() & 0xff);
}
}
};
// MemOperand represents the addressing mode of a load or store instruction.
//
// Usage: <instr> <Rt> , <MemOperand>
//
// where <instr> is the instruction to use (e.g., Ldr(), Str(), etc.),
// <Rt> is general purpose register to be transferred,
// <MemOperand> is the rest of the arguments to the instruction
//
// <MemOperand> can be in one of 3 addressing modes:
//
// [ <Rn>, <offset> ] == offset addressing
// [ <Rn>, <offset> ]! == pre-indexed addressing
// [ <Rn> ], <offset> == post-indexed addressing
//
// where <offset> can be one of:
// - an immediate constant, such as <imm8>, <imm12>
// - an index register <Rm>
// - a shifted index register <Rm>, <shift> #<amount>
//
// The index register may have an associated {+/-} sign,
// which, if omitted, defaults to +.
//
// We have two constructors for the offset:
//
// One with a signed value offset parameter. The value of sign_ is
// "sign_of(constructor's offset parameter) and the value of offset_ is
// "constructor's offset parameter".
//
// The other with a sign and a positive value offset parameters. The value of
// sign_ is "constructor's sign parameter" and the value of offset_ is
// "constructor's sign parameter * constructor's offset parameter".
//
// The value of offset_ reflects the effective offset. For an offset_ of 0,
// sign_ can be positive or negative. Otherwise, sign_ always agrees with
// the sign of offset_.
class MemOperand {
public:
// rn
// where rn is the general purpose base register only
explicit MemOperand(Register rn, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(NoReg),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode | kMemOperandRegisterOnly) {
VIXL_ASSERT(rn_.IsValid());
}
// rn, #<imm>
// where rn is the general purpose base register,
// <imm> is a 32-bit offset to add to rn
//
// Note: if rn is PC, then this form is equivalent to a "label"
// Note: the second constructor allows minus zero (-0).
MemOperand(Register rn, int32_t offset, AddrMode addrmode = Offset)
: rn_(rn),
offset_(offset),
sign_((offset < 0) ? minus : plus),
rm_(NoReg),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid());
}
MemOperand(Register rn, Sign sign, int32_t offset, AddrMode addrmode = Offset)
: rn_(rn),
offset_(sign.IsPlus() ? offset : -offset),
sign_(sign),
rm_(NoReg),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid());
// With this constructor, the sign must only be specified by "sign".
VIXL_ASSERT(offset >= 0);
}
// rn, {+/-}rm
// where rn is the general purpose base register,
// {+/-} is the sign of the index register,
// rm is the general purpose index register,
MemOperand(Register rn, Sign sign, Register rm, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(sign),
rm_(rm),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
}
// rn, rm
// where rn is the general purpose base register,
// rm is the general purpose index register,
MemOperand(Register rn, Register rm, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(rm),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
}
// rn, {+/-}rm, <shift>
// where rn is the general purpose base register,
// {+/-} is the sign of the index register,
// rm is the general purpose index register,
// <shift> is RRX, applied to value from rm
MemOperand(Register rn,
Sign sign,
Register rm,
Shift shift,
AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(sign),
rm_(rm),
shift_(shift),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
VIXL_ASSERT(shift_.IsRRX());
}
// rn, rm, <shift>
// where rn is the general purpose base register,
// rm is the general purpose index register,
// <shift> is RRX, applied to value from rm
MemOperand(Register rn, Register rm, Shift shift, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(rm),
shift_(shift),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
VIXL_ASSERT(shift_.IsRRX());
}
// rn, {+/-}rm, <shift> #<amount>
// where rn is the general purpose base register,
// {+/-} is the sign of the index register,
// rm is the general purpose index register,
// <shift> is one of {LSL, LSR, ASR, ROR}, applied to value from rm
// <shift_amount> is optional size to apply to value from rm
MemOperand(Register rn,
Sign sign,
Register rm,
Shift shift,
uint32_t shift_amount,
AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(sign),
rm_(rm),
shift_(shift),
shift_amount_(shift_amount),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
CheckShift();
}
// rn, rm, <shift> #<amount>
// where rn is the general purpose base register,
// rm is the general purpose index register,
// <shift> is one of {LSL, LSR, ASR, ROR}, applied to value from rm
// <shift_amount> is optional size to apply to value from rm
MemOperand(Register rn,
Register rm,
Shift shift,
uint32_t shift_amount,
AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(rm),
shift_(shift),
shift_amount_(shift_amount),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
CheckShift();
}
Register GetBaseRegister() const { return rn_; }
int32_t GetOffsetImmediate() const { return offset_; }
bool IsOffsetImmediateWithinRange(int min,
int max,
int multiple_of = 1) const {
return (offset_ >= min) && (offset_ <= max) &&
((offset_ % multiple_of) == 0);
}
Sign GetSign() const { return sign_; }
Register GetOffsetRegister() const { return rm_; }
Shift GetShift() const { return shift_; }
unsigned GetShiftAmount() const { return shift_amount_; }
AddrMode GetAddrMode() const {
return static_cast<AddrMode>(addrmode_ & kMemOperandAddrModeMask);
}
bool IsRegisterOnly() const {
return (addrmode_ & kMemOperandRegisterOnly) != 0;
}
bool IsImmediate() const { return !rm_.IsValid(); }
bool IsImmediateZero() const { return !rm_.IsValid() && (offset_ == 0); }
bool IsPlainRegister() const {
return rm_.IsValid() && shift_.IsLSL() && (shift_amount_ == 0);
}
bool IsShiftedRegister() const { return rm_.IsValid(); }
bool IsImmediateOffset() const {
return (GetAddrMode() == Offset) && !rm_.IsValid();
}
bool IsImmediateZeroOffset() const {
return (GetAddrMode() == Offset) && !rm_.IsValid() && (offset_ == 0);
}
bool IsRegisterOffset() const {
return (GetAddrMode() == Offset) && rm_.IsValid() && shift_.IsLSL() &&
(shift_amount_ == 0);
}
bool IsShiftedRegisterOffset() const {
return (GetAddrMode() == Offset) && rm_.IsValid();
}
uint32_t GetTypeEncodingValue() const {
return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue();
}
bool IsOffset() const { return GetAddrMode() == Offset; }
bool IsPreIndex() const { return GetAddrMode() == PreIndex; }
bool IsPostIndex() const { return GetAddrMode() == PostIndex; }
bool IsShiftValid() const { return shift_.IsValidAmount(shift_amount_); }
private:
static const int kMemOperandRegisterOnly = 0x1000;
static const int kMemOperandAddrModeMask = 0xfff;
void CheckShift() {
#ifdef VIXL_DEBUG
// Disallow any zero shift other than RRX #0 and LSL #0 .
if ((shift_amount_ == 0) && shift_.IsRRX()) return;
if ((shift_amount_ == 0) && !shift_.IsLSL()) {
VIXL_ABORT_WITH_MSG(
"A shift by 0 is only accepted in "
"the case of lsl and will be treated as "
"no shift.\n");
}
switch (shift_.GetType()) {
case LSL:
VIXL_ASSERT(shift_amount_ <= 31);
break;
case ROR:
VIXL_ASSERT(shift_amount_ <= 31);
break;
case LSR:
case ASR:
VIXL_ASSERT(shift_amount_ <= 32);
break;
case RRX:
default:
VIXL_UNREACHABLE();
break;
}
#endif
}
Register rn_;
int32_t offset_;
Sign sign_;
Register rm_;
Shift shift_;
uint32_t shift_amount_;
uint32_t addrmode_;
};
std::ostream& operator<<(std::ostream& os, const MemOperand& operand);
class AlignedMemOperand : public MemOperand {
public:
AlignedMemOperand(Register rn, Alignment align, AddrMode addrmode = Offset)
: MemOperand(rn, addrmode), align_(align) {
VIXL_ASSERT(addrmode != PreIndex);
}
AlignedMemOperand(Register rn,
Alignment align,
Register rm,
AddrMode addrmode)
: MemOperand(rn, rm, addrmode), align_(align) {
VIXL_ASSERT(addrmode != PreIndex);
}
Alignment GetAlignment() const { return align_; }
private:
Alignment align_;
};
std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand);
} // namespace aarch32
} // namespace vixl
#endif // VIXL_AARCH32_OPERANDS_AARCH32_H_
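Illustrative constructions (not part of the commit) of the operand and addressing-mode types declared above; register names, Sign (plus/minus), Shift and AddrMode values come from instructions-aarch32.h, and the include path is an assumption:

#include "aarch32/operands-aarch32.h"  // Assumed include path.

void OperandForms() {
  using namespace vixl::aarch32;
  Operand imm(0x12u);                     // #0x12
  Operand lsl2(r1, LSL, 2);               // r1, LSL #2 (immediate-shifted register)
  Operand asr_rs(r1, ASR, r2);            // r1, ASR r2 (register-shifted register)
  MemOperand plain(r0);                   // [r0]
  MemOperand off(r0, 8);                  // [r0, #8]   (offset addressing)
  MemOperand pre(r0, 8, PreIndex);        // [r0, #8]!  (pre-indexed)
  MemOperand post(r0, 8, PostIndex);      // [r0], #8   (post-indexed)
  MemOperand idx(r0, minus, r3, LSL, 2);  // [r0, -r3, LSL #2]
  (void)imm; (void)lsl2; (void)asr_rs;
  (void)plain; (void)off; (void)pre; (void)post; (void)idx;
}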


@@ -0,0 +1,167 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The ABI features are only supported with C++11 or later.
#if __cplusplus >= 201103L
// This should not be defined manually.
#define VIXL_HAS_ABI_SUPPORT
#elif defined(VIXL_HAS_ABI_SUPPORT)
#error "The ABI support requires C++11 or later."
#endif
#ifdef VIXL_HAS_ABI_SUPPORT
#ifndef VIXL_AARCH64_ABI_AARCH64_H_
#define VIXL_AARCH64_ABI_AARCH64_H_
#include <algorithm>
#include <type_traits>
#include "../globals-vixl.h"
#include "instructions-aarch64.h"
#include "operands-aarch64.h"
namespace vixl {
namespace aarch64 {
// Class describing the AArch64 procedure call standard, as defined in "ARM
// Procedure Call Standard for the ARM 64-bit Architecture (AArch64)",
// release 1.0 (AAPCS below).
//
// The stages in the comments match the description in that document.
//
// Stage B does not apply to arguments handled by this class.
class ABI {
public:
explicit ABI(Register stack_pointer = sp) : stack_pointer_(stack_pointer) {
// Stage A - Initialization
Reset();
}
void Reset() {
NGRN_ = 0;
NSRN_ = 0;
stack_offset_ = 0;
}
int GetStackSpaceRequired() { return stack_offset_; }
// The logic is described in section 5.5 of the AAPCS.
template <typename T>
GenericOperand GetReturnGenericOperand() const {
ABI abi(stack_pointer_);
GenericOperand result = abi.GetNextParameterGenericOperand<T>();
VIXL_ASSERT(result.IsCPURegister());
return result;
}
// The logic is described in section 5.4.2 of the AAPCS.
// The `GenericOperand` returned describes the location reserved for the
// argument from the point of view of the callee.
template <typename T>
GenericOperand GetNextParameterGenericOperand() {
const bool is_floating_point_type = std::is_floating_point<T>::value;
const bool is_integral_type =
std::is_integral<T>::value || std::is_enum<T>::value;
const bool is_pointer_type = std::is_pointer<T>::value;
int type_alignment = std::alignment_of<T>::value;
// We only support basic types.
VIXL_ASSERT(is_floating_point_type || is_integral_type || is_pointer_type);
// To ensure we get the correct type of operand when simulating on a 32-bit
// host, force the size of pointer types to the native AArch64 pointer size.
unsigned size = is_pointer_type ? 8 : sizeof(T);
// The size of the 'operand' reserved for the argument.
unsigned operand_size = AlignUp(size, kWRegSizeInBytes);
if (size > 8) {
VIXL_UNIMPLEMENTED();
return GenericOperand();
}
// Stage C.1
if (is_floating_point_type && (NSRN_ < 8)) {
return GenericOperand(FPRegister(NSRN_++, size * kBitsPerByte));
}
// Stages C.2, C.3, and C.4: Unsupported. Caught by the assertions above.
// Stages C.5 and C.6
if (is_floating_point_type) {
VIXL_STATIC_ASSERT(
!is_floating_point_type ||
(std::is_same<T, float>::value || std::is_same<T, double>::value));
int offset = stack_offset_;
stack_offset_ += 8;
return GenericOperand(MemOperand(stack_pointer_, offset), operand_size);
}
// Stage C.7
if ((is_integral_type || is_pointer_type) && (size <= 8) && (NGRN_ < 8)) {
return GenericOperand(Register(NGRN_++, operand_size * kBitsPerByte));
}
// Stage C.8
if (type_alignment == 16) {
NGRN_ = AlignUp(NGRN_, 2);
}
// Stage C.9
if (is_integral_type && (size == 16) && (NGRN_ < 7)) {
VIXL_UNIMPLEMENTED();
return GenericOperand();
}
// Stage C.10: Unsupported. Caught by the assertions above.
// Stage C.11
NGRN_ = 8;
// Stage C.12
stack_offset_ = AlignUp(stack_offset_, std::max(type_alignment, 8));
// Stage C.13: Unsupported. Caught by the assertions above.
// Stage C.14
VIXL_ASSERT(size <= 8u);
size = std::max(size, 8u);
int offset = stack_offset_;
stack_offset_ += size;
return GenericOperand(MemOperand(stack_pointer_, offset), operand_size);
}
private:
Register stack_pointer_;
// Next General-purpose Register Number.
int NGRN_;
// Next SIMD and Floating-point Register Number.
int NSRN_;
// The acronym "NSAA" used in the standard refers to the "Next Stacked
// Argument Address". Here we deal with offsets from the stack pointer.
int stack_offset_;
};
template <>
inline GenericOperand ABI::GetReturnGenericOperand<void>() const {
return GenericOperand();
}
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_ABI_AARCH64_H_
#endif // VIXL_HAS_ABI_SUPPORT
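A hedged sketch, not part of the commit, of walking a C-style signature such as int f(int, double, void*) with the ABI helper above; the register comments follow the AAPCS stages implemented in GetNextParameterGenericOperand(), and the include path is an assumption:

#include "aarch64/abi-aarch64.h"  // Assumed include path; requires C++11.

void DescribeSignature() {
  using namespace vixl::aarch64;
  ABI abi;  // Defaults to sp as the stack pointer; the constructor runs stage A.
  GenericOperand a0 = abi.GetNextParameterGenericOperand<int>();     // w0 (stage C.7)
  GenericOperand a1 = abi.GetNextParameterGenericOperand<double>();  // d0 (stage C.1)
  GenericOperand a2 = abi.GetNextParameterGenericOperand<void*>();   // x1 (stage C.7)
  GenericOperand ret = abi.GetReturnGenericOperand<int>();           // w0
  int stack_bytes = abi.GetStackSpaceRequired();  // 0: everything fit in registers.
  (void)a0; (void)a1; (void)a2; (void)ret; (void)stack_bytes;
}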

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,86 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CPU_AARCH64_H
#define VIXL_CPU_AARCH64_H
#include "../globals-vixl.h"
#include "instructions-aarch64.h"
namespace vixl {
namespace aarch64 {
class CPU {
public:
// Initialise CPU support.
static void SetUp();
// Ensures the data at a given address and with a given size is the same for
// the I and D caches. I and D caches are not automatically coherent on ARM
// so this operation is required before any dynamically generated code can
// safely run.
static void EnsureIAndDCacheCoherency(void *address, size_t length);
// Handle tagged pointers.
template <typename T>
static T SetPointerTag(T pointer, uint64_t tag) {
VIXL_ASSERT(IsUintN(kAddressTagWidth, tag));
// Use C-style casts to get static_cast behaviour for integral types (T),
// and reinterpret_cast behaviour for other types.
uint64_t raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
return (T)raw;
}
template <typename T>
static uint64_t GetPointerTag(T pointer) {
// Use C-style casts to get static_cast behaviour for integral types (T),
// and reinterpret_cast behaviour for other types.
uint64_t raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
return (raw & kAddressTagMask) >> kAddressTagOffset;
}
private:
// Return the content of the cache type register.
static uint32_t GetCacheType();
// I and D cache line size in bytes.
static unsigned icache_line_size_;
static unsigned dcache_line_size_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_CPU_AARCH64_H
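
A short usage sketch for the CPU helpers declared above; the buffer handling and include path are placeholders, and only EnsureIAndDCacheCoherency, SetPointerTag and GetPointerTag come from this header.

    #include <cstddef>
    #include <cstdint>
    #include "aarch64/cpu-aarch64.h"  // path assumes vixl's src/ directory is on the include path

    using vixl::aarch64::CPU;

    // Make freshly generated code visible to the instruction stream before
    // jumping to it.
    void PublishCode(void* code, size_t size) {
      CPU::EnsureIAndDCacheCoherency(code, size);
    }

    // Top-byte pointer tagging: the tag occupies bits 63:56 of the address.
    void TagExample() {
      int value = 0;
      int* tagged = CPU::SetPointerTag(&value, 0x5a);
      uint64_t tag = CPU::GetPointerTag(tagged);  // yields 0x5a
      (void)tag;
    }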

View File

@ -0,0 +1,125 @@
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Arm Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
#define VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
#include <iostream>
#include "../cpu-features.h"
#include "decoder-aarch64.h"
namespace vixl {
namespace aarch64 {
// This visitor records the CPU features that each decoded instruction requires.
// It provides:
// - the set of CPU features required by the most recently decoded instruction,
// - a cumulative set of encountered CPU features,
// - an optional list of 'available' CPU features.
//
// Primarily, this allows the Disassembler and Simulator to share the same CPU
// features logic. However, it can be used standalone to scan code blocks for
// CPU features.
class CPUFeaturesAuditor : public DecoderVisitor {
public:
// Construction arguments:
// - If a decoder is specified, the CPUFeaturesAuditor automatically
// registers itself as a visitor. Otherwise, this can be done manually.
//
// - If an `available` features list is provided, it is used as a hint in
// cases where instructions may be provided by multiple separate features.
// An example of this is FP&SIMD loads and stores: some of these are used
// in both FP and integer SIMD code. If exactly one of those features is
// in `available` when one of these instructions is encountered, then the
// auditor will record that feature. Otherwise, it will record _both_
// features.
explicit CPUFeaturesAuditor(
Decoder* decoder, const CPUFeatures& available = CPUFeatures::None())
: available_(available), decoder_(decoder) {
if (decoder_ != NULL) decoder_->AppendVisitor(this);
}
explicit CPUFeaturesAuditor(
const CPUFeatures& available = CPUFeatures::None())
: available_(available), decoder_(NULL) {}
virtual ~CPUFeaturesAuditor() {
if (decoder_ != NULL) decoder_->RemoveVisitor(this);
}
void ResetSeenFeatures() {
seen_ = CPUFeatures::None();
last_instruction_ = CPUFeatures::None();
}
// Query or set available CPUFeatures.
const CPUFeatures& GetAvailableFeatures() const { return available_; }
void SetAvailableFeatures(const CPUFeatures& available) {
available_ = available;
}
// Query CPUFeatures seen since construction (or the last call to `Reset()`).
const CPUFeatures& GetSeenFeatures() const { return seen_; }
// Query CPUFeatures from the last instruction visited by this auditor.
const CPUFeatures& GetInstructionFeatures() const {
return last_instruction_;
}
bool InstructionIsAvailable() const {
return available_.Has(last_instruction_);
}
// The common CPUFeatures interface operates on the available_ list.
CPUFeatures* GetCPUFeatures() { return &available_; }
void SetCPUFeatures(const CPUFeatures& available) {
SetAvailableFeatures(available);
}
// Declare all Visitor functions.
#define DECLARE(A) \
virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
class RecordInstructionFeaturesScope;
void LoadStoreHelper(const Instruction* instr);
void LoadStorePairHelper(const Instruction* instr);
CPUFeatures seen_;
CPUFeatures last_instruction_;
CPUFeatures available_;
Decoder* decoder_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
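
The constructor behaviour described above lends itself to a small sketch: attach an auditor to a decoder, decode a block, then query the accumulated feature set. Everything used here is declared in this header or in decoder-aarch64.h; the instruction range is assumed to come from elsewhere.

    using namespace vixl::aarch64;

    CPUFeatures RequiredFeatures(const Instruction* start, const Instruction* end) {
      Decoder decoder;
      // Passing the decoder makes the auditor register itself as a visitor.
      CPUFeaturesAuditor auditor(&decoder, CPUFeatures::None());
      decoder.Decode(start, end);
      // Cumulative set of features needed by every decoded instruction.
      return auditor.GetSeenFeatures();
    }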

View File

@ -0,0 +1,290 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_DECODER_AARCH64_H_
#define VIXL_AARCH64_DECODER_AARCH64_H_
#include <list>
#include "../globals-vixl.h"
#include "instructions-aarch64.h"
// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST_THAT_RETURN(V) \
V(AddSubExtended) \
V(AddSubImmediate) \
V(AddSubShifted) \
V(AddSubWithCarry) \
V(AtomicMemory) \
V(Bitfield) \
V(CompareBranch) \
V(ConditionalBranch) \
V(ConditionalCompareImmediate) \
V(ConditionalCompareRegister) \
V(ConditionalSelect) \
V(Crypto2RegSHA) \
V(Crypto3RegSHA) \
V(CryptoAES) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(Exception) \
V(Extract) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPFixedPointConvert) \
V(FPImmediate) \
V(FPIntegerConvert) \
V(LoadLiteral) \
V(LoadStoreExclusive) \
V(LoadStorePairNonTemporal) \
V(LoadStorePairOffset) \
V(LoadStorePairPostIndex) \
V(LoadStorePairPreIndex) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnscaledOffset) \
V(LoadStoreUnsignedOffset) \
V(LogicalImmediate) \
V(LogicalShifted) \
V(MoveWideImmediate) \
V(NEON2RegMisc) \
V(NEON2RegMiscFP16) \
V(NEON3Different) \
V(NEON3Same) \
V(NEON3SameExtra) \
V(NEON3SameFP16) \
V(NEONAcrossLanes) \
V(NEONByIndexedElement) \
V(NEONCopy) \
V(NEONExtract) \
V(NEONLoadStoreMultiStruct) \
V(NEONLoadStoreMultiStructPostIndex) \
V(NEONLoadStoreSingleStruct) \
V(NEONLoadStoreSingleStructPostIndex) \
V(NEONModifiedImmediate) \
V(NEONPerm) \
V(NEONScalar2RegMisc) \
V(NEONScalar2RegMiscFP16) \
V(NEONScalar3Diff) \
V(NEONScalar3Same) \
V(NEONScalar3SameExtra) \
V(NEONScalar3SameFP16) \
V(NEONScalarByIndexedElement) \
V(NEONScalarCopy) \
V(NEONScalarPairwise) \
V(NEONScalarShiftImmediate) \
V(NEONShiftImmediate) \
V(NEONTable) \
V(PCRelAddressing) \
V(System) \
V(TestBranch) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister)
#define VISITOR_LIST_THAT_DONT_RETURN(V) \
V(Unallocated) \
V(Unimplemented)
#define VISITOR_LIST(V) \
VISITOR_LIST_THAT_RETURN(V) \
VISITOR_LIST_THAT_DONT_RETURN(V)
namespace vixl {
namespace aarch64 {
// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
class DecoderVisitor {
public:
enum VisitorConstness { kConstVisitor, kNonConstVisitor };
explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
: constness_(constness) {}
virtual ~DecoderVisitor() {}
#define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
VISITOR_LIST(DECLARE)
#undef DECLARE
bool IsConstVisitor() const { return constness_ == kConstVisitor; }
Instruction* MutableInstruction(const Instruction* instr) {
VIXL_ASSERT(!IsConstVisitor());
return const_cast<Instruction*>(instr);
}
private:
const VisitorConstness constness_;
};
class Decoder {
public:
Decoder() {}
// Top-level wrappers around the actual decoding function.
void Decode(const Instruction* instr) {
std::list<DecoderVisitor*>::iterator it;
for (it = visitors_.begin(); it != visitors_.end(); it++) {
VIXL_ASSERT((*it)->IsConstVisitor());
}
DecodeInstruction(instr);
}
void Decode(Instruction* instr) {
DecodeInstruction(const_cast<const Instruction*>(instr));
}
// Decode all instructions from start (inclusive) to end (exclusive).
template <typename T>
void Decode(T start, T end) {
for (T instr = start; instr < end; instr = instr->GetNextInstruction()) {
Decode(instr);
}
}
// Register a new visitor class with the decoder.
// Decode() will call the corresponding visitor method from all registered
// visitor classes when decoding reaches the leaf node of the instruction
// decode tree.
// Visitors are called in order.
// A visitor can be registered multiple times.
//
// d.AppendVisitor(V1);
// d.AppendVisitor(V2);
// d.PrependVisitor(V2);
// d.AppendVisitor(V3);
//
// d.Decode(i);
//
// will call in order visitor methods in V2, V1, V2, V3.
void AppendVisitor(DecoderVisitor* visitor);
void PrependVisitor(DecoderVisitor* visitor);
// These helpers register `new_visitor` before or after the first instance of
// `registered_visitor` in the list.
// So if
// V1, V2, V1, V2
// are registered in this order in the decoder, calls to
// d.InsertVisitorAfter(V3, V1);
// d.InsertVisitorBefore(V4, V2);
// will yield the order
// V1, V3, V4, V2, V1, V2
//
// For more complex modifications of the order of registered visitors, one can
// directly access and modify the list of visitors via the `visitors()`
// accessor.
void InsertVisitorBefore(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
void InsertVisitorAfter(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
// Remove all instances of a previously registered visitor class from the list
// of visitors stored by the decoder.
void RemoveVisitor(DecoderVisitor* visitor);
#define DECLARE(A) void Visit##A(const Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
std::list<DecoderVisitor*>* visitors() { return &visitors_; }
private:
// Decodes an instruction and calls the visitor functions registered with the
// Decoder class.
void DecodeInstruction(const Instruction* instr);
// Decode the PC relative addressing instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x0.
void DecodePCRelAddressing(const Instruction* instr);
// Decode the add/subtract immediate instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x1.
void DecodeAddSubImmediate(const Instruction* instr);
// Decode the branch, system command, and exception generation parts of
// the instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
void DecodeBranchSystemException(const Instruction* instr);
// Decode the load and store parts of the instruction tree, and call
// the corresponding visitors.
// On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
void DecodeLoadStore(const Instruction* instr);
// Decode the logical immediate and move wide immediate parts of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x2.
void DecodeLogical(const Instruction* instr);
// Decode the bitfield and extraction parts of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x3.
void DecodeBitfieldExtract(const Instruction* instr);
// Decode the data processing parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
void DecodeDataProcessing(const Instruction* instr);
// Decode the floating point parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0xE, 0xF}.
void DecodeFP(const Instruction* instr);
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 29:25 = 0x6.
void DecodeNEONLoadStore(const Instruction* instr);
// Decode the Advanced SIMD (NEON) vector data processing part of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 28:25 = 0x7.
void DecodeNEONVectorDataProcessing(const Instruction* instr);
// Decode the Advanced SIMD (NEON) scalar data processing part of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 28:25 = 0xF.
void DecodeNEONScalarDataProcessing(const Instruction* instr);
private:
// Visitors are registered in a list.
std::list<DecoderVisitor*> visitors_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_DECODER_AARCH64_H_
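
As a concrete illustration of the visitor registration described above, here is a hedged sketch of a trivial visitor built only from the declarations in this header; it counts how many instructions the decoder visits.

    using namespace vixl::aarch64;

    class CountingVisitor : public DecoderVisitor {
     public:
      CountingVisitor() : count_(0) {}
      // Give every leaf visitor the same body via the VISITOR_LIST macro.
    #define DECLARE(A)                                                \
      virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE { \
        (void)instr;                                                  \
        count_++;                                                     \
      }
      VISITOR_LIST(DECLARE)
    #undef DECLARE
      uint64_t count_;
    };

    uint64_t CountInstructions(const Instruction* start, const Instruction* end) {
      Decoder decoder;
      CountingVisitor counter;
      decoder.AppendVisitor(&counter);  // invoked once per decoded instruction
      decoder.Decode(start, end);
      return counter.count_;
    }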

View File

@ -0,0 +1,217 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_DISASM_AARCH64_H
#define VIXL_AARCH64_DISASM_AARCH64_H
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "cpu-features-auditor-aarch64.h"
#include "decoder-aarch64.h"
#include "instructions-aarch64.h"
#include "operands-aarch64.h"
namespace vixl {
namespace aarch64 {
class Disassembler : public DecoderVisitor {
public:
Disassembler();
Disassembler(char* text_buffer, int buffer_size);
virtual ~Disassembler();
char* GetOutput();
// Declare all Visitor functions.
#define DECLARE(A) \
virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
VISITOR_LIST(DECLARE)
#undef DECLARE
protected:
virtual void ProcessOutput(const Instruction* instr);
// Default output functions. The functions below implement a default way of
// printing elements in the disassembly. A sub-class can override these to
// customize the disassembly output.
// Prints the name of a register.
// TODO: This currently doesn't allow renaming of V registers.
virtual void AppendRegisterNameToOutput(const Instruction* instr,
const CPURegister& reg);
// Prints a PC-relative offset. This is used for example when disassembling
// branches to immediate offsets.
virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
int64_t offset);
// Prints an address, in the general case. It can be code or data. This is
// used for example to print the target address of an ADR instruction.
virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
const void* addr);
// Prints the address of some code.
// This is used for example to print the target address of a branch to an
// immediate offset.
// A sub-class can for example override this method to lookup the address and
// print an appropriate name.
virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
const void* addr);
// Prints the address of some data.
// This is used for example to print the source address of a load literal
// instruction.
virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
const void* addr);
// Same as the above, but for addresses that are not relative to the code
// buffer. They are currently not used by VIXL.
virtual void AppendAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendCodeAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendDataAddressToOutput(const Instruction* instr,
const void* addr);
public:
// Get/Set the offset that should be added to code addresses when printing
// code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
// helpers.
// Below is an example of how a branch immediate instruction in memory at
// address 0xb010200 would disassemble with different offsets.
// Base address | Disassembly
// 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc)
// 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc)
// 0xb010200 | 0x0: b #+0xcc (addr 0xcc)
void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
int64_t CodeRelativeAddress(const void* instr);
private:
void Format(const Instruction* instr,
const char* mnemonic,
const char* format);
void Substitute(const Instruction* instr, const char* string);
int SubstituteField(const Instruction* instr, const char* format);
int SubstituteRegisterField(const Instruction* instr, const char* format);
int SubstituteImmediateField(const Instruction* instr, const char* format);
int SubstituteLiteralField(const Instruction* instr, const char* format);
int SubstituteBitfieldImmediateField(const Instruction* instr,
const char* format);
int SubstituteShiftField(const Instruction* instr, const char* format);
int SubstituteExtendField(const Instruction* instr, const char* format);
int SubstituteConditionField(const Instruction* instr, const char* format);
int SubstitutePCRelAddressField(const Instruction* instr, const char* format);
int SubstituteBranchTargetField(const Instruction* instr, const char* format);
int SubstituteLSRegOffsetField(const Instruction* instr, const char* format);
int SubstitutePrefetchField(const Instruction* instr, const char* format);
int SubstituteBarrierField(const Instruction* instr, const char* format);
int SubstituteSysOpField(const Instruction* instr, const char* format);
int SubstituteCrField(const Instruction* instr, const char* format);
bool RdIsZROrSP(const Instruction* instr) const {
return (instr->GetRd() == kZeroRegCode);
}
bool RnIsZROrSP(const Instruction* instr) const {
return (instr->GetRn() == kZeroRegCode);
}
bool RmIsZROrSP(const Instruction* instr) const {
return (instr->GetRm() == kZeroRegCode);
}
bool RaIsZROrSP(const Instruction* instr) const {
return (instr->GetRa() == kZeroRegCode);
}
bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
int64_t code_address_offset() const { return code_address_offset_; }
protected:
void ResetOutput();
void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);
void set_code_address_offset(int64_t code_address_offset) {
code_address_offset_ = code_address_offset;
}
char* buffer_;
uint32_t buffer_pos_;
uint32_t buffer_size_;
bool own_buffer_;
int64_t code_address_offset_;
};
class PrintDisassembler : public Disassembler {
public:
explicit PrintDisassembler(FILE* stream)
: cpu_features_auditor_(NULL),
cpu_features_prefix_("// Needs: "),
cpu_features_suffix_(""),
stream_(stream) {}
// Convenience helpers for quick disassembly, without having to manually
// create a decoder.
void DisassembleBuffer(const Instruction* start, uint64_t size);
void DisassembleBuffer(const Instruction* start, const Instruction* end);
void Disassemble(const Instruction* instr);
// If a CPUFeaturesAuditor is specified, it will be used to annotate
// disassembly. The CPUFeaturesAuditor is expected to visit the instructions
// _before_ the disassembler, such that the CPUFeatures information is
// available when the disassembler is called.
void RegisterCPUFeaturesAuditor(CPUFeaturesAuditor* auditor) {
cpu_features_auditor_ = auditor;
}
// Set the prefix to appear before the CPU features annotations.
void SetCPUFeaturesPrefix(const char* prefix) {
VIXL_ASSERT(prefix != NULL);
cpu_features_prefix_ = prefix;
}
// Set the suffix to appear after the CPU features annotations.
void SetCPUFeaturesSuffix(const char* suffix) {
VIXL_ASSERT(suffix != NULL);
cpu_features_suffix_ = suffix;
}
protected:
virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE;
CPUFeaturesAuditor* cpu_features_auditor_;
const char* cpu_features_prefix_;
const char* cpu_features_suffix_;
private:
FILE* stream_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_DISASM_AARCH64_H
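
Tying the pieces above together, a sketch of printing annotated disassembly for a code range; the output stream and the instruction range are placeholders, while the classes and methods are those declared in this header and its includes.

    #include <cstdio>

    using namespace vixl::aarch64;

    void PrintAnnotated(const Instruction* start, const Instruction* end) {
      Decoder decoder;
      // Register the auditor first so that the CPU feature information is
      // already computed when the disassembler visits each instruction.
      CPUFeaturesAuditor auditor(&decoder);
      PrintDisassembler disasm(stdout);
      disasm.RegisterCPUFeaturesAuditor(&auditor);
      decoder.AppendVisitor(&disasm);
      decoder.Decode(start, end);
    }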

View File

@ -0,0 +1,865 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
#define VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "constants-aarch64.h"
namespace vixl {
namespace aarch64 {
// ISA constants. --------------------------------------------------------------
typedef uint32_t Instr;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MBytes;
// This is the nominal page size (as used by the adrp instruction); the actual
// size of the memory pages allocated by the kernel is likely to differ.
const unsigned kPageSize = 4 * KBytes;
const unsigned kPageSizeLog2 = 12;
const unsigned kBRegSize = 8;
const unsigned kBRegSizeLog2 = 3;
const unsigned kBRegSizeInBytes = kBRegSize / 8;
const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
const unsigned kHRegSize = 16;
const unsigned kHRegSizeLog2 = 4;
const unsigned kHRegSizeInBytes = kHRegSize / 8;
const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const unsigned kQRegSize = 128;
const unsigned kQRegSizeLog2 = 7;
const unsigned kQRegSizeInBytes = kQRegSize / 8;
const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
const uint64_t kWRegMask = UINT64_C(0xffffffff);
const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kHRegMask = UINT64_C(0xffff);
const uint64_t kSRegMask = UINT64_C(0xffffffff);
const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSSignMask = UINT64_C(0x80000000);
const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
const uint64_t kWSignMask = UINT64_C(0x80000000);
const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
const uint64_t kByteMask = UINT64_C(0xff);
const uint64_t kHalfWordMask = UINT64_C(0xffff);
const uint64_t kWordMask = UINT64_C(0xffffffff);
const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
const uint64_t kHMaxUInt = UINT64_C(0xffff);
// Define k*MinInt with "-k*MaxInt - 1", because the hexadecimal representation
// (e.g. "INT32_C(0x80000000)") has implementation-defined behaviour.
const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
const int64_t kXMinInt = -kXMaxInt - 1;
const int32_t kWMaxInt = INT32_C(0x7fffffff);
const int32_t kWMinInt = -kWMaxInt - 1;
const int16_t kHMaxInt = INT16_C(0x7fff);
const int16_t kHMinInt = -kHMaxInt - 1;
const unsigned kFpRegCode = 29;
const unsigned kLinkRegCode = 30;
const unsigned kSpRegCode = 31;
const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;
const unsigned kAddressTagOffset = 56;
const unsigned kAddressTagWidth = 8;
const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1)
<< kAddressTagOffset;
VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
const uint64_t kTTBRMask = UINT64_C(1) << 55;
// Make these moved float constants backwards compatible
// with explicit vixl::aarch64:: namespace references.
using vixl::kDoubleMantissaBits;
using vixl::kDoubleExponentBits;
using vixl::kFloatMantissaBits;
using vixl::kFloatExponentBits;
using vixl::kFloat16MantissaBits;
using vixl::kFloat16ExponentBits;
using vixl::kFP16PositiveInfinity;
using vixl::kFP16NegativeInfinity;
using vixl::kFP32PositiveInfinity;
using vixl::kFP32NegativeInfinity;
using vixl::kFP64PositiveInfinity;
using vixl::kFP64NegativeInfinity;
using vixl::kFP16DefaultNaN;
using vixl::kFP32DefaultNaN;
using vixl::kFP64DefaultNaN;
unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);
enum ImmBranchType {
UnknownBranchType = 0,
CondBranchType = 1,
UncondBranchType = 2,
CompareBranchType = 3,
TestBranchType = 4
};
enum AddrMode { Offset, PreIndex, PostIndex };
enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
// Instructions. ---------------------------------------------------------------
class Instruction {
public:
Instr GetInstructionBits() const {
return *(reinterpret_cast<const Instr*>(this));
}
VIXL_DEPRECATED("GetInstructionBits", Instr InstructionBits() const) {
return GetInstructionBits();
}
void SetInstructionBits(Instr new_instr) {
*(reinterpret_cast<Instr*>(this)) = new_instr;
}
int ExtractBit(int pos) const { return (GetInstructionBits() >> pos) & 1; }
VIXL_DEPRECATED("ExtractBit", int Bit(int pos) const) {
return ExtractBit(pos);
}
uint32_t ExtractBits(int msb, int lsb) const {
return ExtractUnsignedBitfield32(msb, lsb, GetInstructionBits());
}
VIXL_DEPRECATED("ExtractBits", uint32_t Bits(int msb, int lsb) const) {
return ExtractBits(msb, lsb);
}
int32_t ExtractSignedBits(int msb, int lsb) const {
int32_t bits = *(reinterpret_cast<const int32_t*>(this));
return ExtractSignedBitfield32(msb, lsb, bits);
}
VIXL_DEPRECATED("ExtractSignedBits",
int32_t SignedBits(int msb, int lsb) const) {
return ExtractSignedBits(msb, lsb);
}
Instr Mask(uint32_t mask) const {
VIXL_ASSERT(mask != 0);
return GetInstructionBits() & mask;
}
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int32_t Get##Name() const { return this->Func(HighBit, LowBit); } \
VIXL_DEPRECATED("Get" #Name, int32_t Name() const) { return Get##Name(); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmPCRelLo and ImmPCRelHi.
int GetImmPCRel() const {
uint32_t hi = static_cast<uint32_t>(GetImmPCRelHi());
uint32_t lo = GetImmPCRelLo();
uint32_t offset = (hi << ImmPCRelLo_width) | lo;
int width = ImmPCRelLo_width + ImmPCRelHi_width;
return ExtractSignedBitfield32(width - 1, 0, offset);
}
VIXL_DEPRECATED("GetImmPCRel", int ImmPCRel() const) { return GetImmPCRel(); }
uint64_t GetImmLogical() const;
VIXL_DEPRECATED("GetImmLogical", uint64_t ImmLogical() const) {
return GetImmLogical();
}
unsigned GetImmNEONabcdefgh() const;
VIXL_DEPRECATED("GetImmNEONabcdefgh", unsigned ImmNEONabcdefgh() const) {
return GetImmNEONabcdefgh();
}
Float16 GetImmFP16() const;
float GetImmFP32() const;
VIXL_DEPRECATED("GetImmFP32", float ImmFP32() const) { return GetImmFP32(); }
double GetImmFP64() const;
VIXL_DEPRECATED("GetImmFP64", double ImmFP64() const) { return GetImmFP64(); }
Float16 GetImmNEONFP16() const;
float GetImmNEONFP32() const;
VIXL_DEPRECATED("GetImmNEONFP32", float ImmNEONFP32() const) {
return GetImmNEONFP32();
}
double GetImmNEONFP64() const;
VIXL_DEPRECATED("GetImmNEONFP64", double ImmNEONFP64() const) {
return GetImmNEONFP64();
}
unsigned GetSizeLS() const {
return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
}
VIXL_DEPRECATED("GetSizeLS", unsigned SizeLS() const) { return GetSizeLS(); }
unsigned GetSizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}
VIXL_DEPRECATED("GetSizeLSPair", unsigned SizeLSPair() const) {
return GetSizeLSPair();
}
int GetNEONLSIndex(int access_size_shift) const {
int64_t q = GetNEONQ();
int64_t s = GetNEONS();
int64_t size = GetNEONLSSize();
int64_t index = (q << 3) | (s << 2) | size;
return static_cast<int>(index >> access_size_shift);
}
VIXL_DEPRECATED("GetNEONLSIndex",
int NEONLSIndex(int access_size_shift) const) {
return GetNEONLSIndex(access_size_shift);
}
// Helpers.
bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
}
bool IsUncondBranchImm() const {
return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
}
bool IsCompareBranch() const {
return Mask(CompareBranchFMask) == CompareBranchFixed;
}
bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; }
bool IsImmBranch() const { return GetBranchType() != UnknownBranchType; }
bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}
bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}
bool IsAddSubImmediate() const {
return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
}
bool IsAddSubExtended() const {
return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
}
bool IsLoadOrStore() const {
return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
}
bool IsLoad() const;
bool IsStore() const;
bool IsLoadLiteral() const {
// This includes PRFM_lit.
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
bool IsMovn() const {
return (Mask(MoveWideImmediateMask) == MOVN_x) ||
(Mask(MoveWideImmediateMask) == MOVN_w);
}
static int GetImmBranchRangeBitwidth(ImmBranchType branch_type);
VIXL_DEPRECATED(
"GetImmBranchRangeBitwidth",
static int ImmBranchRangeBitwidth(ImmBranchType branch_type)) {
return GetImmBranchRangeBitwidth(branch_type);
}
static int32_t GetImmBranchForwardRange(ImmBranchType branch_type);
VIXL_DEPRECATED(
"GetImmBranchForwardRange",
static int32_t ImmBranchForwardRange(ImmBranchType branch_type)) {
return GetImmBranchForwardRange(branch_type);
}
static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode GetRdMode() const {
// The following instructions use sp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
// Logical (immediate) when not setting the flags.
// Otherwise, r31 is the zero register.
if (IsAddSubImmediate() || IsAddSubExtended()) {
if (Mask(AddSubSetFlagsBit)) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
if (IsLogicalImmediate()) {
// Of the logical (immediate) instructions, only ANDS (and its aliases)
// can set the flags. The others can all write into sp.
// Note that some logical operations are not available to
// immediate-operand instructions, so we have to combine two masks here.
if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
return Reg31IsZeroRegister;
}
VIXL_DEPRECATED("GetRdMode", Reg31Mode RdMode() const) { return GetRdMode(); }
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode GetRnMode() const {
// The following instructions use sp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
// Add/sub (extended).
// Otherwise, r31 is the zero register.
if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
return Reg31IsStackPointer;
}
return Reg31IsZeroRegister;
}
VIXL_DEPRECATED("GetRnMode", Reg31Mode RnMode() const) { return GetRnMode(); }
ImmBranchType GetBranchType() const {
if (IsCondBranchImm()) {
return CondBranchType;
} else if (IsUncondBranchImm()) {
return UncondBranchType;
} else if (IsCompareBranch()) {
return CompareBranchType;
} else if (IsTestBranch()) {
return TestBranchType;
} else {
return UnknownBranchType;
}
}
VIXL_DEPRECATED("GetBranchType", ImmBranchType BranchType() const) {
return GetBranchType();
}
// Find the target of this instruction. 'this' may be a branch or a
// PC-relative addressing instruction.
const Instruction* GetImmPCOffsetTarget() const;
VIXL_DEPRECATED("GetImmPCOffsetTarget",
const Instruction* ImmPCOffsetTarget() const) {
return GetImmPCOffsetTarget();
}
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(const Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(const Instruction* source);
// The range of a load literal instruction, expressed as 'instr +- range'.
// The range is actually the 'positive' range; the branch instruction can
// target [instr - range - kInstructionSize, instr + range].
static const int kLoadLiteralImmBitwidth = 19;
static const int kLoadLiteralRange =
(1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;
// Calculate the address of a literal referred to by a load-literal
// instruction, and return it as the specified type.
//
// The literal itself is safely mutable only if the backing buffer is safely
// mutable.
template <typename T>
T GetLiteralAddress() const {
uint64_t base_raw = reinterpret_cast<uint64_t>(this);
int64_t offset = GetImmLLiteral() * static_cast<int>(kLiteralEntrySize);
uint64_t address_raw = base_raw + offset;
// Cast the address using a C-style cast. A reinterpret_cast would be
// appropriate, but it can't cast one integral type to another.
T address = (T)(address_raw);
// Assert that the address can be represented by the specified type.
VIXL_ASSERT((uint64_t)(address) == address_raw);
return address;
}
template <typename T>
VIXL_DEPRECATED("GetLiteralAddress", T LiteralAddress() const) {
return GetLiteralAddress<T>();
}
uint32_t GetLiteral32() const {
uint32_t literal;
memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
return literal;
}
VIXL_DEPRECATED("GetLiteral32", uint32_t Literal32() const) {
return GetLiteral32();
}
uint64_t GetLiteral64() const {
uint64_t literal;
memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
return literal;
}
VIXL_DEPRECATED("GetLiteral64", uint64_t Literal64() const) {
return GetLiteral64();
}
float GetLiteralFP32() const { return RawbitsToFloat(GetLiteral32()); }
VIXL_DEPRECATED("GetLiteralFP32", float LiteralFP32() const) {
return GetLiteralFP32();
}
double GetLiteralFP64() const { return RawbitsToDouble(GetLiteral64()); }
VIXL_DEPRECATED("GetLiteralFP64", double LiteralFP64() const) {
return GetLiteralFP64();
}
Instruction* GetNextInstruction() { return this + kInstructionSize; }
const Instruction* GetNextInstruction() const {
return this + kInstructionSize;
}
VIXL_DEPRECATED("GetNextInstruction",
const Instruction* NextInstruction() const) {
return GetNextInstruction();
}
const Instruction* GetInstructionAtOffset(int64_t offset) const {
VIXL_ASSERT(IsWordAligned(this + offset));
return this + offset;
}
VIXL_DEPRECATED("GetInstructionAtOffset",
const Instruction* InstructionAtOffset(int64_t offset)
const) {
return GetInstructionAtOffset(offset);
}
template <typename T>
static Instruction* Cast(T src) {
return reinterpret_cast<Instruction*>(src);
}
template <typename T>
static const Instruction* CastConst(T src) {
return reinterpret_cast<const Instruction*>(src);
}
private:
int GetImmBranch() const;
static Float16 Imm8ToFloat16(uint32_t imm8);
static float Imm8ToFP32(uint32_t imm8);
static double Imm8ToFP64(uint32_t imm8);
void SetPCRelImmTarget(const Instruction* target);
void SetBranchImmTarget(const Instruction* target);
};
// Functions for handling NEON vector format information.
enum VectorFormat {
kFormatUndefined = 0xffffffff,
kFormat8B = NEON_8B,
kFormat16B = NEON_16B,
kFormat4H = NEON_4H,
kFormat8H = NEON_8H,
kFormat2S = NEON_2S,
kFormat4S = NEON_4S,
kFormat1D = NEON_1D,
kFormat2D = NEON_2D,
// Scalar formats. We add the scalar bit to distinguish between scalar and
// vector enumerations; the bit is always set in the encoding of scalar ops
// and always clear for vector ops. Although kFormatD and kFormat1D appear
// to be the same, their meaning is subtly different. The first is a scalar
// operation, the second a vector operation that only affects one lane.
kFormatB = NEON_B | NEONScalar,
kFormatH = NEON_H | NEONScalar,
kFormatS = NEON_S | NEONScalar,
kFormatD = NEON_D | NEONScalar,
// A value invented solely for FP16 scalar pairwise simulator trace tests.
kFormat2H = 0xfffffffe
};
const int kMaxLanesPerVector = 16;
VectorFormat VectorFormatHalfWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatHalfLanes(VectorFormat vform);
VectorFormat ScalarFormatFromLaneSize(int lanesize);
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatFillQ(VectorFormat vform);
VectorFormat ScalarFormatFromFormat(VectorFormat vform);
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
// TODO: Make the return types of these functions consistent.
unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
int LaneSizeInBytesFromFormat(VectorFormat vform);
int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
int LaneCountFromFormat(VectorFormat vform);
int MaxLaneCountFromFormat(VectorFormat vform);
bool IsVectorFormat(VectorFormat vform);
int64_t MaxIntFromFormat(VectorFormat vform);
int64_t MinIntFromFormat(VectorFormat vform);
uint64_t MaxUintFromFormat(VectorFormat vform);
// clang-format off
enum NEONFormat {
NF_UNDEF = 0,
NF_8B = 1,
NF_16B = 2,
NF_4H = 3,
NF_8H = 4,
NF_2S = 5,
NF_4S = 6,
NF_1D = 7,
NF_2D = 8,
NF_B = 9,
NF_H = 10,
NF_S = 11,
NF_D = 12
};
// clang-format on
static const unsigned kNEONFormatMaxBits = 6;
struct NEONFormatMap {
// The bit positions in the instruction to consider.
uint8_t bits[kNEONFormatMaxBits];
// Mapping from concatenated bits to format.
NEONFormat map[1 << kNEONFormatMaxBits];
};
class NEONFormatDecoder {
public:
enum SubstitutionMode { kPlaceholder, kFormat };
// Construct a format decoder with increasingly specific format maps for each
// substitution. If no format map is specified, the default is the integer
// format map.
explicit NEONFormatDecoder(const Instruction* instr) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(IntegerFormatMap());
}
NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(format);
}
NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format0,
const NEONFormatMap* format1) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(format0, format1);
}
NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format0,
const NEONFormatMap* format1,
const NEONFormatMap* format2) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(format0, format1, format2);
}
// Set the format mapping for all or individual substitutions.
void SetFormatMaps(const NEONFormatMap* format0,
const NEONFormatMap* format1 = NULL,
const NEONFormatMap* format2 = NULL) {
VIXL_ASSERT(format0 != NULL);
formats_[0] = format0;
formats_[1] = (format1 == NULL) ? formats_[0] : format1;
formats_[2] = (format2 == NULL) ? formats_[1] : format2;
}
void SetFormatMap(unsigned index, const NEONFormatMap* format) {
VIXL_ASSERT(index <= ArrayLength(formats_));
VIXL_ASSERT(format != NULL);
formats_[index] = format;
}
// Substitute %s in the input string with the placeholder string for each
// register, i.e. "'B", "'H", etc.
const char* SubstitutePlaceholders(const char* string) {
return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
}
// Substitute %s in the input string with a new string based on the
// substitution mode.
const char* Substitute(const char* string,
SubstitutionMode mode0 = kFormat,
SubstitutionMode mode1 = kFormat,
SubstitutionMode mode2 = kFormat) {
snprintf(form_buffer_,
sizeof(form_buffer_),
string,
GetSubstitute(0, mode0),
GetSubstitute(1, mode1),
GetSubstitute(2, mode2));
return form_buffer_;
}
// Append a "2" to a mnemonic string based on the state of the Q bit.
const char* Mnemonic(const char* mnemonic) {
if ((instrbits_ & NEON_Q) != 0) {
snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
return mne_buffer_;
}
return mnemonic;
}
VectorFormat GetVectorFormat(int format_index = 0) {
return GetVectorFormat(formats_[format_index]);
}
VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
static const VectorFormat vform[] = {kFormatUndefined,
kFormat8B,
kFormat16B,
kFormat4H,
kFormat8H,
kFormat2S,
kFormat4S,
kFormat1D,
kFormat2D,
kFormatB,
kFormatH,
kFormatS,
kFormatD};
VIXL_ASSERT(GetNEONFormat(format_map) < ArrayLength(vform));
return vform[GetNEONFormat(format_map)];
}
// Built in mappings for common cases.
// The integer format map uses three bits (Q, size<1:0>) to encode the
// "standard" set of NEON integer vector formats.
static const NEONFormatMap* IntegerFormatMap() {
static const NEONFormatMap map =
{{23, 22, 30},
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
return &map;
}
// The long integer format map uses two bits (size<1:0>) to encode the
// long set of NEON integer vector formats. These are used in narrow, wide
// and long operations.
static const NEONFormatMap* LongIntegerFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
return &map;
}
// The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
// formats: NF_2S, NF_4S, NF_2D.
static const NEONFormatMap* FPFormatMap() {
// The FP format map assumes two bits (Q, size<0>) are used to encode the
// NEON FP vector formats: NF_2S, NF_4S, NF_2D.
static const NEONFormatMap map = {{22, 30},
{NF_2S, NF_4S, NF_UNDEF, NF_2D}};
return &map;
}
// The FP16 format map uses one bit (Q) to encode the NEON vector format:
// NF_4H, NF_8H.
static const NEONFormatMap* FP16FormatMap() {
static const NEONFormatMap map = {{30}, {NF_4H, NF_8H}};
return &map;
}
// The load/store format map uses three bits (Q, 11, 10) to encode the
// set of NEON vector formats.
static const NEONFormatMap* LoadStoreFormatMap() {
static const NEONFormatMap map =
{{11, 10, 30},
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
return &map;
}
// The logical format map uses one bit (Q) to encode the NEON vector format:
// NF_8B, NF_16B.
static const NEONFormatMap* LogicalFormatMap() {
static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
return &map;
}
// The triangular format map uses between two and five bits to encode the NEON
// vector format:
// xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
// x1000->2S, x1001->4S, 10001->2D, all others undefined.
static const NEONFormatMap* TriangularFormatMap() {
static const NEONFormatMap map =
{{19, 18, 17, 16, 30},
{NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_UNDEF, NF_2D, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
return &map;
}
// The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
// formats: NF_B, NF_H, NF_S, NF_D.
static const NEONFormatMap* ScalarFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
return &map;
}
// The long scalar format map uses two bits (size<1:0>) to encode the longer
// NEON scalar formats: NF_H, NF_S, NF_D.
static const NEONFormatMap* LongScalarFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
return &map;
}
// The FP scalar format map assumes one bit (size<0>) is used to encode the
// NEON FP scalar formats: NF_S, NF_D.
static const NEONFormatMap* FPScalarFormatMap() {
static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
return &map;
}
// The FP scalar pairwise format map assumes two bits (U, size<0>) are used to
// encode the NEON FP scalar formats: NF_H, NF_S, NF_D.
static const NEONFormatMap* FPScalarPairwiseFormatMap() {
static const NEONFormatMap map = {{29, 22}, {NF_H, NF_UNDEF, NF_S, NF_D}};
return &map;
}
// The triangular scalar format map uses between one and four bits to encode
// the NEON FP scalar formats:
// xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
static const NEONFormatMap* TriangularScalarFormatMap() {
static const NEONFormatMap map = {{19, 18, 17, 16},
{NF_UNDEF,
NF_B,
NF_H,
NF_B,
NF_S,
NF_B,
NF_H,
NF_B,
NF_D,
NF_B,
NF_H,
NF_B,
NF_S,
NF_B,
NF_H,
NF_B}};
return &map;
}
private:
// Get a pointer to a string that represents the format or placeholder for
// the specified substitution index, based on the format map and instruction.
const char* GetSubstitute(int index, SubstitutionMode mode) {
if (mode == kFormat) {
return NEONFormatAsString(GetNEONFormat(formats_[index]));
}
VIXL_ASSERT(mode == kPlaceholder);
return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
}
// Get the NEONFormat enumerated value for bits obtained from the
// instruction based on the specified format mapping.
NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
return format_map->map[PickBits(format_map->bits)];
}
// Convert a NEONFormat into a string.
static const char* NEONFormatAsString(NEONFormat format) {
// clang-format off
static const char* formats[] = {
"undefined",
"8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
"b", "h", "s", "d"
};
// clang-format on
VIXL_ASSERT(format < ArrayLength(formats));
return formats[format];
}
// Convert a NEONFormat into a register placeholder string.
static const char* NEONFormatAsPlaceholder(NEONFormat format) {
VIXL_ASSERT((format == NF_B) || (format == NF_H) || (format == NF_S) ||
(format == NF_D) || (format == NF_UNDEF));
// clang-format off
static const char* formats[] = {
"undefined",
"undefined", "undefined", "undefined", "undefined",
"undefined", "undefined", "undefined", "undefined",
"'B", "'H", "'S", "'D"
};
// clang-format on
return formats[format];
}
// Select bits from instrbits_ defined by the bits array, concatenate them,
// and return the value.
uint8_t PickBits(const uint8_t bits[]) {
uint8_t result = 0;
for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
if (bits[b] == 0) break;
result <<= 1;
result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
}
return result;
}
Instr instrbits_;
const NEONFormatMap* formats_[3];
char form_buffer_[64];
char mne_buffer_[16];
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
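
A small sketch of the Instruction accessors declared above, for example to follow an immediate branch found in a code buffer; the pointer is assumed to reference a valid, word-aligned A64 instruction.

    using namespace vixl::aarch64;

    const Instruction* BranchTargetOrNull(const void* pc) {
      const Instruction* instr = Instruction::CastConst(pc);
      if (instr->IsImmBranch()) {
        // Covers B, B.cond, CBZ/CBNZ and TBZ/TBNZ encodings.
        return instr->GetImmPCOffsetTarget();
      }
      return NULL;
    }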

View File

@ -0,0 +1,117 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_INSTRUMENT_AARCH64_H_
#define VIXL_AARCH64_INSTRUMENT_AARCH64_H_
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "constants-aarch64.h"
#include "decoder-aarch64.h"
#include "instrument-aarch64.h"
namespace vixl {
namespace aarch64 {
const int kCounterNameMaxLength = 256;
const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
enum InstrumentState { InstrumentStateDisable = 0, InstrumentStateEnable = 1 };
enum CounterType {
Gauge = 0, // Gauge counters reset themselves after reading.
Cumulative = 1 // Cumulative counters keep their value after reading.
};
class Counter {
public:
explicit Counter(const char* name, CounterType type = Gauge);
void Increment();
void Enable();
void Disable();
bool IsEnabled();
uint64_t GetCount();
VIXL_DEPRECATED("GetCount", uint64_t count()) { return GetCount(); }
const char* GetName();
VIXL_DEPRECATED("GetName", const char* name()) { return GetName(); }
CounterType GetType();
VIXL_DEPRECATED("GetType", CounterType type()) { return GetType(); }
private:
char name_[kCounterNameMaxLength];
uint64_t count_;
bool enabled_;
CounterType type_;
};
class Instrument : public DecoderVisitor {
public:
explicit Instrument(
const char* datafile = NULL,
uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
~Instrument();
void Enable();
void Disable();
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
void Update();
void DumpCounters();
void DumpCounterNames();
void DumpEventMarker(unsigned marker);
void HandleInstrumentationEvent(unsigned event);
Counter* GetCounter(const char* name);
void InstrumentLoadStore(const Instruction* instr);
void InstrumentLoadStorePair(const Instruction* instr);
std::list<Counter*> counters_;
FILE* output_stream_;
// Counter information is dumped every sample_period_ instructions decoded.
// For a sample_period_ = 0 a final counter value is only produced when the
// Instrumentation class is destroyed.
uint64_t sample_period_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_INSTRUMENT_AARCH64_H_
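
Finally, a sketch of wiring the Instrument visitor above into a decoder; the output file name is a placeholder, everything else is declared in this header or in decoder-aarch64.h.

    using namespace vixl::aarch64;

    void ProfileBlock(const Instruction* start, const Instruction* end) {
      Decoder decoder;
      // Counters are dumped to the data file every 2^22 decoded instructions.
      Instrument instrument("vixl_stats.txt",
                            kDefaultInstrumentationSamplingPeriod);
      decoder.AppendVisitor(&instrument);
      decoder.Decode(start, end);
    }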

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,993 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_
#define VIXL_AARCH64_OPERANDS_AARCH64_H_
#include "instructions-aarch64.h"
namespace vixl {
namespace aarch64 {
typedef uint64_t RegList;
static const int kRegListSizeInBits = sizeof(RegList) * 8;
// Registers.
// Some CPURegister methods can return Register or VRegister types, so we need
// to declare them in advance.
class Register;
class VRegister;
class CPURegister {
public:
enum RegisterType {
// The kInvalid value is used to detect uninitialized static instances,
// which are always zero-initialized before any constructors are called.
kInvalid = 0,
kRegister,
kVRegister,
kFPRegister = kVRegister,
kNoRegister
};
CPURegister() : code_(0), size_(0), type_(kNoRegister) {
VIXL_ASSERT(!IsValid());
VIXL_ASSERT(IsNone());
}
CPURegister(unsigned code, unsigned size, RegisterType type)
: code_(code), size_(size), type_(type) {
VIXL_ASSERT(IsValidOrNone());
}
unsigned GetCode() const {
VIXL_ASSERT(IsValid());
return code_;
}
VIXL_DEPRECATED("GetCode", unsigned code() const) { return GetCode(); }
RegisterType GetType() const {
VIXL_ASSERT(IsValidOrNone());
return type_;
}
VIXL_DEPRECATED("GetType", RegisterType type() const) { return GetType(); }
RegList GetBit() const {
VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
}
VIXL_DEPRECATED("GetBit", RegList Bit() const) { return GetBit(); }
int GetSizeInBytes() const {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(size_ % 8 == 0);
return size_ / 8;
}
VIXL_DEPRECATED("GetSizeInBytes", int SizeInBytes() const) {
return GetSizeInBytes();
}
int GetSizeInBits() const {
VIXL_ASSERT(IsValid());
return size_;
}
VIXL_DEPRECATED("GetSizeInBits", unsigned size() const) {
return GetSizeInBits();
}
VIXL_DEPRECATED("GetSizeInBits", int SizeInBits() const) {
return GetSizeInBits();
}
bool Is8Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 8;
}
bool Is16Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 16;
}
bool Is32Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 32;
}
bool Is64Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 64;
}
bool Is128Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 128;
}
bool IsValid() const {
if (IsValidRegister() || IsValidVRegister()) {
VIXL_ASSERT(!IsNone());
return true;
} else {
// This assert is hit when the register has not been properly initialized.
// One cause for this can be an initialisation order fiasco. See
// https://isocpp.org/wiki/faq/ctors#static-init-order for some details.
VIXL_ASSERT(IsNone());
return false;
}
}
bool IsValidRegister() const {
return IsRegister() && ((size_ == kWRegSize) || (size_ == kXRegSize)) &&
((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
}
bool IsValidVRegister() const {
return IsVRegister() && ((size_ == kBRegSize) || (size_ == kHRegSize) ||
(size_ == kSRegSize) || (size_ == kDRegSize) ||
(size_ == kQRegSize)) &&
(code_ < kNumberOfVRegisters);
}
bool IsValidFPRegister() const {
return IsFPRegister() && (code_ < kNumberOfVRegisters);
}
bool IsNone() const {
// kNoRegister types should always have size 0 and code 0.
VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
return type_ == kNoRegister;
}
bool Aliases(const CPURegister& other) const {
VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
return (code_ == other.code_) && (type_ == other.type_);
}
bool Is(const CPURegister& other) const {
VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
return Aliases(other) && (size_ == other.size_);
}
bool IsZero() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kZeroRegCode);
}
bool IsSP() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kSPRegInternalCode);
}
bool IsRegister() const { return type_ == kRegister; }
bool IsVRegister() const { return type_ == kVRegister; }
bool IsFPRegister() const { return IsS() || IsD(); }
bool IsW() const { return IsValidRegister() && Is32Bits(); }
bool IsX() const { return IsValidRegister() && Is64Bits(); }
// These assertions ensure that the size and type of the register are as
// described. They do not consider the number of lanes that make up a vector.
// So, for example, Is8B() implies IsD(), and Is1D() implies IsD(), but IsD()
// does not imply Is1D() or Is8B().
// Check the number of lanes, i.e. the format of the vector, using methods such
// as Is8B(), Is1D(), etc. in the VRegister class.
bool IsV() const { return IsVRegister(); }
bool IsB() const { return IsV() && Is8Bits(); }
bool IsH() const { return IsV() && Is16Bits(); }
bool IsS() const { return IsV() && Is32Bits(); }
bool IsD() const { return IsV() && Is64Bits(); }
bool IsQ() const { return IsV() && Is128Bits(); }
// Semantic type for sdot and udot instructions.
bool IsS4B() const { return IsS(); }
const VRegister& S4B() const { return S(); }
const Register& W() const;
const Register& X() const;
const VRegister& V() const;
const VRegister& B() const;
const VRegister& H() const;
const VRegister& S() const;
const VRegister& D() const;
const VRegister& Q() const;
bool IsSameType(const CPURegister& other) const {
return type_ == other.type_;
}
bool IsSameSizeAndType(const CPURegister& other) const {
return (size_ == other.size_) && IsSameType(other);
}
protected:
unsigned code_;
int size_;
RegisterType type_;
private:
bool IsValidOrNone() const { return IsValid() || IsNone(); }
};
class Register : public CPURegister {
public:
Register() : CPURegister() {}
explicit Register(const CPURegister& other)
: CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()) {
VIXL_ASSERT(IsValidRegister());
}
Register(unsigned code, unsigned size) : CPURegister(code, size, kRegister) {}
bool IsValid() const {
VIXL_ASSERT(IsRegister() || IsNone());
return IsValidRegister();
}
static const Register& GetWRegFromCode(unsigned code);
VIXL_DEPRECATED("GetWRegFromCode",
static const Register& WRegFromCode(unsigned code)) {
return GetWRegFromCode(code);
}
static const Register& GetXRegFromCode(unsigned code);
VIXL_DEPRECATED("GetXRegFromCode",
static const Register& XRegFromCode(unsigned code)) {
return GetXRegFromCode(code);
}
private:
static const Register wregisters[];
static const Register xregisters[];
};
namespace internal {
template <int size_in_bits>
class FixedSizeRegister : public Register {
public:
FixedSizeRegister() : Register() {}
explicit FixedSizeRegister(unsigned code) : Register(code, size_in_bits) {
VIXL_ASSERT(IsValidRegister());
}
explicit FixedSizeRegister(const Register& other)
: Register(other.GetCode(), size_in_bits) {
VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
VIXL_ASSERT(IsValidRegister());
}
explicit FixedSizeRegister(const CPURegister& other)
: Register(other.GetCode(), other.GetSizeInBits()) {
VIXL_ASSERT(other.GetType() == kRegister);
VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
VIXL_ASSERT(IsValidRegister());
}
bool IsValid() const {
return Register::IsValid() && (GetSizeInBits() == size_in_bits);
}
};
} // namespace internal
typedef internal::FixedSizeRegister<kXRegSize> XRegister;
typedef internal::FixedSizeRegister<kWRegSize> WRegister;
class VRegister : public CPURegister {
public:
VRegister() : CPURegister(), lanes_(1) {}
explicit VRegister(const CPURegister& other)
: CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()),
lanes_(1) {
VIXL_ASSERT(IsValidVRegister());
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
VRegister(unsigned code, unsigned size, unsigned lanes = 1)
: CPURegister(code, size, kVRegister), lanes_(lanes) {
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
VRegister(unsigned code, VectorFormat format)
: CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister),
lanes_(IsVectorFormat(format) ? LaneCountFromFormat(format) : 1) {
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
bool IsValid() const {
VIXL_ASSERT(IsVRegister() || IsNone());
return IsValidVRegister();
}
static const VRegister& GetBRegFromCode(unsigned code);
VIXL_DEPRECATED("GetBRegFromCode",
static const VRegister& BRegFromCode(unsigned code)) {
return GetBRegFromCode(code);
}
static const VRegister& GetHRegFromCode(unsigned code);
VIXL_DEPRECATED("GetHRegFromCode",
static const VRegister& HRegFromCode(unsigned code)) {
return GetHRegFromCode(code);
}
static const VRegister& GetSRegFromCode(unsigned code);
VIXL_DEPRECATED("GetSRegFromCode",
static const VRegister& SRegFromCode(unsigned code)) {
return GetSRegFromCode(code);
}
static const VRegister& GetDRegFromCode(unsigned code);
VIXL_DEPRECATED("GetDRegFromCode",
static const VRegister& DRegFromCode(unsigned code)) {
return GetDRegFromCode(code);
}
static const VRegister& GetQRegFromCode(unsigned code);
VIXL_DEPRECATED("GetQRegFromCode",
static const VRegister& QRegFromCode(unsigned code)) {
return GetQRegFromCode(code);
}
static const VRegister& GetVRegFromCode(unsigned code);
VIXL_DEPRECATED("GetVRegFromCode",
static const VRegister& VRegFromCode(unsigned code)) {
return GetVRegFromCode(code);
}
VRegister V8B() const { return VRegister(code_, kDRegSize, 8); }
VRegister V16B() const { return VRegister(code_, kQRegSize, 16); }
VRegister V2H() const { return VRegister(code_, kSRegSize, 2); }
VRegister V4H() const { return VRegister(code_, kDRegSize, 4); }
VRegister V8H() const { return VRegister(code_, kQRegSize, 8); }
VRegister V2S() const { return VRegister(code_, kDRegSize, 2); }
VRegister V4S() const { return VRegister(code_, kQRegSize, 4); }
VRegister V2D() const { return VRegister(code_, kQRegSize, 2); }
VRegister V1D() const { return VRegister(code_, kDRegSize, 1); }
bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); }
bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); }
bool Is2H() const { return (Is32Bits() && (lanes_ == 2)); }
bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); }
bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); }
bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); }
bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); }
bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); }
bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); }
// For consistency, we assert the number of lanes of these scalar registers,
// even though there are no vectors of equivalent total size with which they
// could alias.
bool Is1B() const {
VIXL_ASSERT(!(Is8Bits() && IsVector()));
return Is8Bits();
}
bool Is1H() const {
VIXL_ASSERT(!(Is16Bits() && IsVector()));
return Is16Bits();
}
bool Is1S() const {
VIXL_ASSERT(!(Is32Bits() && IsVector()));
return Is32Bits();
}
// Semantic type for sdot and udot instructions.
bool Is1S4B() const { return Is1S(); }
bool IsLaneSizeB() const { return GetLaneSizeInBits() == kBRegSize; }
bool IsLaneSizeH() const { return GetLaneSizeInBits() == kHRegSize; }
bool IsLaneSizeS() const { return GetLaneSizeInBits() == kSRegSize; }
bool IsLaneSizeD() const { return GetLaneSizeInBits() == kDRegSize; }
int GetLanes() const { return lanes_; }
VIXL_DEPRECATED("GetLanes", int lanes() const) { return GetLanes(); }
bool IsScalar() const { return lanes_ == 1; }
bool IsVector() const { return lanes_ > 1; }
bool IsSameFormat(const VRegister& other) const {
return (size_ == other.size_) && (lanes_ == other.lanes_);
}
unsigned GetLaneSizeInBytes() const { return GetSizeInBytes() / lanes_; }
VIXL_DEPRECATED("GetLaneSizeInBytes", unsigned LaneSizeInBytes() const) {
return GetLaneSizeInBytes();
}
unsigned GetLaneSizeInBits() const { return GetLaneSizeInBytes() * 8; }
VIXL_DEPRECATED("GetLaneSizeInBits", unsigned LaneSizeInBits() const) {
return GetLaneSizeInBits();
}
private:
static const VRegister bregisters[];
static const VRegister hregisters[];
static const VRegister sregisters[];
static const VRegister dregisters[];
static const VRegister qregisters[];
static const VRegister vregisters[];
int lanes_;
};
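// Illustrative sketch (editorial addition, not part of the upstream VIXL
// header): the same architectural register can be re-viewed in any of the
// formats above, for example:
//
//   VRegister q = VRegister(0, kQRegSize);  // q0, a scalar 128-bit view.
//   q.V4S();                                // v0.4S: four 32-bit lanes.
//   q.V8B();                                // v0.8B: eight 8-bit lanes (in d0).
//   q.V4S().Is4S();                         // true
//   q.V4S().GetLaneSizeInBits();            // 32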
// Backward compatibility for FPRegisters.
typedef VRegister FPRegister;
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and VRegister
// variants are provided for convenience.
const Register NoReg;
const VRegister NoVReg;
const FPRegister NoFPReg; // For backward compatibility.
const CPURegister NoCPUReg;
#define DEFINE_REGISTERS(N) \
const WRegister w##N(N); \
const XRegister x##N(N);
AARCH64_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
const WRegister wsp(kSPRegInternalCode);
const XRegister sp(kSPRegInternalCode);
#define DEFINE_VREGISTERS(N) \
const VRegister b##N(N, kBRegSize); \
const VRegister h##N(N, kHRegSize); \
const VRegister s##N(N, kSRegSize); \
const VRegister d##N(N, kDRegSize); \
const VRegister q##N(N, kQRegSize); \
const VRegister v##N(N, kQRegSize);
AARCH64_REGISTER_CODE_LIST(DEFINE_VREGISTERS)
#undef DEFINE_VREGISTERS
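// Illustrative note (editorial addition, not part of the upstream VIXL
// header): for a single register code, the two macros above expand roughly to
// the following set of constants, one per view of the same architectural
// register:
//
//   const WRegister w0(0);             // 32-bit view of general register 0.
//   const XRegister x0(0);             // 64-bit view of general register 0.
//   const VRegister b0(0, kBRegSize);  // 8-bit scalar view of vector reg 0.
//   const VRegister h0(0, kHRegSize);  // 16-bit scalar view.
//   const VRegister s0(0, kSRegSize);  // 32-bit scalar view.
//   const VRegister d0(0, kDRegSize);  // 64-bit scalar view.
//   const VRegister q0(0, kQRegSize);  // 128-bit scalar view.
//   const VRegister v0(0, kQRegSize);  // Generic NEON name; same as q0.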
// Register aliases.
const XRegister ip0 = x16;
const XRegister ip1 = x17;
const XRegister lr = x30;
const XRegister xzr = x31;
const WRegister wzr = w31;
// AreAliased returns true if any of the named registers overlap. Arguments
// set to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoReg,
const CPURegister& reg4 = NoReg,
const CPURegister& reg5 = NoReg,
const CPURegister& reg6 = NoReg,
const CPURegister& reg7 = NoReg,
const CPURegister& reg8 = NoReg);
// AreSameSizeAndType returns true if all of the specified registers have the
// same size, and are of the same type. The system stack pointer may be
// specified. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreSameSizeAndType(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoCPUReg,
const CPURegister& reg4 = NoCPUReg,
const CPURegister& reg5 = NoCPUReg,
const CPURegister& reg6 = NoCPUReg,
const CPURegister& reg7 = NoCPUReg,
const CPURegister& reg8 = NoCPUReg);
// AreEven returns true if all of the specified registers have even register
// indices. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreEven(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoReg,
const CPURegister& reg4 = NoReg,
const CPURegister& reg5 = NoReg,
const CPURegister& reg6 = NoReg,
const CPURegister& reg7 = NoReg,
const CPURegister& reg8 = NoReg);
// AreConsecutive returns true if all of the specified registers are
// consecutive in the register file. Arguments set to NoReg are ignored, as are
// any subsequent arguments. At least one argument (reg1) must be valid
// (not NoCPUReg).
bool AreConsecutive(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoCPUReg,
const CPURegister& reg4 = NoCPUReg);
// AreSameFormat returns true if all of the specified VRegisters have the same
// vector format. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoVReg).
bool AreSameFormat(const VRegister& reg1,
const VRegister& reg2,
const VRegister& reg3 = NoVReg,
const VRegister& reg4 = NoVReg);
// AreConsecutive returns true if all of the specified VRegisters are
// consecutive in the register file. Arguments set to NoReg are ignored, as are
// any subsequent arguments. At least one argument (reg1) must be valid
// (not NoVReg).
bool AreConsecutive(const VRegister& reg1,
const VRegister& reg2,
const VRegister& reg3 = NoVReg,
const VRegister& reg4 = NoVReg);
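// Illustrative sketch (editorial addition, not part of the upstream VIXL
// header): these helpers are typically used to validate macro-assembler
// arguments with assertions, for example:
//
//   VIXL_ASSERT(!AreAliased(dst, src1, src2));         // No overlapping regs.
//   VIXL_ASSERT(AreSameSizeAndType(dst, src1, src2));  // All W or all X, etc.
//   VIXL_ASSERT(AreSameFormat(vd, vn, vm));            // E.g. all 4S vectors.
//
// Here dst/src1/src2 and vd/vn/vm simply stand for whichever registers the
// caller wants to check.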
// Lists of registers.
class CPURegList {
public:
explicit CPURegList(CPURegister reg1,
CPURegister reg2 = NoCPUReg,
CPURegister reg3 = NoCPUReg,
CPURegister reg4 = NoCPUReg)
: list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()),
size_(reg1.GetSizeInBits()),
type_(reg1.GetType()) {
VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
VIXL_ASSERT(IsValid());
}
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
: list_(list), size_(size), type_(type) {
VIXL_ASSERT(IsValid());
}
CPURegList(CPURegister::RegisterType type,
unsigned size,
unsigned first_reg,
unsigned last_reg)
: size_(size), type_(type) {
VIXL_ASSERT(
((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
((type == CPURegister::kVRegister) &&
(last_reg < kNumberOfVRegisters)));
VIXL_ASSERT(last_reg >= first_reg);
list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
list_ &= ~((UINT64_C(1) << first_reg) - 1);
VIXL_ASSERT(IsValid());
}
CPURegister::RegisterType GetType() const {
VIXL_ASSERT(IsValid());
return type_;
}
VIXL_DEPRECATED("GetType", CPURegister::RegisterType type() const) {
return GetType();
}
// Combine another CPURegList into this one. Registers that already exist in
// this list are left unchanged. The type and size of the registers in the
// 'other' list must match those in this list.
void Combine(const CPURegList& other) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
list_ |= other.GetList();
}
// Remove every register in the other CPURegList from this one. Registers that
// do not exist in this list are ignored. The type and size of the registers
// in the 'other' list must match those in this list.
void Remove(const CPURegList& other) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
list_ &= ~other.GetList();
}
// Variants of Combine and Remove which take a single register.
void Combine(const CPURegister& other) {
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetSizeInBits() == size_);
Combine(other.GetCode());
}
void Remove(const CPURegister& other) {
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetSizeInBits() == size_);
Remove(other.GetCode());
}
// Variants of Combine and Remove which take a single register by its code;
// the type and size of the register is inferred from this list.
void Combine(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ |= (UINT64_C(1) << code);
}
void Remove(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ &= ~(UINT64_C(1) << code);
}
static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
VIXL_ASSERT(list_1.type_ == list_2.type_);
VIXL_ASSERT(list_1.size_ == list_2.size_);
return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
}
static CPURegList Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3);
static CPURegList Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4);
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2) {
VIXL_ASSERT(list_1.type_ == list_2.type_);
VIXL_ASSERT(list_1.size_ == list_2.size_);
return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
}
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3);
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4);
bool Overlaps(const CPURegList& other) const {
return (type_ == other.type_) && ((list_ & other.list_) != 0);
}
RegList GetList() const {
VIXL_ASSERT(IsValid());
return list_;
}
VIXL_DEPRECATED("GetList", RegList list() const) { return GetList(); }
void SetList(RegList new_list) {
VIXL_ASSERT(IsValid());
list_ = new_list;
}
VIXL_DEPRECATED("SetList", void set_list(RegList new_list)) {
return SetList(new_list);
}
// Remove all callee-saved registers from the list. This can be useful when
// preparing registers for an AAPCS64 function call, for example.
void RemoveCalleeSaved();
CPURegister PopLowestIndex();
CPURegister PopHighestIndex();
// AAPCS64 callee-saved registers.
static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);
// AAPCS64 caller-saved registers. Note that this includes lr.
// TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
// 64-bits being caller-saved.
static CPURegList GetCallerSaved(unsigned size = kXRegSize);
static CPURegList GetCallerSavedV(unsigned size = kDRegSize);
bool IsEmpty() const {
VIXL_ASSERT(IsValid());
return list_ == 0;
}
bool IncludesAliasOf(const CPURegister& other) const {
VIXL_ASSERT(IsValid());
return (type_ == other.GetType()) && ((other.GetBit() & list_) != 0);
}
bool IncludesAliasOf(int code) const {
VIXL_ASSERT(IsValid());
return ((code & list_) != 0);
}
int GetCount() const {
VIXL_ASSERT(IsValid());
return CountSetBits(list_);
}
VIXL_DEPRECATED("GetCount", int Count()) const { return GetCount(); }
int GetRegisterSizeInBits() const {
VIXL_ASSERT(IsValid());
return size_;
}
VIXL_DEPRECATED("GetRegisterSizeInBits", int RegisterSizeInBits() const) {
return GetRegisterSizeInBits();
}
int GetRegisterSizeInBytes() const {
int size_in_bits = GetRegisterSizeInBits();
VIXL_ASSERT((size_in_bits % 8) == 0);
return size_in_bits / 8;
}
VIXL_DEPRECATED("GetRegisterSizeInBytes", int RegisterSizeInBytes() const) {
return GetRegisterSizeInBytes();
}
unsigned GetTotalSizeInBytes() const {
VIXL_ASSERT(IsValid());
return GetRegisterSizeInBytes() * GetCount();
}
VIXL_DEPRECATED("GetTotalSizeInBytes", unsigned TotalSizeInBytes() const) {
return GetTotalSizeInBytes();
}
private:
RegList list_;
int size_;
CPURegister::RegisterType type_;
bool IsValid() const;
};
// AAPCS64 callee-saved registers.
extern const CPURegList kCalleeSaved;
extern const CPURegList kCalleeSavedV;
// AAPCS64 caller-saved registers. Note that this includes lr.
extern const CPURegList kCallerSaved;
extern const CPURegList kCallerSavedV;
// Operand.
class Operand {
public:
// #<immediate>
// where <immediate> is int64_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(int64_t immediate = 0); // NOLINT(runtime/explicit)
// rm, {<shift> #<shift_amount>}
// where <shift> is one of {LSL, LSR, ASR, ROR}.
// <shift_amount> is uint6_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(Register reg,
Shift shift = LSL,
unsigned shift_amount = 0); // NOLINT(runtime/explicit)
// rm, {<extend> {#<shift_amount>}}
// where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
// <shift_amount> is uint2_t.
explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
bool IsImmediate() const;
bool IsPlainRegister() const;
bool IsShiftedRegister() const;
bool IsExtendedRegister() const;
bool IsZero() const;
// This returns an LSL shift (<= 4) operand as an equivalent extend operand,
// which helps in the encoding of instructions that use the stack pointer.
Operand ToExtendedRegister() const;
int64_t GetImmediate() const {
VIXL_ASSERT(IsImmediate());
return immediate_;
}
VIXL_DEPRECATED("GetImmediate", int64_t immediate() const) {
return GetImmediate();
}
int64_t GetEquivalentImmediate() const {
return IsZero() ? 0 : GetImmediate();
}
Register GetRegister() const {
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
return reg_;
}
VIXL_DEPRECATED("GetRegister", Register reg() const) { return GetRegister(); }
Register GetBaseRegister() const { return GetRegister(); }
Shift GetShift() const {
VIXL_ASSERT(IsShiftedRegister());
return shift_;
}
VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }
Extend GetExtend() const {
VIXL_ASSERT(IsExtendedRegister());
return extend_;
}
VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }
unsigned GetShiftAmount() const {
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
return shift_amount_;
}
VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
return GetShiftAmount();
}
private:
int64_t immediate_;
Register reg_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
};
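// Illustrative sketch (editorial addition, not part of the upstream VIXL
// header): the three constructor forms above map onto the usual AArch64
// operand shapes, using the register aliases defined earlier in this file:
//
//   Operand imm(42);                // #42
//   Operand plain(x1);              // x1 (LSL #0)
//   Operand shifted(x1, LSL, 4);    // x1, LSL #4
//   Operand extended(w2, UXTW, 2);  // w2, UXTW #2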
// MemOperand represents the addressing mode of a load or store instruction.
class MemOperand {
public:
// Creates an invalid `MemOperand`.
MemOperand();
explicit MemOperand(Register base,
int64_t offset = 0,
AddrMode addrmode = Offset);
MemOperand(Register base,
Register regoffset,
Shift shift = LSL,
unsigned shift_amount = 0);
MemOperand(Register base,
Register regoffset,
Extend extend,
unsigned shift_amount = 0);
MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset);
const Register& GetBaseRegister() const { return base_; }
VIXL_DEPRECATED("GetBaseRegister", const Register& base() const) {
return GetBaseRegister();
}
const Register& GetRegisterOffset() const { return regoffset_; }
VIXL_DEPRECATED("GetRegisterOffset", const Register& regoffset() const) {
return GetRegisterOffset();
}
int64_t GetOffset() const { return offset_; }
VIXL_DEPRECATED("GetOffset", int64_t offset() const) { return GetOffset(); }
AddrMode GetAddrMode() const { return addrmode_; }
VIXL_DEPRECATED("GetAddrMode", AddrMode addrmode() const) {
return GetAddrMode();
}
Shift GetShift() const { return shift_; }
VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }
Extend GetExtend() const { return extend_; }
VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }
unsigned GetShiftAmount() const { return shift_amount_; }
VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
return GetShiftAmount();
}
bool IsImmediateOffset() const;
bool IsRegisterOffset() const;
bool IsPreIndex() const;
bool IsPostIndex() const;
void AddOffset(int64_t offset);
bool IsValid() const {
return base_.IsValid() &&
((addrmode_ == Offset) || (addrmode_ == PreIndex) ||
(addrmode_ == PostIndex)) &&
((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) &&
((offset_ == 0) || !regoffset_.IsValid());
}
bool Equals(const MemOperand& other) const {
return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) &&
(offset_ == other.offset_) && (addrmode_ == other.addrmode_) &&
(shift_ == other.shift_) && (extend_ == other.extend_) &&
(shift_amount_ == other.shift_amount_);
}
private:
Register base_;
Register regoffset_;
int64_t offset_;
AddrMode addrmode_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
};
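// Illustrative sketch (editorial addition, not part of the upstream VIXL
// header): the constructors above cover the AArch64 addressing modes, for
// example:
//
//   MemOperand(x0);               // [x0]
//   MemOperand(x0, 8);            // [x0, #8]
//   MemOperand(x0, 8, PreIndex);  // [x0, #8]!
//   MemOperand(x0, 8, PostIndex); // [x0], #8
//   MemOperand(x0, x1, LSL, 3);   // [x0, x1, LSL #3]
//   MemOperand(x0, w1, UXTW, 2);  // [x0, w1, UXTW #2]
//
// The AddrMode, Shift and Extend names come from instructions-aarch64.h,
// which is included at the top of this file.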
// This is an abstraction that can represent a register or memory location. The
// `MacroAssembler` provides helpers to move data between generic operands.
class GenericOperand {
public:
GenericOperand() { VIXL_ASSERT(!IsValid()); }
GenericOperand(const CPURegister& reg); // NOLINT(runtime/explicit)
GenericOperand(const MemOperand& mem_op,
size_t mem_op_size = 0); // NOLINT(runtime/explicit)
bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); }
bool Equals(const GenericOperand& other) const;
bool IsCPURegister() const {
VIXL_ASSERT(IsValid());
return cpu_register_.IsValid();
}
bool IsRegister() const {
return IsCPURegister() && cpu_register_.IsRegister();
}
bool IsVRegister() const {
return IsCPURegister() && cpu_register_.IsVRegister();
}
bool IsSameCPURegisterType(const GenericOperand& other) {
return IsCPURegister() && other.IsCPURegister() &&
GetCPURegister().IsSameType(other.GetCPURegister());
}
bool IsMemOperand() const {
VIXL_ASSERT(IsValid());
return mem_op_.IsValid();
}
CPURegister GetCPURegister() const {
VIXL_ASSERT(IsCPURegister());
return cpu_register_;
}
MemOperand GetMemOperand() const {
VIXL_ASSERT(IsMemOperand());
return mem_op_;
}
size_t GetMemOperandSizeInBytes() const {
VIXL_ASSERT(IsMemOperand());
return mem_op_size_;
}
size_t GetSizeInBytes() const {
return IsCPURegister() ? cpu_register_.GetSizeInBytes()
: GetMemOperandSizeInBytes();
}
size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; }
private:
CPURegister cpu_register_;
MemOperand mem_op_;
// The size of the memory region pointed to, in bytes.
// We only support sizes up to X/D register sizes.
size_t mem_op_size_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_OPERANDS_AARCH64_H_

File diff suppressed because it is too large


@@ -0,0 +1,192 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_
#define VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_
#include "instructions-aarch64.h"
namespace vixl {
namespace aarch64 {
// Debug instructions.
//
// VIXL's macro-assembler and simulator support a few pseudo instructions to
// make debugging easier. These pseudo instructions do not exist on real
// hardware.
//
// TODO: Also consider allowing these pseudo-instructions to be disabled in the
// simulator, so that users can check that the input is valid native code.
// (This isn't possible in all cases. Printf won't work, for example.)
//
// Each debug pseudo instruction is represented by a HLT instruction. The HLT
// immediate field is used to identify the type of debug pseudo instruction.
enum DebugHltOpcode {
kUnreachableOpcode = 0xdeb0,
kPrintfOpcode,
kTraceOpcode,
kLogOpcode,
kRuntimeCallOpcode,
kSetCPUFeaturesOpcode,
kEnableCPUFeaturesOpcode,
kDisableCPUFeaturesOpcode,
kSaveCPUFeaturesOpcode,
kRestoreCPUFeaturesOpcode,
// Aliases.
kDebugHltFirstOpcode = kUnreachableOpcode,
kDebugHltLastOpcode = kLogOpcode
};
VIXL_DEPRECATED("DebugHltOpcode", typedef DebugHltOpcode DebugHltOpcodes);
// Each pseudo instruction uses a custom encoding for additional arguments, as
// described below.
// Unreachable - kUnreachableOpcode
//
// Instruction which should never be executed. This is used as a guard in parts
// of the code that should not be reachable, such as in data encoded inline in
// the instructions.
// Printf - kPrintfOpcode
// - arg_count: The number of arguments.
// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
//
// Simulate a call to printf.
//
// Floating-point and integer arguments are passed in separate sets of registers
// in AAPCS64 (even for varargs functions), so it is not possible to determine
// the type of each argument without some information about the values that were
// passed in. This information could be retrieved from the printf format string,
// but the format string is not trivial to parse so we encode the relevant
// information with the HLT instruction.
//
// Also, the following registers are populated (as if for a native AArch64
// call):
// x0: The format string
// x1-x7: Optional arguments, if type == CPURegister::kRegister
// d0-d7: Optional arguments, if type == CPURegister::kFPRegister
const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
const unsigned kPrintfLength = 3 * kInstructionSize;
const unsigned kPrintfMaxArgCount = 4;
// The argument pattern is a set of two-bit-fields, each with one of the
// following values:
enum PrintfArgPattern {
kPrintfArgW = 1,
kPrintfArgX = 2,
// There is no kPrintfArgS because floats are always converted to doubles in C
// varargs calls.
kPrintfArgD = 3
};
static const unsigned kPrintfArgPatternBits = 2;
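// Illustrative sketch (editorial addition, not part of the upstream VIXL
// header): for a simulated call such as printf("%s: %d %f\n", name, count,
// ratio) - assuming a pointer, an int and a double - the encoded data words
// would be:
//
//   arg_count   = 3;
//   arg_pattern = kPrintfArgX          // name  -> field 0 (bits 1:0)
//               | (kPrintfArgW << 2)   // count -> field 1 (bits 3:2)
//               | (kPrintfArgD << 4);  // ratio -> field 2 (bits 5:4)
//
// i.e. each argument occupies one kPrintfArgPatternBits-wide field, starting
// from the least-significant bits of the pattern word.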
// Trace - kTraceOpcode
// - parameter: TraceParameter stored as a uint32_t
// - command: TraceCommand stored as a uint32_t
//
// Allow for trace management in the generated code. This enables or disables
// automatic tracing of the specified information for every simulated
// instruction.
const unsigned kTraceParamsOffset = 1 * kInstructionSize;
const unsigned kTraceCommandOffset = 2 * kInstructionSize;
const unsigned kTraceLength = 3 * kInstructionSize;
// Trace parameters.
enum TraceParameters {
LOG_DISASM = 1 << 0, // Log disassembly.
LOG_REGS = 1 << 1, // Log general purpose registers.
LOG_VREGS = 1 << 2, // Log NEON and floating-point registers.
LOG_SYSREGS = 1 << 3, // Log the flags and system registers.
LOG_WRITE = 1 << 4, // Log writes to memory.
LOG_BRANCH = 1 << 5, // Log taken branches.
LOG_NONE = 0,
LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYSREGS,
LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE | LOG_BRANCH
};
// Trace commands.
enum TraceCommand { TRACE_ENABLE = 1, TRACE_DISABLE = 2 };
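// Illustrative sketch (editorial addition, not part of the upstream VIXL
// header): the trace parameters form a bit mask, so a Trace pseudo-instruction
// that turns on disassembly plus register logging would encode:
//
//   uint32_t params  = LOG_DISASM | LOG_REGS;  // Or LOG_STATE, LOG_ALL, ...
//   uint32_t command = TRACE_ENABLE;
//
// with the two words placed at kTraceParamsOffset and kTraceCommandOffset
// after the HLT instruction, as described above.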
// Log - kLogOpcode
// - parameter: TraceParameter stored as a uint32_t
//
// Print the specified information once. This mechanism is separate from Trace.
// In particular, _all_ of the specified registers are printed, rather than just
// the registers that the instruction writes.
//
// Any combination of the TraceParameters values can be used, except that
// LOG_DISASM is not supported for Log.
const unsigned kLogParamsOffset = 1 * kInstructionSize;
const unsigned kLogLength = 2 * kInstructionSize;
// Runtime call simulation - kRuntimeCallOpcode
enum RuntimeCallType { kCallRuntime, kTailCallRuntime };
const unsigned kRuntimeCallWrapperOffset = 1 * kInstructionSize;
// The size of a pointer on the host.
const unsigned kRuntimeCallAddressSize = sizeof(uintptr_t);
const unsigned kRuntimeCallFunctionOffset =
kRuntimeCallWrapperOffset + kRuntimeCallAddressSize;
const unsigned kRuntimeCallTypeOffset =
kRuntimeCallFunctionOffset + kRuntimeCallAddressSize;
const unsigned kRuntimeCallLength = kRuntimeCallTypeOffset + sizeof(uint32_t);
// Enable or disable CPU features - kSetCPUFeaturesOpcode
// - kEnableCPUFeaturesOpcode
// - kDisableCPUFeaturesOpcode
// - parameter[...]: A list of `CPUFeatures::Feature`s, encoded as
// ConfigureCPUFeaturesElementType and terminated with CPUFeatures::kNone.
// - [Padding to align to kInstructionSize.]
//
// 'Set' completely overwrites the existing CPU features.
// 'Enable' and 'Disable' update the existing CPU features.
//
// These mechanisms allow users to strictly check the use of CPU features in
// different regions of code.
//
// These have no effect on the set of 'seen' features (as reported by
// CPUFeaturesAuditor::HasSeen(...)).
typedef uint8_t ConfigureCPUFeaturesElementType;
const unsigned kConfigureCPUFeaturesListOffset = 1 * kInstructionSize;
// Save or restore CPU features - kSaveCPUFeaturesOpcode
// - kRestoreCPUFeaturesOpcode
//
// These provide a stack-like mechanism for preserving the CPU features, or
// restoring the last-preserved features. These pseudo-instructions take no
// arguments.
//
// These have no effect on the set of 'seen' features (as reported by
// CPUFeaturesAuditor::HasSeen(...)).
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_


@@ -0,0 +1,101 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_ASSEMBLER_BASE_H
#define VIXL_ASSEMBLER_BASE_H
#include "code-buffer-vixl.h"
namespace vixl {
class CodeBufferCheckScope;
namespace internal {
class AssemblerBase {
public:
AssemblerBase() : allow_assembler_(false) {}
explicit AssemblerBase(size_t capacity)
: buffer_(capacity), allow_assembler_(false) {}
AssemblerBase(byte* buffer, size_t capacity)
: buffer_(buffer, capacity), allow_assembler_(false) {}
virtual ~AssemblerBase() {}
// Finalize a code buffer of generated instructions. This function must be
// called before executing or copying code from the buffer.
void FinalizeCode() { GetBuffer()->SetClean(); }
ptrdiff_t GetCursorOffset() const { return GetBuffer().GetCursorOffset(); }
// Return the address of the cursor.
template <typename T>
T GetCursorAddress() const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetBuffer().GetOffsetAddress<T>(GetCursorOffset());
}
size_t GetSizeOfCodeGenerated() const { return GetCursorOffset(); }
// Accessors.
CodeBuffer* GetBuffer() { return &buffer_; }
const CodeBuffer& GetBuffer() const { return buffer_; }
bool AllowAssembler() const { return allow_assembler_; }
protected:
void SetAllowAssembler(bool allow) { allow_assembler_ = allow; }
// CodeBufferCheckScope must be able to temporarily allow the assembler.
friend class vixl::CodeBufferCheckScope;
// Buffer where the code is emitted.
CodeBuffer buffer_;
private:
bool allow_assembler_;
public:
// Deprecated public interface.
// Return the address of an offset in the buffer.
template <typename T>
VIXL_DEPRECATED("GetBuffer().GetOffsetAddress<T>(offset)",
T GetOffsetAddress(ptrdiff_t offset) const) {
return GetBuffer().GetOffsetAddress<T>(offset);
}
// Return the address of the start of the buffer.
template <typename T>
VIXL_DEPRECATED("GetBuffer().GetStartAddress<T>()",
T GetStartAddress() const) {
return GetBuffer().GetOffsetAddress<T>(0);
}
};
} // namespace internal
} // namespace vixl
#endif // VIXL_ASSEMBLER_BASE_H


@@ -0,0 +1,191 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CODE_BUFFER_H
#define VIXL_CODE_BUFFER_H
#include <cstring>
#include "globals-vixl.h"
#include "utils-vixl.h"
namespace vixl {
class CodeBuffer {
public:
static const size_t kDefaultCapacity = 4 * KBytes;
explicit CodeBuffer(size_t capacity = kDefaultCapacity);
CodeBuffer(byte* buffer, size_t capacity);
~CodeBuffer();
void Reset();
#ifdef VIXL_CODE_BUFFER_MMAP
void SetExecutable();
void SetWritable();
#else
// These require page-aligned memory blocks, which we can only guarantee with
// mmap.
VIXL_NO_RETURN_IN_DEBUG_MODE void SetExecutable() { VIXL_UNIMPLEMENTED(); }
VIXL_NO_RETURN_IN_DEBUG_MODE void SetWritable() { VIXL_UNIMPLEMENTED(); }
#endif
ptrdiff_t GetOffsetFrom(ptrdiff_t offset) const {
ptrdiff_t cursor_offset = cursor_ - buffer_;
VIXL_ASSERT((offset >= 0) && (offset <= cursor_offset));
return cursor_offset - offset;
}
VIXL_DEPRECATED("GetOffsetFrom",
ptrdiff_t OffsetFrom(ptrdiff_t offset) const) {
return GetOffsetFrom(offset);
}
ptrdiff_t GetCursorOffset() const { return GetOffsetFrom(0); }
VIXL_DEPRECATED("GetCursorOffset", ptrdiff_t CursorOffset() const) {
return GetCursorOffset();
}
void Rewind(ptrdiff_t offset) {
byte* rewound_cursor = buffer_ + offset;
VIXL_ASSERT((buffer_ <= rewound_cursor) && (rewound_cursor <= cursor_));
cursor_ = rewound_cursor;
}
template <typename T>
T GetOffsetAddress(ptrdiff_t offset) const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
VIXL_ASSERT((offset >= 0) && (offset <= (cursor_ - buffer_)));
return reinterpret_cast<T>(buffer_ + offset);
}
// Return the address of the start or end of the emitted code.
template <typename T>
T GetStartAddress() const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(0);
}
template <typename T>
T GetEndAddress() const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(GetSizeInBytes());
}
size_t GetRemainingBytes() const {
VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_)));
return (buffer_ + capacity_) - cursor_;
}
VIXL_DEPRECATED("GetRemainingBytes", size_t RemainingBytes() const) {
return GetRemainingBytes();
}
size_t GetSizeInBytes() const {
VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_)));
return cursor_ - buffer_;
}
// A code buffer can emit:
// * 8, 16, 32 or 64-bit data: constant.
// * 16 or 32-bit data: instruction.
// * string: debug info.
void Emit8(uint8_t data) { Emit(data); }
void Emit16(uint16_t data) { Emit(data); }
void Emit32(uint32_t data) { Emit(data); }
void Emit64(uint64_t data) { Emit(data); }
void EmitString(const char* string);
void EmitData(const void* data, size_t size);
template <typename T>
void Emit(T value) {
VIXL_ASSERT(HasSpaceFor(sizeof(value)));
dirty_ = true;
memcpy(cursor_, &value, sizeof(value));
cursor_ += sizeof(value);
}
void UpdateData(size_t offset, const void* data, size_t size);
// Align to 32bit.
void Align();
// Ensure there is enough space for and emit 'n' zero bytes.
void EmitZeroedBytes(int n);
bool Is16bitAligned() const { return IsAligned<2>(cursor_); }
bool Is32bitAligned() const { return IsAligned<4>(cursor_); }
size_t GetCapacity() const { return capacity_; }
VIXL_DEPRECATED("GetCapacity", size_t capacity() const) {
return GetCapacity();
}
bool IsManaged() const { return managed_; }
void Grow(size_t new_capacity);
bool IsDirty() const { return dirty_; }
void SetClean() { dirty_ = false; }
bool HasSpaceFor(size_t amount) const {
return GetRemainingBytes() >= amount;
}
void EnsureSpaceFor(size_t amount, bool* has_grown) {
bool is_full = !HasSpaceFor(amount);
if (is_full) Grow(capacity_ * 2 + amount);
VIXL_ASSERT(has_grown != NULL);
*has_grown = is_full;
}
void EnsureSpaceFor(size_t amount) {
bool dummy;
EnsureSpaceFor(amount, &dummy);
}
private:
// Backing store of the buffer.
byte* buffer_;
// If true the backing store is allocated and deallocated by the buffer. The
// backing store can then grow on demand. If false the backing store is
// provided by the user and cannot be resized internally.
bool managed_;
// Pointer to the next location to be written.
byte* cursor_;
// True if there has been any write since the buffer was created or cleaned.
bool dirty_;
// Capacity in bytes of the backing store.
size_t capacity_;
};
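// Illustrative sketch (editorial addition, not part of the upstream VIXL
// header): a CodeBuffer is normally owned by an assembler, but it can also be
// driven directly:
//
//   CodeBuffer buffer(1024);    // Managed, growable backing store.
//   buffer.EnsureSpaceFor(8);   // Grow if fewer than 8 bytes remain.
//   buffer.Emit32(0xd503201f);  // One 32-bit word (an A64 NOP encoding).
//   buffer.Align();             // Pad to a 32-bit boundary.
//   size_t size = buffer.GetSizeInBytes();
//
// The NOP encoding is only an example value; the buffer itself does not
// interpret the data it stores.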
} // namespace vixl
#endif // VIXL_CODE_BUFFER_H


@@ -0,0 +1,322 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CODE_GENERATION_SCOPES_H_
#define VIXL_CODE_GENERATION_SCOPES_H_
#include "assembler-base-vixl.h"
#include "macro-assembler-interface.h"
namespace vixl {
// This scope will:
// - Allow code emission from the specified `Assembler`.
// - Optionally reserve space in the `CodeBuffer` (if it is managed by VIXL).
// - Optionally, on destruction, check the size of the generated code.
// (The size can be either exact or a maximum size.)
class CodeBufferCheckScope {
public:
// Tell whether or not the scope needs to ensure the associated CodeBuffer
// has enough space for the requested size.
enum BufferSpacePolicy {
kReserveBufferSpace,
kDontReserveBufferSpace,
// Deprecated, but kept for backward compatibility.
kCheck = kReserveBufferSpace,
kNoCheck = kDontReserveBufferSpace
};
// Tell whether or not the scope should assert the amount of code emitted
// within the scope is consistent with the requested amount.
enum SizePolicy {
kNoAssert, // Do not check the size of the code emitted.
kExactSize, // The code emitted must be exactly size bytes.
kMaximumSize // The code emitted must be at most size bytes.
};
// This constructor implicitly calls `Open` to initialise the scope
// (`assembler` must not be `NULL`), so it is ready to use immediately after
// it has been constructed.
CodeBufferCheckScope(internal::AssemblerBase* assembler,
size_t size,
BufferSpacePolicy check_policy = kReserveBufferSpace,
SizePolicy size_policy = kMaximumSize)
: assembler_(NULL), initialised_(false) {
Open(assembler, size, check_policy, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
CodeBufferCheckScope() : assembler_(NULL), initialised_(false) {
// Nothing to do.
}
virtual ~CodeBufferCheckScope() { Close(); }
// This function performs the actual initialisation work.
void Open(internal::AssemblerBase* assembler,
size_t size,
BufferSpacePolicy check_policy = kReserveBufferSpace,
SizePolicy size_policy = kMaximumSize) {
VIXL_ASSERT(!initialised_);
VIXL_ASSERT(assembler != NULL);
assembler_ = assembler;
if (check_policy == kReserveBufferSpace) {
assembler->GetBuffer()->EnsureSpaceFor(size);
}
#ifdef VIXL_DEBUG
limit_ = assembler_->GetSizeOfCodeGenerated() + size;
assert_policy_ = size_policy;
previous_allow_assembler_ = assembler_->AllowAssembler();
assembler_->SetAllowAssembler(true);
#else
USE(size_policy);
#endif
initialised_ = true;
}
// This function performs the cleaning-up work. It must succeed even if the
// scope has not been opened. It is safe to call multiple times.
void Close() {
#ifdef VIXL_DEBUG
if (!initialised_) {
return;
}
assembler_->SetAllowAssembler(previous_allow_assembler_);
switch (assert_policy_) {
case kNoAssert:
break;
case kExactSize:
VIXL_ASSERT(assembler_->GetSizeOfCodeGenerated() == limit_);
break;
case kMaximumSize:
VIXL_ASSERT(assembler_->GetSizeOfCodeGenerated() <= limit_);
break;
default:
VIXL_UNREACHABLE();
}
#endif
initialised_ = false;
}
protected:
internal::AssemblerBase* assembler_;
SizePolicy assert_policy_;
size_t limit_;
bool previous_allow_assembler_;
bool initialised_;
};
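// Illustrative sketch (editorial addition, not part of the upstream VIXL
// header): a typical use reserves space for a known-size sequence and, in
// debug builds, checks that exactly that much code was emitted:
//
//   {
//     CodeBufferCheckScope scope(&assm, 2 * kInstructionSize,
//                                CodeBufferCheckScope::kReserveBufferSpace,
//                                CodeBufferCheckScope::kExactSize);
//     // ... emit exactly two instructions with the raw assembler ...
//   }
//
// Here `assm` stands for an assembler deriving from internal::AssemblerBase,
// and kInstructionSize for the AArch64 instruction size; both are assumptions
// made for the example.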
// This scope will:
// - Do the same as `CodeBufferCheckScope`, but:
// - If managed by VIXL, always reserve space in the `CodeBuffer`.
// - Always check the size (exact or maximum) of the generated code on
// destruction.
// - Emit pools if the specified size would push them out of range.
// - Block pools emission for the duration of the scope.
// This scope allows the `Assembler` and `MacroAssembler` to be freely and
// safely mixed for its duration.
class EmissionCheckScope : public CodeBufferCheckScope {
public:
// This constructor implicitly calls `Open` (when `masm` is not `NULL`) to
// initialise the scope, so it is ready to use immediately after it has been
// constructed.
EmissionCheckScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kMaximumSize) {
Open(masm, size, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
EmissionCheckScope() {}
virtual ~EmissionCheckScope() { Close(); }
enum PoolPolicy {
// Do not forbid pool emission inside the scope. Pools will not be emitted
// on `Open` either.
kIgnorePools,
// Force pools to be generated on `Open` if necessary and block their
// emission inside the scope.
kBlockPools,
// Deprecated, but kept for backward compatibility.
kCheckPools = kBlockPools
};
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kMaximumSize) {
Open(masm, size, size_policy, kBlockPools);
}
void Close() {
if (!initialised_) {
return;
}
if (masm_ == NULL) {
// Nothing to do.
return;
}
// Perform the opposite of `Open`, which is:
// - Check the code generation limit was not exceeded.
// - Release the pools.
CodeBufferCheckScope::Close();
if (pool_policy_ == kBlockPools) {
masm_->ReleasePools();
}
VIXL_ASSERT(!initialised_);
}
protected:
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy,
PoolPolicy pool_policy) {
if (masm == NULL) {
// Nothing to do.
// We may reach this point in a context of conditional code generation.
// See `aarch64::MacroAssembler::MoveImmediateHelper()` for an example.
return;
}
masm_ = masm;
pool_policy_ = pool_policy;
if (pool_policy_ == kBlockPools) {
// To avoid duplicating the work to check that enough space is available
// in the buffer, do not use the more generic `EnsureEmitFor()`. It is
// done below when opening `CodeBufferCheckScope`.
masm->EnsureEmitPoolsFor(size);
masm->BlockPools();
}
// The buffer should be checked *after* we emit the pools.
CodeBufferCheckScope::Open(masm->AsAssemblerBase(),
size,
kReserveBufferSpace,
size_policy);
VIXL_ASSERT(initialised_);
}
// This constructor should only be used from code that is *currently
// generating* the pools, to avoid an infinite loop.
EmissionCheckScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy,
PoolPolicy pool_policy) {
Open(masm, size, size_policy, pool_policy);
}
MacroAssemblerInterface* masm_;
PoolPolicy pool_policy_;
};
// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope will:
// - Do the same as `EmissionCheckScope`.
// - Block access to the MacroAssemblerInterface (using run-time assertions).
class ExactAssemblyScope : public EmissionCheckScope {
public:
// This constructor implicitly calls `Open` (when `masm` is not `NULL`) to
// initialise the scope, so it is ready to use immediately after it has been
// constructed.
ExactAssemblyScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kExactSize) {
Open(masm, size, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
ExactAssemblyScope() {}
virtual ~ExactAssemblyScope() { Close(); }
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kExactSize) {
Open(masm, size, size_policy, kBlockPools);
}
void Close() {
if (!initialised_) {
return;
}
if (masm_ == NULL) {
// Nothing to do.
return;
}
#ifdef VIXL_DEBUG
masm_->SetAllowMacroInstructions(previous_allow_macro_assembler_);
#else
USE(previous_allow_macro_assembler_);
#endif
EmissionCheckScope::Close();
}
protected:
// This protected constructor allows overriding the pool policy. It is
// available to allow this scope to be used in code that handles generation
// of pools.
ExactAssemblyScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy assert_policy,
PoolPolicy pool_policy) {
Open(masm, size, assert_policy, pool_policy);
}
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy,
PoolPolicy pool_policy) {
VIXL_ASSERT(size_policy != kNoAssert);
if (masm == NULL) {
// Nothing to do.
return;
}
// Rely on EmissionCheckScope::Open to initialise `masm_` and
// `pool_policy_`.
EmissionCheckScope::Open(masm, size, size_policy, pool_policy);
#ifdef VIXL_DEBUG
previous_allow_macro_assembler_ = masm->AllowMacroInstructions();
masm->SetAllowMacroInstructions(false);
#endif
}
private:
bool previous_allow_macro_assembler_;
};
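// Illustrative sketch (editorial addition, not part of the upstream VIXL
// header): ExactAssemblyScope is the tool to use when raw assembler
// instructions must be emitted from code that otherwise uses a
// MacroAssembler:
//
//   {
//     ExactAssemblyScope scope(&masm, 3 * kInstructionSize,
//                              ExactAssemblyScope::kExactSize);
//     // Only plain `Assembler` methods may be used here; macro-instructions
//     // are blocked by run-time assertions and pool emission is postponed.
//   }
//
// Here `masm` stands for a class implementing MacroAssemblerInterface and
// kInstructionSize for the AArch64 instruction size; both are assumptions
// made for the example.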
} // namespace vixl
#endif // VIXL_CODE_GENERATION_SCOPES_H_


@@ -0,0 +1,160 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_COMPILER_INTRINSICS_H
#define VIXL_COMPILER_INTRINSICS_H
#include "globals-vixl.h"
namespace vixl {
// Helper to check whether the version of GCC used is at least the specified
// requirement.
#define MAJOR 1000000
#define MINOR 1000
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
((__GNUC__ * (MAJOR) + __GNUC_MINOR__ * (MINOR) + __GNUC_PATCHLEVEL__) >= \
((major) * (MAJOR) + ((minor)) * (MINOR) + (patchlevel)))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
((__GNUC__ * (MAJOR) + __GNUC_MINOR__ * (MINOR)) >= \
((major) * (MAJOR) + ((minor)) * (MINOR) + (patchlevel)))
#else
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0
#endif
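// Illustrative note (editorial addition, not part of the upstream VIXL
// header): GCC_VERSION_OR_NEWER is used as a compile-time feature test, for
// example in the builtin checks further down:
//
//   #define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0))
//
// This evaluates to 1 when compiling with GCC 4.7.0 or later, and to 0
// otherwise (including on compilers where the fallback definition above makes
// the macro expand to 0).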
#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS)
// clang-format off
#define COMPILER_HAS_BUILTIN_CLRSB (__has_builtin(__builtin_clrsb))
#define COMPILER_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz))
#define COMPILER_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
#define COMPILER_HAS_BUILTIN_FFS (__has_builtin(__builtin_ffs))
#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
// clang-format on
#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS)
// The documentation for these builtins is available at:
// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html
// clang-format off
# define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0))
# define COMPILER_HAS_BUILTIN_CLZ (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_CTZ (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_FFS (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0))
// clang-format on
#else
// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually
// implemented C++ methods.
// clang-format off
#define COMPILER_HAS_BUILTIN_BSWAP false
#define COMPILER_HAS_BUILTIN_CLRSB false
#define COMPILER_HAS_BUILTIN_CLZ false
#define COMPILER_HAS_BUILTIN_CTZ false
#define COMPILER_HAS_BUILTIN_FFS false
#define COMPILER_HAS_BUILTIN_POPCOUNT false
// clang-format on
#endif
template <typename V>
inline bool IsPowerOf2(V value) {
return (value != 0) && ((value & (value - 1)) == 0);
}
// Declaration of fallback functions.
int CountLeadingSignBitsFallBack(int64_t value, int width);
int CountLeadingZerosFallBack(uint64_t value, int width);
int CountSetBitsFallBack(uint64_t value, int width);
int CountTrailingZerosFallBack(uint64_t value, int width);
// Implementation of intrinsics functions.
// TODO: The implementations could be improved for sizes different from 32-bit
// and 64-bit: we could mask the values and call the appropriate builtin.
template <typename V>
inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CLRSB
if (width == 32) {
return __builtin_clrsb(value);
} else if (width == 64) {
return __builtin_clrsbll(value);
}
#endif
return CountLeadingSignBitsFallBack(value, width);
}
template <typename V>
inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CLZ
if (width == 32) {
return (value == 0) ? 32 : __builtin_clz(static_cast<unsigned>(value));
} else if (width == 64) {
return (value == 0) ? 64 : __builtin_clzll(value);
}
#endif
return CountLeadingZerosFallBack(value, width);
}
template <typename V>
inline int CountSetBits(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_POPCOUNT
if (width == 32) {
return __builtin_popcount(static_cast<unsigned>(value));
} else if (width == 64) {
return __builtin_popcountll(value);
}
#endif
return CountSetBitsFallBack(value, width);
}
template <typename V>
inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CTZ
if (width == 32) {
return (value == 0) ? 32 : __builtin_ctz(static_cast<unsigned>(value));
} else if (width == 64) {
return (value == 0) ? 64 : __builtin_ctzll(value);
}
#endif
return CountTrailingZerosFallBack(value, width);
}
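// Illustrative examples for the helpers above (whether a builtin or the
// fallback is used depends on the compiler configuration):
//
//   IsPowerOf2(64);                            // true
//   CountLeadingZeros(UINT32_C(0x00ffffff));   // 8  (width defaults to 32)
//   CountLeadingSignBits(INT32_C(-1));         // 31
//   CountSetBits(UINT64_C(0xff));              // 8  (width defaults to 64)
//   CountTrailingZeros(UINT32_C(0x80000000));  // 31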
} // namespace vixl
#endif // VIXL_COMPILER_INTRINSICS_H

View File

@ -0,0 +1,364 @@
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CPU_FEATURES_H
#define VIXL_CPU_FEATURES_H
#include <ostream>
#include "globals-vixl.h"
namespace vixl {
// clang-format off
#define VIXL_CPU_FEATURE_LIST(V) \
/* If set, the OS traps and emulates MRS accesses to relevant (EL1) ID_* */ \
/* registers, so that the detailed feature registers can be read */ \
/* directly. */ \
V(kIDRegisterEmulation, "ID register emulation", "cpuid") \
\
V(kFP, "FP", "fp") \
V(kNEON, "NEON", "asimd") \
V(kCRC32, "CRC32", "crc32") \
/* Cryptographic support instructions. */ \
V(kAES, "AES", "aes") \
V(kSHA1, "SHA1", "sha1") \
V(kSHA2, "SHA2", "sha2") \
/* A form of PMULL{2} with a 128-bit (1Q) result. */ \
V(kPmull1Q, "Pmull1Q", "pmull") \
/* Atomic operations on memory: CAS, LDADD, STADD, SWP, etc. */ \
V(kAtomics, "Atomics", "atomics") \
/* Limited ordering regions: LDLAR, STLLR and their variants. */ \
V(kLORegions, "LORegions", NULL) \
/* Rounding doubling multiply add/subtract: SQRDMLAH and SQRDMLSH. */ \
V(kRDM, "RDM", "asimdrdm") \
/* SDOT and UDOT support (in NEON). */ \
V(kDotProduct, "DotProduct", "asimddp") \
/* Half-precision (FP16) support for FP and NEON, respectively. */ \
V(kFPHalf, "FPHalf", "fphp") \
V(kNEONHalf, "NEONHalf", "asimdhp") \
/* The RAS extension, including the ESB instruction. */ \
V(kRAS, "RAS", NULL) \
/* Data cache clean to the point of persistence: DC CVAP. */ \
V(kDCPoP, "DCPoP", "dcpop") \
/* Cryptographic support instructions. */ \
V(kSHA3, "SHA3", "sha3") \
V(kSHA512, "SHA512", "sha512") \
V(kSM3, "SM3", "sm3") \
V(kSM4, "SM4", "sm4") \
/* Pointer authentication for addresses. */ \
V(kPAuth, "PAuth", NULL) \
/* Pointer authentication for addresses uses QARMA. */ \
V(kPAuthQARMA, "PAuthQARMA", NULL) \
/* Generic authentication (using the PACGA instruction). */ \
V(kPAuthGeneric, "PAuthGeneric", NULL) \
/* Generic authentication uses QARMA. */ \
V(kPAuthGenericQARMA, "PAuthGenericQARMA", NULL) \
/* JavaScript-style FP <-> integer conversion instruction: FJCVTZS. */ \
V(kJSCVT, "JSCVT", "jscvt") \
/* RCpc-based model (for weaker release consistency): LDAPR and variants. */ \
V(kRCpc, "RCpc", "lrcpc") \
/* Complex number support for NEON: FCMLA and FCADD. */ \
V(kFcma, "Fcma", "fcma")
// clang-format on
class CPUFeaturesConstIterator;
// A representation of the set of features known to be supported by the target
// device. Each feature is represented by a simple boolean flag.
//
// - When the Assembler is asked to assemble an instruction, it asserts (in
// debug mode) that the necessary features are available.
//
// - TODO: The MacroAssembler relies on the Assembler's assertions, but in
// some cases it may be useful for macros to generate a fall-back sequence
// in case features are not available.
//
// - The Simulator assumes by default that all features are available, but it
// is possible to configure it to fail if the simulated code uses features
// that are not enabled.
//
// The Simulator also offers pseudo-instructions to allow features to be
// enabled and disabled dynamically. This is useful when you want to ensure
// that some features are constrained to certain areas of code.
//
// - The base Disassembler knows nothing about CPU features, but the
// PrintDisassembler can be configured to annotate its output with warnings
// about unavailable features. The Simulator uses this feature when
// instruction trace is enabled.
//
// - The Decoder-based components -- the Simulator and PrintDisassembler --
// rely on a CPUFeaturesAuditor visitor. This visitor keeps a list of
// features actually encountered so that a large block of code can be
// examined (either directly or through simulation), and the required
// features analysed later.
//
// Expected usage:
//
// // By default, VIXL uses CPUFeatures::AArch64LegacyBaseline(), for
// // compatibility with older versions of VIXL.
// MacroAssembler masm;
//
// // Generate code only for the current CPU.
// masm.SetCPUFeatures(CPUFeatures::InferFromOS());
//
// // Turn off feature checking entirely.
// masm.SetCPUFeatures(CPUFeatures::All());
//
// Feature set manipulation:
//
// CPUFeatures f; // The default constructor gives an empty set.
// // Individual features can be added (or removed).
// f.Combine(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kAES);
// f.Remove(CPUFeatures::kNEON);
//
// // Some helpers exist for extensions that provide several features.
// f.Remove(CPUFeatures::All());
// f.Combine(CPUFeatures::AArch64LegacyBaseline());
//
// // Chained construction is also possible.
// CPUFeatures g =
// f.With(CPUFeatures::kPmull1Q).Without(CPUFeatures::kCRC32);
//
// // Features can be queried. Where multiple features are given, they are
// // combined with logical AND.
// if (f.Has(CPUFeatures::kNEON)) { ... }
// if (f.Has(CPUFeatures::kFP, CPUFeatures::kNEON)) { ... }
// if (f.Has(g)) { ... }
// // If the empty set is requested, the result is always 'true'.
// VIXL_ASSERT(f.Has(CPUFeatures()));
//
// // For debug and reporting purposes, features can be enumerated (or
// // printed directly):
// std::cout << CPUFeatures::kNEON; // Prints something like "NEON".
// std::cout << f; // Prints something like "FP, NEON, CRC32".
class CPUFeatures {
public:
// clang-format off
// Individual features.
// These should be treated as opaque tokens. User code should not rely on
// specific numeric values or ordering.
enum Feature {
// Refer to VIXL_CPU_FEATURE_LIST (above) for the list of feature names that
// this class supports.
kNone = -1,
#define VIXL_DECLARE_FEATURE(SYMBOL, NAME, CPUINFO) SYMBOL,
VIXL_CPU_FEATURE_LIST(VIXL_DECLARE_FEATURE)
#undef VIXL_DECLARE_FEATURE
kNumberOfFeatures
};
// clang-format on
// By default, construct with no features enabled.
CPUFeatures() : features_(0) {}
// Construct with some features already enabled.
CPUFeatures(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone);
// Construct with all features enabled. This can be used to disable feature
// checking: `Has(...)` returns true regardless of the argument.
static CPUFeatures All();
// Construct an empty CPUFeatures. This is equivalent to the default
// constructor, but is provided for symmetry and convenience.
static CPUFeatures None() { return CPUFeatures(); }
// The presence of these features was assumed by versions of VIXL before this
// API was added, so using this set by default ensures API compatibility.
static CPUFeatures AArch64LegacyBaseline() {
return CPUFeatures(kFP, kNEON, kCRC32);
}
// Construct a new CPUFeatures object based on what the OS reports.
static CPUFeatures InferFromOS();
// Combine another CPUFeatures object into this one. Features that already
// exist in this set are left unchanged.
void Combine(const CPUFeatures& other);
// Combine specific features into this set. Features that already exist in
// this set are left unchanged.
void Combine(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone);
// Remove features in another CPUFeatures object from this one.
void Remove(const CPUFeatures& other);
// Remove specific features from this set.
void Remove(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone);
// Chaining helpers for convenient construction.
CPUFeatures With(const CPUFeatures& other) const;
CPUFeatures With(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone) const;
CPUFeatures Without(const CPUFeatures& other) const;
CPUFeatures Without(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone) const;
// Query features.
// Note that an empty query (like `Has(kNone)`) always returns true.
bool Has(const CPUFeatures& other) const;
bool Has(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone) const;
// Return the number of enabled features.
size_t Count() const;
// Check for equivalence.
bool operator==(const CPUFeatures& other) const {
return Has(other) && other.Has(*this);
}
bool operator!=(const CPUFeatures& other) const { return !(*this == other); }
typedef CPUFeaturesConstIterator const_iterator;
const_iterator begin() const;
const_iterator end() const;
private:
// Each bit represents a feature. This field will be replaced as needed if
// features are added.
uint64_t features_;
friend std::ostream& operator<<(std::ostream& os,
const vixl::CPUFeatures& features);
};
std::ostream& operator<<(std::ostream& os, vixl::CPUFeatures::Feature feature);
std::ostream& operator<<(std::ostream& os, const vixl::CPUFeatures& features);
// This is not a proper C++ iterator type, but it simulates enough of
// ForwardIterator that simple loops can be written.
class CPUFeaturesConstIterator {
public:
CPUFeaturesConstIterator(const CPUFeatures* cpu_features = NULL,
CPUFeatures::Feature start = CPUFeatures::kNone)
: cpu_features_(cpu_features), feature_(start) {
VIXL_ASSERT(IsValid());
}
bool operator==(const CPUFeaturesConstIterator& other) const;
bool operator!=(const CPUFeaturesConstIterator& other) const {
return !(*this == other);
}
CPUFeatures::Feature operator++();
CPUFeatures::Feature operator++(int);
CPUFeatures::Feature operator*() const {
VIXL_ASSERT(IsValid());
return feature_;
}
// For proper support of C++'s simplest "Iterator" concept, this class would
// have to define member types (such as CPUFeaturesConstIterator::pointer) to make
// it appear as if it iterates over Feature objects in memory. That is, we'd
// need CPUFeatures::iterator to behave like std::vector<Feature>::iterator.
// This is at least partially possible -- the std::vector<bool> specialisation
// does something similar -- but it doesn't seem worthwhile for a
// special-purpose debug helper, so they are omitted here.
private:
const CPUFeatures* cpu_features_;
CPUFeatures::Feature feature_;
bool IsValid() const {
return ((cpu_features_ == NULL) && (feature_ == CPUFeatures::kNone)) ||
cpu_features_->Has(feature_);
}
};
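// Example (illustrative sketch): enumerating the features in a set using the
// iterator defined above.
//
//   CPUFeatures features(CPUFeatures::kFP, CPUFeatures::kNEON);
//   for (CPUFeatures::const_iterator it = features.begin();
//        it != features.end();
//        ++it) {
//     std::cout << *it << "\n";  // Prints something like "FP", then "NEON".
//   }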
// A convenience scope for temporarily modifying a CPU features object. This
// allows features to be enabled for short sequences.
//
// Expected usage:
//
// {
// CPUFeaturesScope cpu(&masm, CPUFeatures::kCRC32);
// // This scope can now use CRC32, as well as anything else that was enabled
// // before the scope.
//
// ...
//
// // At the end of the scope, the original CPU features are restored.
// }
class CPUFeaturesScope {
public:
// Start a CPUFeaturesScope on any object that implements
// `CPUFeatures* GetCPUFeatures()`.
template <typename T>
explicit CPUFeaturesScope(T* cpu_features_wrapper,
CPUFeatures::Feature feature0 = CPUFeatures::kNone,
CPUFeatures::Feature feature1 = CPUFeatures::kNone,
CPUFeatures::Feature feature2 = CPUFeatures::kNone,
CPUFeatures::Feature feature3 = CPUFeatures::kNone)
: cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
old_features_(*cpu_features_) {
cpu_features_->Combine(feature0, feature1, feature2, feature3);
}
template <typename T>
CPUFeaturesScope(T* cpu_features_wrapper, const CPUFeatures& other)
: cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
old_features_(*cpu_features_) {
cpu_features_->Combine(other);
}
~CPUFeaturesScope() { *cpu_features_ = old_features_; }
// For advanced usage, the CPUFeatures object can be accessed directly.
// The scope will restore the original state when it ends.
CPUFeatures* GetCPUFeatures() const { return cpu_features_; }
void SetCPUFeatures(const CPUFeatures& cpu_features) {
*cpu_features_ = cpu_features;
}
private:
CPUFeatures* const cpu_features_;
const CPUFeatures old_features_;
};
} // namespace vixl
#endif // VIXL_CPU_FEATURES_H

View File

@ -0,0 +1,284 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_GLOBALS_H
#define VIXL_GLOBALS_H
// Get standard C99 macros for integer types.
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
extern "C" {
#include <inttypes.h>
#include <stdint.h>
}
#include <cassert>
#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include "platform-vixl.h"
#ifdef VIXL_NEGATIVE_TESTING
#include <sstream>
#include <stdexcept>
#include <string>
#endif
namespace vixl {
typedef uint8_t byte;
const int KBytes = 1024;
const int MBytes = 1024 * KBytes;
const int kBitsPerByte = 8;
template <int SizeInBits>
struct Unsigned;
template <>
struct Unsigned<32> {
typedef uint32_t type;
};
template <>
struct Unsigned<64> {
typedef uint64_t type;
};
} // namespace vixl
// Detect the host's pointer size.
#if (UINTPTR_MAX == UINT32_MAX)
#define VIXL_HOST_POINTER_32
#elif (UINTPTR_MAX == UINT64_MAX)
#define VIXL_HOST_POINTER_64
#else
#error "Unsupported host pointer size."
#endif
#ifdef VIXL_NEGATIVE_TESTING
#define VIXL_ABORT() \
do { \
std::ostringstream oss; \
oss << "Aborting in " << __FILE__ << ", line " << __LINE__ << std::endl; \
throw std::runtime_error(oss.str()); \
} while (false)
#define VIXL_ABORT_WITH_MSG(msg) \
do { \
std::ostringstream oss; \
oss << (msg) << "in " << __FILE__ << ", line " << __LINE__ << std::endl; \
throw std::runtime_error(oss.str()); \
} while (false)
#define VIXL_CHECK(condition) \
do { \
if (!(condition)) { \
std::ostringstream oss; \
oss << "Assertion failed (" #condition ")\nin "; \
oss << __FILE__ << ", line " << __LINE__ << std::endl; \
throw std::runtime_error(oss.str()); \
} \
} while (false)
#else
#define VIXL_ABORT() \
do { \
printf("Aborting in %s, line %i\n", __FILE__, __LINE__); \
abort(); \
} while (false)
#define VIXL_ABORT_WITH_MSG(msg) \
do { \
printf("%sin %s, line %i\n", (msg), __FILE__, __LINE__); \
abort(); \
} while (false)
#define VIXL_CHECK(condition) \
do { \
if (!(condition)) { \
printf("Assertion failed (%s)\nin %s, line %i\n", \
#condition, \
__FILE__, \
__LINE__); \
abort(); \
} \
} while (false)
#endif
#ifdef VIXL_DEBUG
#define VIXL_ASSERT(condition) VIXL_CHECK(condition)
#define VIXL_UNIMPLEMENTED() \
do { \
VIXL_ABORT_WITH_MSG("UNIMPLEMENTED "); \
} while (false)
#define VIXL_UNREACHABLE() \
do { \
VIXL_ABORT_WITH_MSG("UNREACHABLE "); \
} while (false)
#else
#define VIXL_ASSERT(condition) ((void)0)
#define VIXL_UNIMPLEMENTED() ((void)0)
#define VIXL_UNREACHABLE() ((void)0)
#endif
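// Example (illustrative sketch): `VIXL_CHECK()` fires in every build mode,
// while `VIXL_ASSERT()` compiles away outside VIXL_DEBUG builds.
// `buffer_size` is a hypothetical variable here.
//
//   VIXL_CHECK(buffer_size > 0);          // Always evaluated.
//   VIXL_ASSERT((buffer_size % 4) == 0);  // Debug-only.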
// This is not as powerful as template-based assertions, but it is simple.
// It assumes that the descriptions are unique. If this starts being a problem,
// we can switch to a different implementation.
#define VIXL_CONCAT(a, b) a##b
#if __cplusplus >= 201103L
#define VIXL_STATIC_ASSERT_LINE(line_unused, condition, message) \
static_assert(condition, message)
#else
#define VIXL_STATIC_ASSERT_LINE(line, condition, message_unused) \
typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
__attribute__((unused))
#endif
#define VIXL_STATIC_ASSERT(condition) \
VIXL_STATIC_ASSERT_LINE(__LINE__, condition, "")
#define VIXL_STATIC_ASSERT_MESSAGE(condition, message) \
VIXL_STATIC_ASSERT_LINE(__LINE__, condition, message)
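// Example (illustrative sketch):
//
//   VIXL_STATIC_ASSERT(sizeof(uint64_t) == 8);
//   VIXL_STATIC_ASSERT_MESSAGE(sizeof(byte) == 1,
//                              "byte is expected to be 8 bits wide");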
#define VIXL_WARNING(message) \
do { \
printf("WARNING in %s, line %i: %s", __FILE__, __LINE__, message); \
} while (false)
template <typename T1>
inline void USE(const T1&) {}
template <typename T1, typename T2>
inline void USE(const T1&, const T2&) {}
template <typename T1, typename T2, typename T3>
inline void USE(const T1&, const T2&, const T3&) {}
template <typename T1, typename T2, typename T3, typename T4>
inline void USE(const T1&, const T2&, const T3&, const T4&) {}
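// Example (illustrative sketch): `USE()` silences "unused variable" warnings
// for values that are only inspected in debug builds. `DoWork()` is a
// hypothetical helper here.
//
//   int status = DoWork();
//   VIXL_ASSERT(status == 0);
//   USE(status);  // `status` is otherwise unused in release builds.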
#define VIXL_ALIGNMENT_EXCEPTION() \
do { \
fprintf(stderr, "ALIGNMENT EXCEPTION\t"); \
VIXL_ABORT(); \
} while (0)
// The clang::fallthrough attribute is used along with the Wimplicit-fallthrough
// argument to annotate intentional fall-through between switch labels.
// For more information please refer to:
// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
#ifndef __has_warning
#define __has_warning(x) 0
#endif
// Fallthrough annotation for Clang and C++11 (201103L).
#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
#define VIXL_FALLTHROUGH() [[clang::fallthrough]]
// Fallthrough annotation for GCC >= 7.
#elif __GNUC__ >= 7
#define VIXL_FALLTHROUGH() __attribute__((fallthrough))
#else
#define VIXL_FALLTHROUGH() \
do { \
} while (0)
#endif
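// Example (illustrative sketch): annotating an intentional fall-through
// between switch labels. `HandleCommon()` and `HandleRest()` are hypothetical
// helpers here.
//
//   switch (value) {
//     case 0:
//       HandleCommon();
//       VIXL_FALLTHROUGH();
//     case 1:
//       HandleRest();
//       break;
//     default:
//       break;
//   }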
#if __cplusplus >= 201103L
#define VIXL_NO_RETURN [[noreturn]]
#else
#define VIXL_NO_RETURN __attribute__((noreturn))
#endif
#ifdef VIXL_DEBUG
#define VIXL_NO_RETURN_IN_DEBUG_MODE VIXL_NO_RETURN
#else
#define VIXL_NO_RETURN_IN_DEBUG_MODE
#endif
#if __cplusplus >= 201103L
#define VIXL_OVERRIDE override
#else
#define VIXL_OVERRIDE
#endif
// Some functions might only be marked as "noreturn" for the DEBUG build. This
// macro should be used for such cases (for more details see what
// VIXL_UNREACHABLE expands to).
#ifdef VIXL_DEBUG
#define VIXL_DEBUG_NO_RETURN VIXL_NO_RETURN
#else
#define VIXL_DEBUG_NO_RETURN
#endif
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 1
#endif
#else
#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 0
#endif
#if VIXL_AARCH64_GENERATE_SIMULATOR_CODE
#warning "Generating Simulator instructions without Simulator support."
#endif
#endif
// We do not have a simulator for AArch32, although we can pretend we do so that
// tests that require running natively can be skipped.
#ifndef __arm__
#define VIXL_INCLUDE_SIMULATOR_AARCH32
#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 1
#endif
#else
#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 0
#endif
#endif
#ifdef USE_SIMULATOR
#error "Please see the release notes for USE_SIMULATOR."
#endif
// Target Architecture/ISA
#ifdef VIXL_INCLUDE_TARGET_A64
#define VIXL_INCLUDE_TARGET_AARCH64
#endif
#if defined(VIXL_INCLUDE_TARGET_A32) && defined(VIXL_INCLUDE_TARGET_T32)
#define VIXL_INCLUDE_TARGET_AARCH32
#elif defined(VIXL_INCLUDE_TARGET_A32)
#define VIXL_INCLUDE_TARGET_A32_ONLY
#else
#define VIXL_INCLUDE_TARGET_T32_ONLY
#endif
#endif // VIXL_GLOBALS_H

View File

@ -0,0 +1,915 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_INVALSET_H_
#define VIXL_INVALSET_H_
#include <cstring>
#include <algorithm>
#include <vector>
#include "globals-vixl.h"
namespace vixl {
// We define a custom data structure template and its iterator as `std`
// containers do not fit the performance requirements for some of our use cases.
//
// The structure behaves like an iterable unordered set with special properties
// and restrictions. "InvalSet" stands for "Invalidatable Set".
//
// Restrictions and requirements:
// - Adding an element already present in the set is illegal. In debug mode,
// this is checked at insertion time.
// - The templated class `ElementType` must provide comparison operators so that
// `std::sort()` can be used.
// - A key must be available to represent invalid elements.
// - Elements with an invalid key must compare higher or equal to any other
// element.
//
// Use cases and performance considerations:
// Our use cases have two specific properties that allow us to design this
// structure to provide fast insertion *and* fast search and deletion
// operations:
// - Elements are (generally) inserted in order (sorted according to their key).
// - A key is available to mark elements as invalid (deleted).
// The backing `std::vector` allows for fast insertions. When
// searching for an element we ensure the elements are sorted (this is generally
// the case) and perform a binary search. When deleting an element we do not
// free the associated memory immediately. Instead, an element to be deleted is
// marked with the 'invalid' key. Other methods of the container take care of
// ignoring entries marked as invalid.
// To avoid the overhead of the `std::vector` container when only a few entries
// are used, a number of elements are preallocated.
// 'ElementType' and 'KeyType' are respectively the types of the elements and
// their key. The structure only reclaims memory when safe to do so, if the
// number of elements that can be reclaimed is greater than `RECLAIM_FROM` and
// greater than `<total number of elements> / RECLAIM_FACTOR`.
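// Example (illustrative sketch): a set of hypothetical `Entry` elements keyed
// by a 32-bit offset, with INT32_MAX marking invalid (deleted) elements so
// that invalid elements compare higher than valid ones. The static
// GetKey()/SetKey() hooks declared below must be specialised for the chosen
// types.
//
//   struct Entry {
//     int32_t offset;
//     bool operator<(const Entry& other) const { return offset < other.offset; }
//     // ... plus the other comparison operators required by the set.
//   };
//   typedef InvalSet<Entry, 8, int32_t, INT32_MAX, 8, 4> EntrySet;
//
//   EntrySet set;
//   Entry e = {42};
//   set.insert(e);  // Elements must not already be present in the set.
//   set.erase(e);   // Returns the number of elements erased (0 or 1).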
// clang-format off
#define TEMPLATE_INVALSET_P_DECL \
class ElementType, \
unsigned N_PREALLOCATED_ELEMENTS, \
class KeyType, \
KeyType INVALID_KEY, \
size_t RECLAIM_FROM, \
unsigned RECLAIM_FACTOR
// clang-format on
#define TEMPLATE_INVALSET_P_DEF \
ElementType, N_PREALLOCATED_ELEMENTS, KeyType, INVALID_KEY, RECLAIM_FROM, \
RECLAIM_FACTOR
template <class S>
class InvalSetIterator; // Forward declaration.
template <TEMPLATE_INVALSET_P_DECL>
class InvalSet {
public:
InvalSet();
~InvalSet();
static const size_t kNPreallocatedElements = N_PREALLOCATED_ELEMENTS;
static const KeyType kInvalidKey = INVALID_KEY;
// C++ STL iterator interface.
typedef InvalSetIterator<InvalSet<TEMPLATE_INVALSET_P_DEF> > iterator;
iterator begin();
iterator end();
// It is illegal to insert an element already present in the set.
void insert(const ElementType& element);
// Looks for the specified element in the set and - if found - deletes it.
// The return value is the number of elements erased: either 0 or 1.
size_t erase(const ElementType& element);
// This indicates the number of (valid) elements stored in this set.
size_t size() const;
// Returns true if no elements are stored in the set.
// Note that this does not mean that the backing storage is empty: it can still
// contain invalid elements.
bool empty() const;
void clear();
const ElementType GetMinElement();
// This returns the key of the minimum element in the set.
KeyType GetMinElementKey();
static bool IsValid(const ElementType& element);
static KeyType GetKey(const ElementType& element);
static void SetKey(ElementType* element, KeyType key);
typedef ElementType _ElementType;
typedef KeyType _KeyType;
protected:
// Returns a pointer to the element in vector_ if it was found, or NULL
// otherwise.
ElementType* Search(const ElementType& element);
// The argument *must* point to an element stored in *this* set.
// This function is not allowed to move elements in the backing vector
// storage.
void EraseInternal(ElementType* element);
// The elements in the range searched must be sorted.
ElementType* BinarySearch(const ElementType& element,
ElementType* start,
ElementType* end) const;
// Sort the elements.
enum SortType {
// The 'hard' version guarantees that invalid elements are moved to the end
// of the container.
kHardSort,
// The 'soft' version only guarantees that the elements will be sorted.
// Invalid elements may still be present anywhere in the set.
kSoftSort
};
void Sort(SortType sort_type);
// Delete the elements that have an invalid key. The complexity is linear
// with the size of the vector.
void Clean();
const ElementType Front() const;
const ElementType Back() const;
// Delete invalid trailing elements and return the last valid element in the
// set.
const ElementType CleanBack();
// Returns a pointer to the start or end of the backing storage.
const ElementType* StorageBegin() const;
const ElementType* StorageEnd() const;
ElementType* StorageBegin();
ElementType* StorageEnd();
// Returns the index of the element within the backing storage. The element
// must belong to the backing storage.
size_t GetElementIndex(const ElementType* element) const;
// Returns the element at the specified index in the backing storage.
const ElementType* GetElementAt(size_t index) const;
ElementType* GetElementAt(size_t index);
static const ElementType* GetFirstValidElement(const ElementType* from,
const ElementType* end);
void CacheMinElement();
const ElementType GetCachedMinElement() const;
bool ShouldReclaimMemory() const;
void ReclaimMemory();
bool IsUsingVector() const { return vector_ != NULL; }
void SetSorted(bool sorted) { sorted_ = sorted; }
// We cache some data commonly required by users to improve performance.
// We cannot cache pointers to elements as we do not control the backing
// storage.
bool valid_cached_min_;
size_t cached_min_index_; // Valid iff `valid_cached_min_` is true.
KeyType cached_min_key_; // Valid iff `valid_cached_min_` is true.
// Indicates whether the elements are sorted.
bool sorted_;
// This represents the number of (valid) elements in this set.
size_t size_;
// The backing storage is either the array of preallocated elements or the
// vector. The structure starts by using the preallocated elements, and
// transitions (permanently) to using the vector once more than
// kNPreallocatedElements are used.
// Elements are only invalidated when using the vector. The preallocated
// storage always only contains valid elements.
ElementType preallocated_[kNPreallocatedElements];
std::vector<ElementType>* vector_;
// Iterators acquire and release this monitor. While a set is acquired,
// certain operations are illegal to ensure that the iterator will
// correctly iterate over the elements in the set.
int monitor_;
#ifdef VIXL_DEBUG
int monitor() const { return monitor_; }
void Acquire() { monitor_++; }
void Release() {
monitor_--;
VIXL_ASSERT(monitor_ >= 0);
}
#endif
private:
// The copy constructor and assignment operator are not used and the defaults
// are unsafe, so disable them (without an implementation).
#if __cplusplus >= 201103L
InvalSet(const InvalSet& other) = delete;
InvalSet operator=(const InvalSet& other) = delete;
#else
InvalSet(const InvalSet& other);
InvalSet operator=(const InvalSet& other);
#endif
friend class InvalSetIterator<InvalSet<TEMPLATE_INVALSET_P_DEF> >;
};
template <class S>
class InvalSetIterator : public std::iterator<std::forward_iterator_tag,
typename S::_ElementType> {
private:
// Redefine types to mirror the associated set types.
typedef typename S::_ElementType ElementType;
typedef typename S::_KeyType KeyType;
public:
explicit InvalSetIterator(S* inval_set = NULL);
// This class implements the standard copy-swap idiom.
~InvalSetIterator();
InvalSetIterator(const InvalSetIterator<S>& other);
InvalSetIterator<S>& operator=(InvalSetIterator<S> other);
#if __cplusplus >= 201103L
InvalSetIterator(InvalSetIterator<S>&& other) noexcept;
#endif
friend void swap(InvalSetIterator<S>& a, InvalSetIterator<S>& b) {
using std::swap;
swap(a.using_vector_, b.using_vector_);
swap(a.index_, b.index_);
swap(a.inval_set_, b.inval_set_);
}
// Return true if the iterator is at the end of the set.
bool Done() const;
// Move this iterator to the end of the set.
void Finish();
// Delete the current element and advance the iterator to point to the next
// element.
void DeleteCurrentAndAdvance();
static bool IsValid(const ElementType& element);
static KeyType GetKey(const ElementType& element);
// Extra helpers to support the forward-iterator interface.
InvalSetIterator<S>& operator++(); // Pre-increment.
InvalSetIterator<S> operator++(int); // Post-increment.
bool operator==(const InvalSetIterator<S>& rhs) const;
bool operator!=(const InvalSetIterator<S>& rhs) const {
return !(*this == rhs);
}
ElementType& operator*() { return *Current(); }
const ElementType& operator*() const { return *Current(); }
ElementType* operator->() { return Current(); }
const ElementType* operator->() const { return Current(); }
protected:
void MoveToValidElement();
// Indicates if the iterator is looking at the vector or at the preallocated
// elements.
bool using_vector_;
// Used when looking at the preallocated elements, or in debug mode when using
// the vector to track how many times the iterator has advanced.
size_t index_;
typename std::vector<ElementType>::iterator iterator_;
S* inval_set_;
// TODO: These helpers are deprecated and will be removed in future versions
// of VIXL.
ElementType* Current() const;
void Advance();
};
template <TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::InvalSet()
: valid_cached_min_(false), sorted_(true), size_(0), vector_(NULL) {
#ifdef VIXL_DEBUG
monitor_ = 0;
#endif
}
template <TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::~InvalSet() {
VIXL_ASSERT(monitor_ == 0);
delete vector_;
}
template <TEMPLATE_INVALSET_P_DECL>
typename InvalSet<TEMPLATE_INVALSET_P_DEF>::iterator
InvalSet<TEMPLATE_INVALSET_P_DEF>::begin() {
return iterator(this);
}
template <TEMPLATE_INVALSET_P_DECL>
typename InvalSet<TEMPLATE_INVALSET_P_DEF>::iterator
InvalSet<TEMPLATE_INVALSET_P_DEF>::end() {
iterator end(this);
end.Finish();
return end;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::insert(const ElementType& element) {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(IsValid(element));
VIXL_ASSERT(Search(element) == NULL);
SetSorted(empty() || (sorted_ && (element > CleanBack())));
if (IsUsingVector()) {
vector_->push_back(element);
} else {
if (size_ < kNPreallocatedElements) {
preallocated_[size_] = element;
} else {
// Transition to using the vector.
vector_ =
new std::vector<ElementType>(preallocated_, preallocated_ + size_);
vector_->push_back(element);
}
}
size_++;
if (valid_cached_min_ && (element < GetMinElement())) {
cached_min_index_ = IsUsingVector() ? vector_->size() - 1 : size_ - 1;
cached_min_key_ = GetKey(element);
valid_cached_min_ = true;
}
if (ShouldReclaimMemory()) {
ReclaimMemory();
}
}
template <TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::erase(const ElementType& element) {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(IsValid(element));
ElementType* local_element = Search(element);
if (local_element != NULL) {
EraseInternal(local_element);
return 1;
}
return 0;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::Search(
const ElementType& element) {
VIXL_ASSERT(monitor() == 0);
if (empty()) {
return NULL;
}
if (ShouldReclaimMemory()) {
ReclaimMemory();
}
if (!sorted_) {
Sort(kHardSort);
}
if (!valid_cached_min_) {
CacheMinElement();
}
return BinarySearch(element, GetElementAt(cached_min_index_), StorageEnd());
}
template <TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::size() const {
return size_;
}
template <TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::empty() const {
return size_ == 0;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::clear() {
VIXL_ASSERT(monitor() == 0);
size_ = 0;
if (IsUsingVector()) {
vector_->clear();
}
SetSorted(true);
valid_cached_min_ = false;
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::GetMinElement() {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(!empty());
CacheMinElement();
return *GetElementAt(cached_min_index_);
}
template <TEMPLATE_INVALSET_P_DECL>
KeyType InvalSet<TEMPLATE_INVALSET_P_DEF>::GetMinElementKey() {
VIXL_ASSERT(monitor() == 0);
if (valid_cached_min_) {
return cached_min_key_;
} else {
return GetKey(GetMinElement());
}
}
template <TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::IsValid(const ElementType& element) {
return GetKey(element) != kInvalidKey;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::EraseInternal(ElementType* element) {
// Note that this function must be safe even while an iterator has acquired
// this set.
VIXL_ASSERT(element != NULL);
size_t deleted_index = GetElementIndex(element);
if (IsUsingVector()) {
VIXL_ASSERT((&(vector_->front()) <= element) &&
(element <= &(vector_->back())));
SetKey(element, kInvalidKey);
} else {
VIXL_ASSERT((preallocated_ <= element) &&
(element < (preallocated_ + kNPreallocatedElements)));
ElementType* end = preallocated_ + kNPreallocatedElements;
size_t copy_size = sizeof(*element) * (end - element - 1);
memmove(element, element + 1, copy_size);
}
size_--;
if (valid_cached_min_ && (deleted_index == cached_min_index_)) {
if (sorted_ && !empty()) {
const ElementType* min = GetFirstValidElement(element, StorageEnd());
cached_min_index_ = GetElementIndex(min);
cached_min_key_ = GetKey(*min);
valid_cached_min_ = true;
} else {
valid_cached_min_ = false;
}
}
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::BinarySearch(
const ElementType& element, ElementType* start, ElementType* end) const {
if (start == end) {
return NULL;
}
VIXL_ASSERT(sorted_);
VIXL_ASSERT(start < end);
VIXL_ASSERT(!empty());
// Perform a binary search through the elements while ignoring invalid
// elements.
ElementType* elements = start;
size_t low = 0;
size_t high = (end - start) - 1;
while (low < high) {
// Find valid bounds.
while (!IsValid(elements[low]) && (low < high)) ++low;
while (!IsValid(elements[high]) && (low < high)) --high;
VIXL_ASSERT(low <= high);
// Avoid overflow when computing the middle index.
size_t middle = low + (high - low) / 2;
if ((middle == low) || (middle == high)) {
break;
}
while ((middle < high - 1) && !IsValid(elements[middle])) ++middle;
while ((low + 1 < middle) && !IsValid(elements[middle])) --middle;
if (!IsValid(elements[middle])) {
break;
}
if (elements[middle] < element) {
low = middle;
} else {
high = middle;
}
}
if (elements[low] == element) return &elements[low];
if (elements[high] == element) return &elements[high];
return NULL;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::Sort(SortType sort_type) {
if (sort_type == kSoftSort) {
if (sorted_) {
return;
}
}
VIXL_ASSERT(monitor() == 0);
if (empty()) {
return;
}
Clean();
std::sort(StorageBegin(), StorageEnd());
SetSorted(true);
cached_min_index_ = 0;
cached_min_key_ = GetKey(Front());
valid_cached_min_ = true;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::Clean() {
VIXL_ASSERT(monitor() == 0);
if (empty() || !IsUsingVector()) {
return;
}
// Manually iterate through the vector storage to discard invalid elements.
ElementType* start = &(vector_->front());
ElementType* end = start + vector_->size();
ElementType* c = start;
ElementType* first_invalid;
ElementType* first_valid;
ElementType* next_invalid;
while ((c < end) && IsValid(*c)) c++;
first_invalid = c;
while (c < end) {
while ((c < end) && !IsValid(*c)) c++;
first_valid = c;
while ((c < end) && IsValid(*c)) c++;
next_invalid = c;
ptrdiff_t n_moved_elements = (next_invalid - first_valid);
memmove(first_invalid, first_valid, n_moved_elements * sizeof(*c));
first_invalid = first_invalid + n_moved_elements;
c = next_invalid;
}
// Delete the trailing invalid elements.
vector_->erase(vector_->begin() + (first_invalid - start), vector_->end());
VIXL_ASSERT(vector_->size() == size_);
if (sorted_) {
valid_cached_min_ = true;
cached_min_index_ = 0;
cached_min_key_ = GetKey(*GetElementAt(0));
} else {
valid_cached_min_ = false;
}
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Front() const {
VIXL_ASSERT(!empty());
return IsUsingVector() ? vector_->front() : preallocated_[0];
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Back() const {
VIXL_ASSERT(!empty());
return IsUsingVector() ? vector_->back() : preallocated_[size_ - 1];
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::CleanBack() {
VIXL_ASSERT(monitor() == 0);
if (IsUsingVector()) {
// Delete the invalid trailing elements.
typename std::vector<ElementType>::reverse_iterator it = vector_->rbegin();
while (!IsValid(*it)) {
it++;
}
vector_->erase(it.base(), vector_->end());
}
return Back();
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() const {
return IsUsingVector() ? &(vector_->front()) : preallocated_;
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() const {
return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() {
return IsUsingVector() ? &(vector_->front()) : preallocated_;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() {
return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
}
template <TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::GetElementIndex(
const ElementType* element) const {
VIXL_ASSERT((StorageBegin() <= element) && (element < StorageEnd()));
return element - StorageBegin();
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::GetElementAt(
size_t index) const {
VIXL_ASSERT((IsUsingVector() && (index < vector_->size())) ||
(index < size_));
return StorageBegin() + index;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::GetElementAt(size_t index) {
VIXL_ASSERT((IsUsingVector() && (index < vector_->size())) ||
(index < size_));
return StorageBegin() + index;
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::GetFirstValidElement(
const ElementType* from, const ElementType* end) {
while ((from < end) && !IsValid(*from)) {
from++;
}
return from;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::CacheMinElement() {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(!empty());
if (valid_cached_min_) {
return;
}
if (sorted_) {
const ElementType* min = GetFirstValidElement(StorageBegin(), StorageEnd());
cached_min_index_ = GetElementIndex(min);
cached_min_key_ = GetKey(*min);
valid_cached_min_ = true;
} else {
Sort(kHardSort);
}
VIXL_ASSERT(valid_cached_min_);
}
template <TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::ShouldReclaimMemory() const {
if (!IsUsingVector()) {
return false;
}
size_t n_invalid_elements = vector_->size() - size_;
return (n_invalid_elements > RECLAIM_FROM) &&
(n_invalid_elements > vector_->size() / RECLAIM_FACTOR);
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::ReclaimMemory() {
VIXL_ASSERT(monitor() == 0);
Clean();
}
template <class S>
InvalSetIterator<S>::InvalSetIterator(S* inval_set)
: using_vector_((inval_set != NULL) && inval_set->IsUsingVector()),
index_(0),
inval_set_(inval_set) {
if (inval_set != NULL) {
inval_set->Sort(S::kSoftSort);
#ifdef VIXL_DEBUG
inval_set->Acquire();
#endif
if (using_vector_) {
iterator_ = typename std::vector<ElementType>::iterator(
inval_set_->vector_->begin());
}
MoveToValidElement();
}
}
template <class S>
InvalSetIterator<S>::~InvalSetIterator() {
#ifdef VIXL_DEBUG
if (inval_set_ != NULL) inval_set_->Release();
#endif
}
template <class S>
typename S::_ElementType* InvalSetIterator<S>::Current() const {
VIXL_ASSERT(!Done());
if (using_vector_) {
return &(*iterator_);
} else {
return &(inval_set_->preallocated_[index_]);
}
}
template <class S>
void InvalSetIterator<S>::Advance() {
++(*this);
}
template <class S>
bool InvalSetIterator<S>::Done() const {
if (using_vector_) {
bool done = (iterator_ == inval_set_->vector_->end());
VIXL_ASSERT(done == (index_ == inval_set_->size()));
return done;
} else {
return index_ == inval_set_->size();
}
}
template <class S>
void InvalSetIterator<S>::Finish() {
VIXL_ASSERT(inval_set_->sorted_);
if (using_vector_) {
iterator_ = inval_set_->vector_->end();
}
index_ = inval_set_->size();
}
template <class S>
void InvalSetIterator<S>::DeleteCurrentAndAdvance() {
if (using_vector_) {
inval_set_->EraseInternal(&(*iterator_));
MoveToValidElement();
} else {
inval_set_->EraseInternal(inval_set_->preallocated_ + index_);
}
}
template <class S>
bool InvalSetIterator<S>::IsValid(const ElementType& element) {
return S::IsValid(element);
}
template <class S>
typename S::_KeyType InvalSetIterator<S>::GetKey(const ElementType& element) {
return S::GetKey(element);
}
template <class S>
void InvalSetIterator<S>::MoveToValidElement() {
if (using_vector_) {
while ((iterator_ != inval_set_->vector_->end()) && !IsValid(*iterator_)) {
iterator_++;
}
} else {
VIXL_ASSERT(inval_set_->empty() || IsValid(inval_set_->preallocated_[0]));
// Nothing to do.
}
}
template <class S>
InvalSetIterator<S>::InvalSetIterator(const InvalSetIterator<S>& other)
: using_vector_(other.using_vector_),
index_(other.index_),
inval_set_(other.inval_set_) {
#ifdef VIXL_DEBUG
if (inval_set_ != NULL) inval_set_->Acquire();
#endif
}
#if __cplusplus >= 201103L
template <class S>
InvalSetIterator<S>::InvalSetIterator(InvalSetIterator<S>&& other) noexcept
: using_vector_(false),
index_(0),
inval_set_(NULL) {
swap(*this, other);
}
#endif
template <class S>
InvalSetIterator<S>& InvalSetIterator<S>::operator=(InvalSetIterator<S> other) {
swap(*this, other);
return *this;
}
template <class S>
bool InvalSetIterator<S>::operator==(const InvalSetIterator<S>& rhs) const {
bool equal = (inval_set_ == rhs.inval_set_);
// If the inval_set_ matches, using_vector_ must also match.
VIXL_ASSERT(!equal || (using_vector_ == rhs.using_vector_));
if (using_vector_) {
equal = equal && (iterator_ == rhs.iterator_);
// In debug mode, index_ is maintained even with using_vector_.
VIXL_ASSERT(!equal || (index_ == rhs.index_));
} else {
equal = equal && (index_ == rhs.index_);
#ifdef DEBUG
// If not using_vector_, iterator_ should be default-initialised.
typename std::vector<ElementType>::iterator default_iterator;
VIXL_ASSERT(iterator_ == default_iterator);
VIXL_ASSERT(rhs.iterator_ == default_iterator);
#endif
}
return equal;
}
template <class S>
InvalSetIterator<S>& InvalSetIterator<S>::operator++() {
// Pre-increment.
VIXL_ASSERT(!Done());
if (using_vector_) {
iterator_++;
#ifdef VIXL_DEBUG
index_++;
#endif
MoveToValidElement();
} else {
index_++;
}
return *this;
}
template <class S>
InvalSetIterator<S> InvalSetIterator<S>::operator++(int /* unused */) {
// Post-increment.
VIXL_ASSERT(!Done());
InvalSetIterator<S> old(*this);
++(*this);
return old;
}
#undef TEMPLATE_INVALSET_P_DECL
#undef TEMPLATE_INVALSET_P_DEF
} // namespace vixl
#endif // VIXL_INVALSET_H_

View File

@ -0,0 +1,75 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_MACRO_ASSEMBLER_INTERFACE_H
#define VIXL_MACRO_ASSEMBLER_INTERFACE_H
#include "assembler-base-vixl.h"
namespace vixl {
class MacroAssemblerInterface {
public:
virtual internal::AssemblerBase* AsAssemblerBase() = 0;
virtual ~MacroAssemblerInterface() {}
virtual bool AllowMacroInstructions() const = 0;
virtual bool ArePoolsBlocked() const = 0;
protected:
virtual void SetAllowMacroInstructions(bool allow) = 0;
virtual void BlockPools() = 0;
virtual void ReleasePools() = 0;
virtual void EnsureEmitPoolsFor(size_t size) = 0;
// Emit the branch over a literal/veneer pool, and any necessary padding
// before it.
virtual void EmitPoolHeader() = 0;
// When this is called, the label used for branching over the pool is bound.
// This can also generate additional padding, which must correspond to the
// alignment_ value passed to the PoolManager (which needs to keep track of
// the exact size of the generated pool).
virtual void EmitPoolFooter() = 0;
// Emit n bytes of padding that does not have to be executable.
virtual void EmitPaddingBytes(int n) = 0;
// Emit n bytes of padding that has to be executable. Implementations must
// make sure this is a multiple of the instruction size.
virtual void EmitNopBytes(int n) = 0;
// The following scopes need access to the above methods in order to implement
// pool blocking and temporarily disable the macro-assembler.
friend class ExactAssemblyScope;
friend class EmissionCheckScope;
template <typename T>
friend class PoolManager;
};
} // namespace vixl
#endif // VIXL_MACRO_ASSEMBLER_INTERFACE_H

View File

@ -0,0 +1,39 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef PLATFORM_H
#define PLATFORM_H
// Define platform-specific functionality.
extern "C" {
#include <signal.h>
}
namespace vixl {
inline void HostBreakpoint() { raise(SIGINT); }
} // namespace vixl
#endif

View File

@ -0,0 +1,522 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_POOL_MANAGER_IMPL_H_
#define VIXL_POOL_MANAGER_IMPL_H_
#include "pool-manager.h"
#include <algorithm>
#include "assembler-base-vixl.h"
namespace vixl {
template <typename T>
T PoolManager<T>::Emit(MacroAssemblerInterface* masm,
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
LocationBase<T>* new_object,
EmitOption option) {
// Make sure that the buffer still has the alignment we think it does.
VIXL_ASSERT(IsAligned(masm->AsAssemblerBase()
->GetBuffer()
->GetStartAddress<uintptr_t>(),
buffer_alignment_));
// We should not call this method when the pools are blocked.
VIXL_ASSERT(!IsBlocked());
if (objects_.empty()) return pc;
// Emit header.
if (option == kBranchRequired) {
masm->EmitPoolHeader();
// TODO: The pc at this point might not actually be aligned according to
// alignment_. This is to support the current AARCH32 MacroAssembler which
// does not have a fixed-size instruction set. In practice, the pc will be
// aligned to the alignment that instructions of the current instruction set
// need, so we do not need to align it here. All other calculations do take
// the alignment into account, which only makes the checkpoint calculations
// more conservative when we use T32. Uncomment the following assertion if
// the AARCH32 MacroAssembler is modified to only support one ISA at a
// time.
// VIXL_ASSERT(pc == AlignUp(pc, alignment_));
pc += header_size_;
} else {
// If the header is optional, we might need to add some extra padding to
// meet the minimum location of the first object.
if (pc < objects_[0].min_location_) {
int32_t padding = objects_[0].min_location_ - pc;
masm->EmitNopBytes(padding);
pc += padding;
}
}
PoolObject<T>* existing_object = GetObjectIfTracked(new_object);
// Go through all objects and emit them one by one.
for (objects_iter iter = objects_.begin(); iter != objects_.end();) {
PoolObject<T>& current = *iter;
if (ShouldSkipObject(&current,
pc,
num_bytes,
new_reference,
new_object,
existing_object)) {
++iter;
continue;
}
LocationBase<T>* label_base = current.label_base_;
T aligned_pc = AlignUp(pc, current.alignment_);
masm->EmitPaddingBytes(aligned_pc - pc);
pc = aligned_pc;
VIXL_ASSERT(pc >= current.min_location_);
VIXL_ASSERT(pc <= current.max_location_);
// First call SetLocation, which will also resolve the references, and then
// call EmitPoolObject, which might add a new reference.
label_base->SetLocation(masm->AsAssemblerBase(), pc);
label_base->EmitPoolObject(masm);
int object_size = label_base->GetPoolObjectSizeInBytes();
if (label_base->ShouldDeletePoolObjectOnPlacement()) {
label_base->MarkBound();
iter = RemoveAndDelete(iter);
} else {
VIXL_ASSERT(!current.label_base_->ShouldDeletePoolObjectOnPlacement());
current.label_base_->UpdatePoolObject(&current);
VIXL_ASSERT(current.alignment_ >= label_base->GetPoolObjectAlignment());
++iter;
}
pc += object_size;
}
// Recalculate the checkpoint before emitting the footer. The footer might
// call Bind() which will check if we need to emit.
RecalculateCheckpoint();
// Always emit footer - this might add some padding.
masm->EmitPoolFooter();
pc = AlignUp(pc, alignment_);
return pc;
}
template <typename T>
bool PoolManager<T>::ShouldSkipObject(PoolObject<T>* pool_object,
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
LocationBase<T>* new_object,
PoolObject<T>* existing_object) const {
// We assume that all objects before this have been skipped and all objects
// after this will be emitted, therefore we will emit the whole pool. Add
// the header size and alignment, as well as the number of bytes we are
// planning to emit.
T max_actual_location = pc + num_bytes + max_pool_size_;
if (new_reference != NULL) {
// If we're adding a new object, also assume that it will have to be emitted
// before the object we are considering to skip.
VIXL_ASSERT(new_object != NULL);
T new_object_alignment = std::max(new_reference->object_alignment_,
new_object->GetPoolObjectAlignment());
if ((existing_object != NULL) &&
(existing_object->alignment_ > new_object_alignment)) {
new_object_alignment = existing_object->alignment_;
}
max_actual_location +=
(new_object->GetPoolObjectSizeInBytes() + new_object_alignment - 1);
}
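  // For illustration only (hypothetical numbers): with pc == 0x1000,
  // num_bytes == 4 and max_pool_size_ == 0x40, max_actual_location is at
  // least 0x1044, so any object whose max_location_ does not exceed 0x1044
  // cannot be skipped and will be emitted in this pool.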
// Hard limit.
if (max_actual_location >= pool_object->max_location_) return false;
// Use heuristic.
return (pc < pool_object->skip_until_location_hint_);
}
template <typename T>
T PoolManager<T>::UpdateCheckpointForObject(T checkpoint,
const PoolObject<T>* object) {
checkpoint -= object->label_base_->GetPoolObjectSizeInBytes();
if (checkpoint > object->max_location_) checkpoint = object->max_location_;
checkpoint = AlignDown(checkpoint, object->alignment_);
return checkpoint;
}
template <typename T>
static T MaxCheckpoint() {
return std::numeric_limits<T>::max();
}
template <typename T>
static inline bool CheckCurrentPC(T pc, T checkpoint) {
VIXL_ASSERT(pc <= checkpoint);
// We must emit the pools if we are at the checkpoint now.
return pc == checkpoint;
}
template <typename T>
static inline bool CheckFuturePC(T pc, T checkpoint) {
// We do not need to emit the pools now if the projected future PC will be
// equal to the checkpoint (we will need to emit the pools then).
return pc > checkpoint;
}
template <typename T>
bool PoolManager<T>::MustEmit(T pc,
int num_bytes,
ForwardReference<T>* reference,
LocationBase<T>* label_base) const {
// Check if we are at or past the checkpoint.
if (CheckCurrentPC(pc, checkpoint_)) return true;
// Check if the future PC will be past the checkpoint.
pc += num_bytes;
if (CheckFuturePC(pc, checkpoint_)) return true;
// No new reference - nothing to do.
if (reference == NULL) {
VIXL_ASSERT(label_base == NULL);
return false;
}
if (objects_.empty()) {
// Basic assertions that restrictions on the new (and only) reference are
// possible to satisfy.
VIXL_ASSERT(AlignUp(pc + header_size_, alignment_) >=
reference->min_object_location_);
VIXL_ASSERT(pc <= reference->max_object_location_);
return false;
}
// Check if the object is already being tracked.
const PoolObject<T>* existing_object = GetObjectIfTracked(label_base);
if (existing_object != NULL) {
// If the existing_object is already in objects_ and its new
// alignment and new location restrictions are not stricter, skip the more
// expensive check.
if ((reference->min_object_location_ <= existing_object->min_location_) &&
(reference->max_object_location_ >= existing_object->max_location_) &&
(reference->object_alignment_ <= existing_object->alignment_)) {
return false;
}
}
// Create a temporary object.
PoolObject<T> temp(label_base);
temp.RestrictRange(reference->min_object_location_,
reference->max_object_location_);
temp.RestrictAlignment(reference->object_alignment_);
if (existing_object != NULL) {
temp.RestrictRange(existing_object->min_location_,
existing_object->max_location_);
temp.RestrictAlignment(existing_object->alignment_);
}
// Check if the new reference can be added after the end of the current pool.
// If yes, we don't need to emit.
T last_reachable = AlignDown(temp.max_location_, temp.alignment_);
const PoolObject<T>& last = objects_.back();
T after_pool = AlignDown(last.max_location_, last.alignment_) +
last.label_base_->GetPoolObjectSizeInBytes();
// The current object can be placed at the end of the pool, even if the last
// object is placed at the last possible location.
if (last_reachable >= after_pool) return false;
// The current object can be placed after the code we are about to emit and
// after the existing pool (with a pessimistic size estimate).
if (last_reachable >= pc + num_bytes + max_pool_size_) return false;
// We're not in a trivial case, so we need to recalculate the checkpoint.
// Check (conservatively) if we can fit it into the objects_ array, without
// breaking our assumptions. Here we want to recalculate the checkpoint as
// if the new reference was added to the PoolManager but without actually
// adding it (as removing it is non-trivial).
T checkpoint = MaxCheckpoint<T>();
// Will temp be the last object in objects_?
if (PoolObjectLessThan(last, temp)) {
checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
if (checkpoint < temp.min_location_) return true;
}
bool tempNotPlacedYet = true;
for (int i = static_cast<int>(objects_.size()) - 1; i >= 0; --i) {
const PoolObject<T>& current = objects_[i];
if (tempNotPlacedYet && PoolObjectLessThan(current, temp)) {
checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
if (checkpoint < temp.min_location_) return true;
if (CheckFuturePC(pc, checkpoint)) return true;
tempNotPlacedYet = false;
}
if (current.label_base_ == label_base) continue;
checkpoint = UpdateCheckpointForObject(checkpoint, &current);
if (checkpoint < current.min_location_) return true;
if (CheckFuturePC(pc, checkpoint)) return true;
}
// temp is the object with the smallest max_location_.
if (tempNotPlacedYet) {
checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
if (checkpoint < temp.min_location_) return true;
}
// Take the header into account.
checkpoint -= header_size_;
checkpoint = AlignDown(checkpoint, alignment_);
return CheckFuturePC(pc, checkpoint);
}
template <typename T>
void PoolManager<T>::RecalculateCheckpoint(SortOption sort_option) {
// TODO: Improve the max_pool_size_ estimate by starting from the
// min_location_ of the first object, calculating the end of the pool as if
// all objects were placed starting from there, and in the end adding the
// maximum object alignment found minus one (which is the maximum extra
// padding we would need if we were to relocate the pool to a different
// address).
max_pool_size_ = 0;
if (objects_.empty()) {
checkpoint_ = MaxCheckpoint<T>();
return;
}
// Sort objects by their max_location_.
if (sort_option == kSortRequired) {
std::sort(objects_.begin(), objects_.end(), PoolObjectLessThan);
}
// Add the header size and header and footer max alignment to the maximum
// pool size.
max_pool_size_ += header_size_ + 2 * (alignment_ - 1);
T checkpoint = MaxCheckpoint<T>();
int last_object_index = static_cast<int>(objects_.size()) - 1;
for (int i = last_object_index; i >= 0; --i) {
// Bring back the checkpoint by the size of the current object, unless
// we need to bring it back more, then align.
PoolObject<T>& current = objects_[i];
checkpoint = UpdateCheckpointForObject(checkpoint, &current);
VIXL_ASSERT(checkpoint >= current.min_location_);
max_pool_size_ += (current.alignment_ - 1 +
current.label_base_->GetPoolObjectSizeInBytes());
}
// Take the header into account.
checkpoint -= header_size_;
checkpoint = AlignDown(checkpoint, alignment_);
// Update the checkpoint of the pool manager.
checkpoint_ = checkpoint;
// NOTE: To handle min_location_ in the generic case, we could make a second
// pass of the objects_ vector, increasing the checkpoint as needed, while
// maintaining the alignment requirements.
// It should not be possible to have any issues with min_location_ with actual
// code, since there should always be some kind of branch over the pool,
// whether introduced by the pool emission or by the user, which will make
// sure the min_location_ requirement is satisfied. It's possible that the
// user could emit code in the literal pool and intentionally load the first
// value and then fall-through into the pool, but that is not a supported use
// of VIXL and we will assert in that case.
}
template <typename T>
bool PoolManager<T>::PoolObjectLessThan(const PoolObject<T>& a,
const PoolObject<T>& b) {
if (a.max_location_ != b.max_location_)
return (a.max_location_ < b.max_location_);
int a_size = a.label_base_->GetPoolObjectSizeInBytes();
int b_size = b.label_base_->GetPoolObjectSizeInBytes();
if (a_size != b_size) return (a_size < b_size);
if (a.alignment_ != b.alignment_) return (a.alignment_ < b.alignment_);
if (a.min_location_ != b.min_location_)
return (a.min_location_ < b.min_location_);
return false;
}
template <typename T>
void PoolManager<T>::AddObjectReference(const ForwardReference<T>* reference,
LocationBase<T>* label_base) {
VIXL_ASSERT(reference->object_alignment_ <= buffer_alignment_);
VIXL_ASSERT(label_base->GetPoolObjectAlignment() <= buffer_alignment_);
PoolObject<T>* object = GetObjectIfTracked(label_base);
if (object == NULL) {
PoolObject<T> new_object(label_base);
new_object.RestrictRange(reference->min_object_location_,
reference->max_object_location_);
new_object.RestrictAlignment(reference->object_alignment_);
Insert(new_object);
} else {
object->RestrictRange(reference->min_object_location_,
reference->max_object_location_);
object->RestrictAlignment(reference->object_alignment_);
// Move the object, if needed.
if (objects_.size() != 1) {
PoolObject<T> new_object(*object);
ptrdiff_t distance = std::distance(objects_.data(), object);
objects_.erase(objects_.begin() + distance);
Insert(new_object);
}
}
// No need to sort, we inserted the object in an already sorted array.
RecalculateCheckpoint(kNoSortRequired);
}
template <typename T>
void PoolManager<T>::Insert(const PoolObject<T>& new_object) {
bool inserted = false;
// Place the object in the right position.
for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
PoolObject<T>& current = *iter;
if (!PoolObjectLessThan(current, new_object)) {
objects_.insert(iter, new_object);
inserted = true;
break;
}
}
if (!inserted) {
objects_.push_back(new_object);
}
}
template <typename T>
void PoolManager<T>::RemoveAndDelete(PoolObject<T>* object) {
for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
PoolObject<T>& current = *iter;
if (current.label_base_ == object->label_base_) {
(void)RemoveAndDelete(iter);
return;
}
}
VIXL_UNREACHABLE();
}
template <typename T>
typename PoolManager<T>::objects_iter PoolManager<T>::RemoveAndDelete(
objects_iter iter) {
PoolObject<T>& object = *iter;
LocationBase<T>* label_base = object.label_base_;
// Check if we also need to delete the LocationBase object.
if (label_base->ShouldBeDeletedOnPoolManagerDestruction()) {
delete_on_destruction_.push_back(label_base);
}
if (label_base->ShouldBeDeletedOnPlacementByPoolManager()) {
VIXL_ASSERT(!label_base->ShouldBeDeletedOnPoolManagerDestruction());
delete label_base;
}
return objects_.erase(iter);
}
template <typename T>
T PoolManager<T>::Bind(MacroAssemblerInterface* masm,
LocationBase<T>* object,
T location) {
PoolObject<T>* existing_object = GetObjectIfTracked(object);
int alignment;
T min_location;
if (existing_object == NULL) {
alignment = object->GetMaxAlignment();
min_location = object->GetMinLocation();
} else {
alignment = existing_object->alignment_;
min_location = existing_object->min_location_;
}
// Align if needed, and add necessary padding to reach the min_location_.
T aligned_location = AlignUp(location, alignment);
masm->EmitNopBytes(aligned_location - location);
location = aligned_location;
while (location < min_location) {
masm->EmitNopBytes(alignment);
location += alignment;
}
object->SetLocation(masm->AsAssemblerBase(), location);
object->MarkBound();
if (existing_object != NULL) {
RemoveAndDelete(existing_object);
// No need to sort, we removed the object from a sorted array.
RecalculateCheckpoint(kNoSortRequired);
}
// We assume that the maximum padding we can possibly add here is less
// than the header alignment - hence that we're not going to go past our
// checkpoint.
VIXL_ASSERT(!CheckFuturePC(location, checkpoint_));
return location;
}
template <typename T>
void PoolManager<T>::Release(T pc) {
USE(pc);
if (--monitor_ == 0) {
// Ensure the pool has not been blocked for too long.
VIXL_ASSERT(pc <= checkpoint_);
}
}
template <typename T>
PoolManager<T>::~PoolManager<T>() {
#ifdef VIXL_DEBUG
// Check for unbound objects.
for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
// There should not be any bound objects left in the pool. For unbound
// objects, we will check in the destructor of the object itself.
VIXL_ASSERT(!(*iter).label_base_->IsBound());
}
#endif
// Delete objects the pool manager owns.
for (typename std::vector<LocationBase<T> *>::iterator
iter = delete_on_destruction_.begin(),
end = delete_on_destruction_.end();
iter != end;
++iter) {
delete *iter;
}
}
template <typename T>
int PoolManager<T>::GetPoolSizeForTest() const {
// Iterate over objects and return their cumulative size. This does not take
// any padding into account, just the size of the objects themselves.
int size = 0;
for (const_objects_iter iter = objects_.begin(); iter != objects_.end();
++iter) {
size += (*iter).label_base_->GetPoolObjectSizeInBytes();
}
return size;
}
}  // namespace vixl
#endif // VIXL_POOL_MANAGER_IMPL_H_


@@ -0,0 +1,555 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_POOL_MANAGER_H_
#define VIXL_POOL_MANAGER_H_
#include <stdint.h>
#include <cstddef>
#include <limits>
#include <map>
#include <vector>
#include "globals-vixl.h"
#include "macro-assembler-interface.h"
#include "utils-vixl.h"
namespace vixl {
class TestPoolManager;
// There are four classes declared in this header file:
// PoolManager, PoolObject, ForwardReference and LocationBase.
// The PoolManager manages both literal and veneer pools, and is designed to be
// shared between AArch32 and AArch64. A pool is represented as an abstract
// collection of references to objects. The manager does not need to know
// architecture-specific details about literals and veneers; the actual
// emission of the pool objects is delegated.
//
// Literal and Label will derive from LocationBase. The MacroAssembler will
// create these objects as instructions that reference pool objects are
// encountered, and ask the PoolManager to track them. The PoolManager will
// create an internal PoolObject object for each object derived from
// LocationBase. Some of these PoolObject objects will be deleted when placed
// (e.g. the ones corresponding to Literals), whereas others will be updated
// with a new range when placed (e.g. Veneers) and deleted when Bind() is
// called on the PoolManager with their corresponding object as a parameter.
//
// A ForwardReference represents a reference to a PoolObject that will be
// placed later in the instruction stream. Each ForwardReference may only refer
// to one PoolObject, but many ForwardReferences may refer to the same
// object.
//
// A PoolObject represents an object that has not yet been placed. The final
// location of a PoolObject (and hence the LocationBase object to which it
// corresponds) is constrained mostly by the instructions that refer to it, but
// PoolObjects can also have inherent constraints, such as alignment.
//
// LocationBase objects, unlike PoolObject objects, can be used outside of the
// pool manager (e.g. as manually placed literals, which may still have
// forward references that need to be resolved).
//
// At the moment, each LocationBase will have at most one PoolObject that keeps
// the relevant information for placing this object in the pool. When that
// object is placed, all forward references of the object are resolved. For
// that reason, we do not need to keep track of the ForwardReference objects in
// the PoolObject.
// T is an integral type used for representing locations. For a 32-bit
// architecture it will typically be int32_t, whereas for a 64-bit
// architecture it will be int64_t.
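//
// A rough usage sketch follows. It is illustrative only and not part of the
// API; 'masm', 'pc' and 'literal' are hypothetical names a MacroAssembler
// might use, and the numeric constants are placeholders:
//
//   PoolManager<int32_t> pool_manager(/* header_size */ 4,
//                                     /* alignment */ 4,
//                                     /* buffer_alignment */ 4);
//   // About to emit a 4-byte instruction that references 'literal':
//   ForwardReference<int32_t> ref(pc, 4, pc, pc + 4092, 4);
//   if (pool_manager.MustEmit(pc, 4, &ref, literal)) {
//     pc = pool_manager.Emit(masm, pc, 4, &ref, literal);
//     // ... re-create 'ref' for the new pc before emitting the instruction.
//   }
//   pool_manager.AddObjectReference(&ref, literal);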
template <typename T>
class ForwardReference;
template <typename T>
class PoolObject;
template <typename T>
class PoolManager;
// Represents an object that has a size and alignment, and either has a known
// location or has not been placed yet. An object of a subclass of LocationBase
// will typically keep track of a number of ForwardReferences when it has not
// yet been placed, but LocationBase does not assume or implement that
// functionality. LocationBase provides virtual methods for emitting the
// object, updating all the forward references, and giving the PoolManager
// information on the lifetime of this object and the corresponding PoolObject.
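//
// As an illustration only (this class is not part of VIXL and its names are
// hypothetical), a minimal literal-like subclass might look as follows:
//
//   class ExampleLiteral : public LocationBase<int32_t> {
//    public:
//     explicit ExampleLiteral(uint32_t value)
//         : LocationBase<int32_t>(/* type */ 0, /* size */ 4), value_(value) {}
//     void EmitPoolObject(MacroAssemblerInterface* masm) {
//       // Write the 4-byte value into the buffer via the assembler.
//     }
//     void ResolveReferences(internal::AssemblerBase* assembler) {
//       // Patch every instruction that referenced this literal using
//       // GetLocation(), then drop the recorded references.
//     }
//    private:
//     uint32_t value_;
//   };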
template <typename T>
class LocationBase {
public:
// The size of a LocationBase object is restricted to 4KB, in order to avoid
// situations where the size of the pool becomes larger than the range of
// an unconditional branch. This cannot happen without having large objects,
// as typically the range of an unconditional branch is the larger range
// an instruction supports.
// TODO: This would ideally be an architecture-specific value, perhaps
// another template parameter.
static const int kMaxObjectSize = 4 * KBytes;
// By default, LocationBase objects are aligned naturally to their size.
LocationBase(uint32_t type, int size)
: pool_object_size_(size),
pool_object_alignment_(size),
pool_object_type_(type),
is_bound_(false),
location_(0) {
VIXL_ASSERT(size > 0);
VIXL_ASSERT(size <= kMaxObjectSize);
VIXL_ASSERT(IsPowerOf2(size));
}
// Allow alignment to be specified, as long as it is no larger than the size.
LocationBase(uint32_t type, int size, int alignment)
: pool_object_size_(size),
pool_object_alignment_(alignment),
pool_object_type_(type),
is_bound_(false),
location_(0) {
VIXL_ASSERT(size > 0);
VIXL_ASSERT(size <= kMaxObjectSize);
VIXL_ASSERT(IsPowerOf2(alignment));
VIXL_ASSERT(alignment <= size);
}
// Constructor for locations that are already bound.
explicit LocationBase(T location)
: pool_object_size_(-1),
pool_object_alignment_(-1),
pool_object_type_(0),
is_bound_(true),
location_(location) {}
virtual ~LocationBase() {}
// The PoolManager should assume ownership of some objects, and delete them
// after they have been placed. This can happen for example for literals that
// are created internally to the MacroAssembler and the user doesn't get a
// handle to. By default, the PoolManager will not do this.
virtual bool ShouldBeDeletedOnPlacementByPoolManager() const { return false; }
// The PoolManager should assume ownership of some objects, and delete them
// when it is destroyed. By default, the PoolManager will not do this.
virtual bool ShouldBeDeletedOnPoolManagerDestruction() const { return false; }
// Emit the PoolObject. Derived classes will implement this method to emit
// the necessary data and/or code (for example, to emit a literal or a
// veneer). This should not add padding, as it is added explicitly by the pool
// manager.
virtual void EmitPoolObject(MacroAssemblerInterface* masm) = 0;
// Resolve the references to this object. Will encode the necessary offset
// in the instruction corresponding to each reference and then delete it.
// TODO: An alternative here would be to provide a ResolveReference()
// method that only asks the LocationBase to resolve a specific reference
// (thus allowing the pool manager to resolve some of the references only).
// This would mean we need to have some kind of API to get all the references
// to a LabelObject.
virtual void ResolveReferences(internal::AssemblerBase* assembler) = 0;
// Returns true when the PoolObject corresponding to this LocationBase object
// needs to be removed from the pool once placed, and false if it needs to
// be updated instead (in which case UpdatePoolObject will be called).
virtual bool ShouldDeletePoolObjectOnPlacement() const { return true; }
// Update the PoolObject after placing it, if necessary. This will happen for
// example in the case of a placed veneer, where we need to use a new updated
// range and a new reference (from the newly added branch instruction).
// By default, this does nothing, to avoid forcing objects that will not need
// this to have an empty implementation.
virtual void UpdatePoolObject(PoolObject<T>*) {}
// Implement heuristics for emitting this object. If a margin is to be used
// as a hint during pool emission, we will try not to emit the object if we
// are further away from the maximum reachable location by more than the
// margin.
virtual bool UsePoolObjectEmissionMargin() const { return false; }
virtual T GetPoolObjectEmissionMargin() const {
VIXL_ASSERT(UsePoolObjectEmissionMargin() == false);
return 0;
}
int GetPoolObjectSizeInBytes() const { return pool_object_size_; }
int GetPoolObjectAlignment() const { return pool_object_alignment_; }
uint32_t GetPoolObjectType() const { return pool_object_type_; }
bool IsBound() const { return is_bound_; }
T GetLocation() const { return location_; }
// This function can be called multiple times before the object is marked as
// bound with MarkBound() below. This is because some objects (e.g. the ones
// used to represent labels) can have veneers; every time we place a veneer
// we need to keep track of the location in order to resolve the references
// to the object. Reusing the location_ field for this is convenient.
void SetLocation(internal::AssemblerBase* assembler, T location) {
VIXL_ASSERT(!is_bound_);
location_ = location;
ResolveReferences(assembler);
}
void MarkBound() {
VIXL_ASSERT(!is_bound_);
is_bound_ = true;
}
// The following two functions are used when an object is bound by a call to
// PoolManager<T>::Bind().
virtual int GetMaxAlignment() const {
VIXL_ASSERT(!ShouldDeletePoolObjectOnPlacement());
return 1;
}
virtual T GetMinLocation() const {
VIXL_ASSERT(!ShouldDeletePoolObjectOnPlacement());
return 0;
}
private:
// The size of the corresponding PoolObject, in bytes.
int pool_object_size_;
// The alignment of the corresponding PoolObject; this must be a power of two.
int pool_object_alignment_;
// Different derived classes should have different type values. This can be
// used internally by the PoolManager for grouping of objects.
uint32_t pool_object_type_;
// Has the object been bound to a location yet?
bool is_bound_;
protected:
// See comment on SetLocation() for the use of this field.
T location_;
};
template <typename T>
class PoolObject {
public:
// By default, PoolObjects have no inherent position constraints.
explicit PoolObject(LocationBase<T>* parent)
: label_base_(parent),
min_location_(0),
max_location_(std::numeric_limits<T>::max()),
alignment_(parent->GetPoolObjectAlignment()),
skip_until_location_hint_(0),
type_(parent->GetPoolObjectType()) {
VIXL_ASSERT(IsPowerOf2(alignment_));
UpdateLocationHint();
}
// Reset the minimum and maximum location and the alignment of the object.
// This function is public in order to allow the LocationBase corresponding to
// this PoolObject to update the PoolObject when placed, e.g. in the case of
// veneers. The size and type of the object cannot be modified.
void Update(T min, T max, int alignment) {
// We don't use RestrictRange here as the new range is independent of the
// old range (and the maximum location is typically larger).
min_location_ = min;
max_location_ = max;
RestrictAlignment(alignment);
UpdateLocationHint();
}
private:
void RestrictRange(T min, T max) {
VIXL_ASSERT(min <= max_location_);
VIXL_ASSERT(max >= min_location_);
min_location_ = std::max(min_location_, min);
max_location_ = std::min(max_location_, max);
UpdateLocationHint();
}
void RestrictAlignment(int alignment) {
VIXL_ASSERT(IsPowerOf2(alignment));
VIXL_ASSERT(IsPowerOf2(alignment_));
alignment_ = std::max(alignment_, alignment);
}
void UpdateLocationHint() {
if (label_base_->UsePoolObjectEmissionMargin()) {
skip_until_location_hint_ =
max_location_ - label_base_->GetPoolObjectEmissionMargin();
}
}
// The LocationBase that this pool object represents.
LocationBase<T>* label_base_;
// Hard, precise location constraints for the start location of the object.
// They are both inclusive, that is the start location of the object can be
// at any location between min_location_ and max_location_, themselves
// included.
T min_location_;
T max_location_;
// The alignment must be a power of two.
int alignment_;
// Avoid generating this object until skip_until_location_hint_. This
// supports cases where placing the object in the pool has an inherent cost
// that could be avoided in some other way. Veneers are a typical example; we
// would prefer to branch directly (over a pool) rather than use veneers, so
// this value can be set using some heuristic to leave them in the pool.
// This value is only a hint, which will be ignored if it has to in order to
// meet the hard constraints we have.
T skip_until_location_hint_;
// Used only to group objects of similar type together. The PoolManager does
// not know what the types represent.
uint32_t type_;
friend class PoolManager<T>;
};
// Class that represents a forward reference. It is the responsibility of
// LocationBase objects to keep track of forward references and patch them when
// an object is placed - this class is only used by the PoolManager in order to
// restrict the requirements on PoolObjects it is tracking.
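// As an illustration with hypothetical numbers: for a 4-byte load at location
// 0x100 that can address an object anywhere in [0x100, 0x100 + 4095] with
// 4-byte alignment, the MacroAssembler might create:
//
//   ForwardReference<int32_t> ref(/* location */ 0x100,
//                                 /* size */ 4,
//                                 /* min_object_location */ 0x100,
//                                 /* max_object_location */ 0x100 + 4095,
//                                 /* object_alignment */ 4);
//
// Any PC-relative offset adjustments (such as the A32 +8 PC offset) are
// expected to already be folded into these bounds by the Assembler.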
template <typename T>
class ForwardReference {
public:
ForwardReference(T location,
int size,
T min_object_location,
T max_object_location,
int object_alignment = 1)
: location_(location),
size_(size),
object_alignment_(object_alignment),
min_object_location_(min_object_location),
max_object_location_(max_object_location) {
VIXL_ASSERT(AlignDown(max_object_location, object_alignment) >=
min_object_location);
}
bool LocationIsEncodable(T location) const {
return location >= min_object_location_ &&
location <= max_object_location_ &&
IsAligned(location, object_alignment_);
}
T GetLocation() const { return location_; }
T GetMinLocation() const { return min_object_location_; }
T GetMaxLocation() const { return max_object_location_; }
int GetAlignment() const { return object_alignment_; }
// Needed for InvalSet.
void SetLocationToInvalidateOnly(T location) { location_ = location; }
private:
// The location of the thing that contains the reference. For example, this
// can be the location of the branch or load instruction.
T location_;
// The size of the instruction that makes the reference, in bytes.
int size_;
// The alignment that the object must satisfy for this reference - must be a
// power of two.
int object_alignment_;
// Specify the possible locations where the object could be stored. AArch32's
// PC offset, and T32's PC alignment calculations should be applied by the
// Assembler, not here. The PoolManager deals only with simple locations.
// Including min_object_location_ is necessary to handle some AArch32
// instructions which have a minimum offset of 0, but also have the implicit
// PC offset.
// Note that this structure cannot handle sparse ranges, such as A32's ADR,
// but doing so is costly and probably not useful in practice. The min and
// max object location both refer to the beginning of the object, are
// inclusive and are not affected by the object size. E.g. if
// max_object_location_ is equal to X, we can place the object at location X
// regardless of its size.
T min_object_location_;
T max_object_location_;
friend class PoolManager<T>;
};
template <typename T>
class PoolManager {
public:
PoolManager(int header_size, int alignment, int buffer_alignment)
: header_size_(header_size),
alignment_(alignment),
buffer_alignment_(buffer_alignment),
checkpoint_(std::numeric_limits<T>::max()),
max_pool_size_(0),
monitor_(0) {}
~PoolManager();
// Check if we will need to emit the pool at location 'pc', when planning to
// generate a certain number of bytes. This optionally takes a
// ForwardReference we are about to generate, in which case the size of the
// reference must be included in 'num_bytes'.
bool MustEmit(T pc,
int num_bytes = 0,
ForwardReference<T>* reference = NULL,
LocationBase<T>* object = NULL) const;
enum EmitOption { kBranchRequired, kNoBranchRequired };
// Emit the pool at location 'pc', using 'masm' as the macroassembler.
// The branch over the header can be optionally omitted using 'option'.
// Returns the new PC after pool emission.
// This expects a number of bytes that are about to be emitted, to be taken
// into account in heuristics for pool object emission.
// This also optionally takes a forward reference and an object as
// parameters, to be used in the case where emission of the pool is triggered
// by adding a new reference to the pool that does not fit. The pool manager
// will need this information in order to apply its heuristics correctly.
T Emit(MacroAssemblerInterface* masm,
T pc,
int num_bytes = 0,
ForwardReference<T>* new_reference = NULL,
LocationBase<T>* new_object = NULL,
EmitOption option = kBranchRequired);
// Add 'reference' to 'object'. Should not be preceded by a call to MustEmit()
// that returned true, unless Emit() has been called successfully afterwards.
void AddObjectReference(const ForwardReference<T>* reference,
LocationBase<T>* object);
// This is to notify the pool that a LocationBase has been bound to a location
// and does not need to be tracked anymore.
// This will happen, for example, for Labels, which are manually bound by the
// user.
// This can potentially add some padding bytes in order to meet the object
// requirements, and will return the new location.
T Bind(MacroAssemblerInterface* masm, LocationBase<T>* object, T location);
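  // For example (illustrative only; 'masm' and 'label' are hypothetical), a
  // MacroAssembler binding a label at the current program counter might do:
  //
  //   pc = pool_manager.Bind(masm, label, pc);
  //
  // after which 'label' is no longer tracked and its forward references have
  // been resolved.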
// Functions for blocking and releasing the pools.
void Block() { monitor_++; }
void Release(T pc);
bool IsBlocked() const { return monitor_ != 0; }
private:
typedef typename std::vector<PoolObject<T> >::iterator objects_iter;
typedef
typename std::vector<PoolObject<T> >::const_iterator const_objects_iter;
PoolObject<T>* GetObjectIfTracked(LocationBase<T>* label) {
return const_cast<PoolObject<T>*>(
static_cast<const PoolManager<T>*>(this)->GetObjectIfTracked(label));
}
const PoolObject<T>* GetObjectIfTracked(LocationBase<T>* label) const {
for (const_objects_iter iter = objects_.begin(); iter != objects_.end();
++iter) {
const PoolObject<T>& current = *iter;
if (current.label_base_ == label) return &current;
}
return NULL;
}
// Helper function for calculating the checkpoint.
enum SortOption { kSortRequired, kNoSortRequired };
void RecalculateCheckpoint(SortOption sort_option = kSortRequired);
// Comparison function for using std::sort() on objects_. PoolObject A is
// ordered before PoolObject B when A should be emitted before B. The
// comparison depends on the max_location_, size_, alignment_ and
// min_location_.
static bool PoolObjectLessThan(const PoolObject<T>& a,
const PoolObject<T>& b);
// Helper function used in the checkpoint calculation. 'checkpoint' is the
// current checkpoint, which is modified to take 'object' into account. The
// new checkpoint is returned.
static T UpdateCheckpointForObject(T checkpoint, const PoolObject<T>* object);
// Helper function to add a new object into a sorted objects_ array.
void Insert(const PoolObject<T>& new_object);
// Helper functions to remove an object from objects_ and delete the
// corresponding LocationBase object, if necessary. This will be called
// either after placing the object, or when Bind() is called.
void RemoveAndDelete(PoolObject<T>* object);
objects_iter RemoveAndDelete(objects_iter iter);
// Helper function to check if we should skip emitting an object.
bool ShouldSkipObject(PoolObject<T>* pool_object,
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
LocationBase<T>* new_object,
PoolObject<T>* existing_object) const;
// Used only for debugging.
void DumpCurrentState(T pc) const;
// Methods used for testing only, via the test friend classes.
bool PoolIsEmptyForTest() const { return objects_.empty(); }
T GetCheckpointForTest() const { return checkpoint_; }
int GetPoolSizeForTest() const;
// The objects we are tracking references to. The objects_ vector is sorted
// at all times between calls to the public members of the PoolManager. It
// is sorted every time we add, delete or update a PoolObject.
// TODO: Consider a more efficient data structure here, to allow us to delete
// elements as we emit them.
std::vector<PoolObject<T> > objects_;
// Objects to be deleted on pool destruction.
std::vector<LocationBase<T>*> delete_on_destruction_;
// The header_size_ and alignment_ values are hardcoded for each instance of
// PoolManager. The PoolManager does not know how to emit the header, and
// relies on the EmitPoolHeader and EmitPoolFooter methods of the
// MacroAssemblerInterface for that. It will also emit padding if necessary,
// both for the header and at the end of the pool, according to alignment_,
// and using the EmitNopBytes and EmitPaddingBytes method of the
// MacroAssemblerInterface.
// The size of the header, in bytes.
int header_size_;
// The alignment of the header - must be a power of two.
int alignment_;
// The alignment of the buffer - we cannot guarantee any object alignment
// larger than this alignment. When a buffer is grown, this alignment has
// to be guaranteed.
// TODO: Consider extending this to describe the guaranteed alignment as the
// modulo of a known number.
int buffer_alignment_;
// The current checkpoint. This is the latest location at which the pool
// *must* be emitted. This should not be visible outside the pool manager
// and should only be updated in RecalculateCheckpoint.
T checkpoint_;
// Maximum size of the pool, assuming we need the maximum possible padding
// for each object and for the header. It is only updated in
// RecalculateCheckpoint.
T max_pool_size_;
// Indicates whether the emission of this pool is blocked.
int monitor_;
friend class vixl::TestPoolManager;
};
} // namespace vixl
#endif // VIXL_POOL_MANAGER_H_

File diff suppressed because it is too large