dep: Add vixl (AArch32/64 assembler)

Connor McLaughlin 2019-12-04 20:11:06 +10:00
parent baaa94d4c1
commit d520ca35eb
61 changed files with 178153 additions and 1 deletion


@@ -9,6 +9,10 @@ if(NOT ANDROID)
add_subdirectory(nativefiledialog)
endif()
if(${CPU_ARCH} STREQUAL "aarch64")
add_subdirectory(vixl)
endif()
###################### YBaseLib ############################
set(YBASELIB_SRC_BASE ${CMAKE_SOURCE_DIR}/dep/YBaseLib/Source)

8
dep/vixl/AUTHORS Normal file

@@ -0,0 +1,8 @@
# Below is a list of people and organisations that have contributed to the VIXL
# project. Entries should be added to the list as:
#
# Name/Organization <email address>
ARM Ltd. <*@arm.com>
Google Inc. <*@google.com>
Linaro <*@linaro.org>

73
dep/vixl/CMakeLists.txt Normal file

@@ -0,0 +1,73 @@
set(SRCS
include/vixl/aarch32/assembler-aarch32.h
include/vixl/aarch32/constants-aarch32.h
include/vixl/aarch32/disasm-aarch32.h
include/vixl/aarch32/instructions-aarch32.h
include/vixl/aarch32/location-aarch32.h
include/vixl/aarch32/macro-assembler-aarch32.h
include/vixl/aarch32/operands-aarch32.h
include/vixl/aarch64/abi-aarch64.h
include/vixl/aarch64/assembler-aarch64.h
include/vixl/aarch64/constants-aarch64.h
include/vixl/aarch64/cpu-aarch64.h
include/vixl/aarch64/cpu-features-auditor-aarch64.h
include/vixl/aarch64/decoder-aarch64.h
include/vixl/aarch64/disasm-aarch64.h
include/vixl/aarch64/instructions-aarch64.h
include/vixl/aarch64/instrument-aarch64.h
include/vixl/aarch64/macro-assembler-aarch64.h
include/vixl/aarch64/operands-aarch64.h
include/vixl/aarch64/simulator-aarch64.h
include/vixl/aarch64/simulator-constants-aarch64.h
include/vixl/assembler-base-vixl.h
include/vixl/code-buffer-vixl.h
include/vixl/code-generation-scopes-vixl.h
include/vixl/compiler-intrinsics-vixl.h
include/vixl/cpu-features.h
include/vixl/globals-vixl.h
include/vixl/invalset-vixl.h
include/vixl/macro-assembler-interface.h
include/vixl/platform-vixl.h
include/vixl/pool-manager-impl.h
include/vixl/pool-manager.h
include/vixl/utils-vixl.h
src/aarch32/assembler-aarch32.cc
src/aarch32/constants-aarch32.cc
src/aarch32/disasm-aarch32.cc
src/aarch32/instructions-aarch32.cc
src/aarch32/location-aarch32.cc
src/aarch32/macro-assembler-aarch32.cc
src/aarch32/operands-aarch32.cc
src/aarch64/assembler-aarch64.cc
src/aarch64/cpu-aarch64.cc
src/aarch64/cpu-features-auditor-aarch64.cc
src/aarch64/decoder-aarch64.cc
src/aarch64/disasm-aarch64.cc
src/aarch64/instructions-aarch64.cc
src/aarch64/instrument-aarch64.cc
src/aarch64/logic-aarch64.cc
src/aarch64/macro-assembler-aarch64.cc
src/aarch64/operands-aarch64.cc
src/aarch64/pointer-auth-aarch64.cc
src/aarch64/simulator-aarch64.cc
src/code-buffer-vixl.cc
src/compiler-intrinsics-vixl.cc
src/cpu-features.cc
src/utils-vixl.cc
)
add_library(vixl ${SRCS})
target_include_directories(vixl PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/include
)
target_include_directories(vixl PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/include/vixl
${CMAKE_CURRENT_SOURCE_DIR}/include/vixl/aarch32
${CMAKE_CURRENT_SOURCE_DIR}/include/vixl/aarch64
)
target_compile_definitions(vixl PUBLIC
VIXL_INCLUDE_TARGET_AARCH32
VIXL_INCLUDE_TARGET_AARCH64
VIXL_CODE_BUFFER_MMAP
)
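Because the include directory and the `VIXL_INCLUDE_TARGET_*` definitions are declared PUBLIC, any target that links against `vixl` inherits them automatically. A minimal sketch of a consuming translation unit (the consuming target and its `target_link_libraries(... vixl)` call are assumed here, not part of this commit):

    // Hypothetical consumer source file; its CMake target is assumed to link vixl.
    #include "vixl/aarch64/macro-assembler-aarch64.h"  // resolved via the PUBLIC include dir
    #ifdef VIXL_INCLUDE_TARGET_AARCH32                 // propagated PUBLIC definition
    #include "vixl/aarch32/macro-assembler-aarch32.h"
    #endif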

30
dep/vixl/LICENCE Normal file

@@ -0,0 +1,30 @@
LICENCE
=======
The software in this repository is covered by the following licence.
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

186
dep/vixl/README.md Normal file

@@ -0,0 +1,186 @@
VIXL: Armv8 Runtime Code Generation Library, 3.0.0
==================================================
Contents:
* Overview
* Licence
* Requirements
* Known limitations
* Usage
Overview
========
VIXL contains three components.
1. Programmatic **assemblers** to generate A64, A32 or T32 code at runtime. The
assemblers abstract some of the constraints of each ISA; for example, most
instructions support any immediate.
2. **Disassemblers** that can print any instruction emitted by the assemblers.
3. A **simulator** that can simulate any instruction emitted by the A64
assembler. The simulator allows generated code to be run on another
architecture without the need for a full ISA model.
The VIXL git repository can be found [on 'https://git.linaro.org'][vixl].
Changes from previous versions of VIXL can be found in the
[Changelog](doc/changelog.md).
Licence
=======
This software is covered by the licence described in the [LICENCE](LICENCE)
file.
Requirements
============
To build VIXL the following software is required:
1. Python 2.7
2. SCons 2.0
3. GCC 4.8+ or Clang 3.4+
A 64-bit host machine is required, implementing an LP64 data model. VIXL has
been tested using GCC on AArch64 Debian, GCC and Clang on amd64 Ubuntu
systems.
To run the linter and code formatting stages of the tests, the following
software is also required:
1. Git
2. [Google's `cpplint.py`][cpplint]
3. clang-format-3.8
Refer to the 'Usage' section for details.
Known Limitations for AArch64 code generation
=============================================
VIXL was developed for JavaScript engines, so a number of features from A64 were
deemed unnecessary:
* Limited rounding mode support for floating point.
* Limited support for synchronisation instructions.
* Limited support for system instructions.
* A few miscellaneous integer and floating point instructions are missing.
The VIXL simulator supports only those instructions that the VIXL assembler can
generate. The `doc` directory contains a
[list of supported A64 instructions](doc/aarch64/supported-instructions-aarch64.md).
The VIXL simulator was developed to run on 64-bit amd64 platforms. Whilst it
builds and mostly works for 32-bit x86 platforms, there are a number of
floating-point operations which do not work correctly, and a number of tests
fail as a result.
VIXL may not build using Clang 3.7, due to a compiler warning. A workaround is
to disable conversion of warnings to errors, or to delete the offending
`return` statement reported and rebuild. This problem will be fixed in the next
release.
Debug Builds
------------
Your project's build system must define `VIXL_DEBUG` (e.g. `-DVIXL_DEBUG`)
when using a VIXL library that has been built with debug enabled.
Some classes defined in VIXL header files contain fields that are only present
in debug builds, so if `VIXL_DEBUG` is defined when the library is built, but
not defined for the header files included in your project, you will see runtime
failures.
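The underlying problem is an object-layout mismatch; a hypothetical illustration (not a real VIXL class):

    // Hypothetical class; VIXL's debug-only members differ, but the effect is the same.
    class Example {
     public:
      int value_;
    #ifdef VIXL_DEBUG
      bool checked_;  // present only when VIXL_DEBUG is defined
    #endif
    };
    // If the library is built with VIXL_DEBUG but the including project is not,
    // the two sides disagree about sizeof(Example) and member offsets, which
    // surfaces as hard-to-diagnose runtime failures.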
Exclusive-Access Instructions
-----------------------------
All exclusive-access instructions are supported, but the simulator cannot
accurately simulate their behaviour as described in the ARMv8 Architecture
Reference Manual.
* A local monitor is simulated, so simulated exclusive loads and stores execute
as expected in a single-threaded environment.
* The global monitor is simulated by occasionally causing exclusive-access
instructions to fail regardless of the local monitor state.
* Load-acquire, store-release semantics are approximated by issuing a host
memory barrier after loads or before stores. The built-in
`__sync_synchronize()` is used for this purpose.
The simulator tries to be strict, and implements the following restrictions that
the ARMv8 ARM allows:
* A pair of load-/store-exclusive instructions will only succeed if they have
the same address and access size.
* Most of the time, cache-maintenance operations or explicit memory accesses
will clear the exclusive monitor.
* To ensure that simulated code does not depend on this behaviour, the
exclusive monitor will sometimes be left intact after these instructions.
Instructions affected by these limitations:
`stxrb`, `stxrh`, `stxr`, `ldxrb`, `ldxrh`, `ldxr`, `stxp`, `ldxp`, `stlxrb`,
`stlxrh`, `stlxr`, `ldaxrb`, `ldaxrh`, `ldaxr`, `stlxp`, `ldaxp`, `stlrb`,
`stlrh`, `stlr`, `ldarb`, `ldarh`, `ldar`, `clrex`.
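To illustrate how generated code typically copes with these spurious failures, here is a sketch (not taken from VIXL's examples) of an exclusive-store retry loop emitted with the AArch64 MacroAssembler:

    // Assumes a MacroAssembler `masm`; atomically adds x2 to the 64-bit value at [x0].
    Label retry;
    masm.Bind(&retry);
    masm.Ldxr(x1, MemOperand(x0));      // load-exclusive, sets the monitor
    masm.Add(x1, x1, x2);
    masm.Stxr(w3, x1, MemOperand(x0));  // w3 == 0 on success, 1 on failure
    masm.Cbnz(w3, &retry);              // retry if the monitor was cleared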
Usage
=====
Running all Tests
-----------------
The helper script `tools/test.py` will build and run every test that is provided
with VIXL, in both release and debug mode. It is a useful script for verifying
that all of VIXL's dependencies are in place and that VIXL is working as it
should.
By default, the `tools/test.py` script runs a linter to check that the source
code conforms with the code style guide, and to detect several common errors
that the compiler may not warn about. This is most useful for VIXL developers.
The linter has the following dependencies:
1. Git must be installed, and the VIXL project must be in a valid Git
repository, such as one produced using `git clone`.
2. `cpplint.py`, [as provided by Google][cpplint], must be available (and
executable) on the `PATH`.
It is possible to tell `tools/test.py` to skip the linter stage by passing
`--nolint`. This removes the dependency on `cpplint.py` and Git. The `--nolint`
option is implied if the VIXL project is a snapshot (with no `.git` directory).
Additionally, `tools/test.py` tests code formatting using `clang-format-3.8`.
If you don't have `clang-format-3.8`, disable the test using the
`--noclang-format` option.
Also note that the tests for the tracing features depend upon external `diff`
and `sed` tools. If these tools are not available in `PATH`, these tests will
fail.
Getting Started
---------------
We have separate guides for introducing VIXL, depending on what architecture you
are targeting. A guide for working with AArch32 can be found
[here][getting-started-aarch32], while the AArch64 guide is
[here][getting-started-aarch64]. Example source code is provided in the
[examples](examples) directory. You can build examples with either `scons
aarch32_examples` or `scons aarch64_examples` from the root directory, or use
`scons --help` to get a detailed list of available build targets.
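For orientation, a minimal AArch64 sketch in the spirit of those guides (hypothetical, not copied from the examples directory, and using the include layout added in this commit):

    #include "vixl/aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    int main() {
      // Emit a tiny function equivalent to: uint64_t add(uint64_t a, uint64_t b);
      MacroAssembler masm;
      Label entry;
      masm.Bind(&entry);
      masm.Add(x0, x0, x1);  // x0 = x0 + x1; arguments arrive in x0/x1, result in x0
      masm.Ret();
      masm.FinalizeCode();
      // The encoded instructions start at masm.GetBuffer()->GetStartAddress<void*>().
      return 0;
    }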
[cpplint]: http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
"Google's cpplint.py script."
[vixl]: https://git.linaro.org/arm/vixl.git
"The VIXL repository at 'https://git.linaro.org'."
[getting-started-aarch32]: doc/aarch32/getting-started-aarch32.md
"Introduction to VIXL for AArch32."
[getting-started-aarch64]: doc/aarch64/getting-started-aarch64.md
"Introduction to VIXL for AArch64."

30
dep/vixl/VERSIONS.md Normal file

@@ -0,0 +1,30 @@
Versioning
==========
Since version 3.0.0, VIXL uses [Semantic Versioning 2.0.0][semver].
Briefly:
- Backwards-incompatible changes update the _major_ version.
- New features update the _minor_ version.
- Bug fixes update the _patch_ version.
Why 3.0.0?
----------
VIXL was originally released as 1.x using snapshot releases. When we moved VIXL
into Linaro, we started working directly on `master` and stopped tagging
named releases. However, we informally called this "VIXL 2", so we are skipping
2.0.0 to avoid potential confusion.
Using `master`
--------------
Users who want to take the latest development version of VIXL can still take
commits from `master`. Our day-to-day development process hasn't changed and
these commits should still pass their own tests. However, note that commits not
explicitly tagged with a given version should be considered to be unversioned,
with no backwards-compatibility guarantees.
[semver]: https://semver.org/spec/v2.0.0.html
"Semantic Versioning 2.0.0 Specification"

File diff suppressed because it is too large.


@@ -0,0 +1,541 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CONSTANTS_AARCH32_H_
#define VIXL_CONSTANTS_AARCH32_H_
extern "C" {
#include <stdint.h>
}
#include "globals-vixl.h"
namespace vixl {
namespace aarch32 {
enum InstructionSet { A32, T32 };
#ifdef VIXL_INCLUDE_TARGET_T32_ONLY
const InstructionSet kDefaultISA = T32;
#else
const InstructionSet kDefaultISA = A32;
#endif
const unsigned kRegSizeInBits = 32;
const unsigned kRegSizeInBytes = kRegSizeInBits / 8;
const unsigned kSRegSizeInBits = 32;
const unsigned kSRegSizeInBytes = kSRegSizeInBits / 8;
const unsigned kDRegSizeInBits = 64;
const unsigned kDRegSizeInBytes = kDRegSizeInBits / 8;
const unsigned kQRegSizeInBits = 128;
const unsigned kQRegSizeInBytes = kQRegSizeInBits / 8;
const unsigned kNumberOfRegisters = 16;
const unsigned kNumberOfSRegisters = 32;
const unsigned kMaxNumberOfDRegisters = 32;
const unsigned kNumberOfQRegisters = 16;
const unsigned kNumberOfT32LowRegisters = 8;
const unsigned kIpCode = 12;
const unsigned kSpCode = 13;
const unsigned kLrCode = 14;
const unsigned kPcCode = 15;
const unsigned kT32PcDelta = 4;
const unsigned kA32PcDelta = 8;
const unsigned kRRXEncodedValue = 3;
const unsigned kCoprocMask = 0xe;
const unsigned kInvalidCoprocMask = 0xa;
const unsigned kLowestT32_32Opcode = 0xe8000000;
const uint32_t kUnknownValue = 0xdeadbeef;
const uint32_t kMaxInstructionSizeInBytes = 4;
const uint32_t kA32InstructionSizeInBytes = 4;
const uint32_t k32BitT32InstructionSizeInBytes = 4;
const uint32_t k16BitT32InstructionSizeInBytes = 2;
// Maximum size emitted by a single T32 unconditional macro-instruction.
const uint32_t kMaxT32MacroInstructionSizeInBytes = 32;
const uint32_t kCallerSavedRegistersMask = 0x500f;
const uint16_t k16BitT32NopOpcode = 0xbf00;
const uint16_t kCbzCbnzMask = 0xf500;
const uint16_t kCbzCbnzValue = 0xb100;
const int32_t kCbzCbnzRange = 126;
const int32_t kBConditionalNarrowRange = 254;
const int32_t kBNarrowRange = 2046;
const int32_t kNearLabelRange = kBNarrowRange;
enum SystemFunctionsOpcodes { kPrintfCode };
enum BranchHint { kNear, kFar, kBranchWithoutHint };
// Start of generated code.
// AArch32 version implemented by the library (v8.0).
// The encoding for vX.Y is: (X << 8) | Y.
#define AARCH32_VERSION 0x0800
enum InstructionAttribute {
kNoAttribute = 0,
kArithmetic = 0x1,
kBitwise = 0x2,
kShift = 0x4,
kAddress = 0x8,
kBranch = 0x10,
kSystem = 0x20,
kFpNeon = 0x40,
kLoadStore = 0x80,
kLoadStoreMultiple = 0x100
};
enum InstructionType {
kUndefInstructionType,
kAdc,
kAdcs,
kAdd,
kAdds,
kAddw,
kAdr,
kAnd,
kAnds,
kAsr,
kAsrs,
kB,
kBfc,
kBfi,
kBic,
kBics,
kBkpt,
kBl,
kBlx,
kBx,
kBxj,
kCbnz,
kCbz,
kClrex,
kClz,
kCmn,
kCmp,
kCrc32b,
kCrc32cb,
kCrc32ch,
kCrc32cw,
kCrc32h,
kCrc32w,
kDmb,
kDsb,
kEor,
kEors,
kFldmdbx,
kFldmiax,
kFstmdbx,
kFstmiax,
kHlt,
kHvc,
kIsb,
kIt,
kLda,
kLdab,
kLdaex,
kLdaexb,
kLdaexd,
kLdaexh,
kLdah,
kLdm,
kLdmda,
kLdmdb,
kLdmea,
kLdmed,
kLdmfa,
kLdmfd,
kLdmib,
kLdr,
kLdrb,
kLdrd,
kLdrex,
kLdrexb,
kLdrexd,
kLdrexh,
kLdrh,
kLdrsb,
kLdrsh,
kLsl,
kLsls,
kLsr,
kLsrs,
kMla,
kMlas,
kMls,
kMov,
kMovs,
kMovt,
kMovw,
kMrs,
kMsr,
kMul,
kMuls,
kMvn,
kMvns,
kNop,
kOrn,
kOrns,
kOrr,
kOrrs,
kPkhbt,
kPkhtb,
kPld,
kPldw,
kPli,
kPop,
kPush,
kQadd,
kQadd16,
kQadd8,
kQasx,
kQdadd,
kQdsub,
kQsax,
kQsub,
kQsub16,
kQsub8,
kRbit,
kRev,
kRev16,
kRevsh,
kRor,
kRors,
kRrx,
kRrxs,
kRsb,
kRsbs,
kRsc,
kRscs,
kSadd16,
kSadd8,
kSasx,
kSbc,
kSbcs,
kSbfx,
kSdiv,
kSel,
kShadd16,
kShadd8,
kShasx,
kShsax,
kShsub16,
kShsub8,
kSmlabb,
kSmlabt,
kSmlad,
kSmladx,
kSmlal,
kSmlalbb,
kSmlalbt,
kSmlald,
kSmlaldx,
kSmlals,
kSmlaltb,
kSmlaltt,
kSmlatb,
kSmlatt,
kSmlawb,
kSmlawt,
kSmlsd,
kSmlsdx,
kSmlsld,
kSmlsldx,
kSmmla,
kSmmlar,
kSmmls,
kSmmlsr,
kSmmul,
kSmmulr,
kSmuad,
kSmuadx,
kSmulbb,
kSmulbt,
kSmull,
kSmulls,
kSmultb,
kSmultt,
kSmulwb,
kSmulwt,
kSmusd,
kSmusdx,
kSsat,
kSsat16,
kSsax,
kSsub16,
kSsub8,
kStl,
kStlb,
kStlex,
kStlexb,
kStlexd,
kStlexh,
kStlh,
kStm,
kStmda,
kStmdb,
kStmea,
kStmed,
kStmfa,
kStmfd,
kStmib,
kStr,
kStrb,
kStrd,
kStrex,
kStrexb,
kStrexd,
kStrexh,
kStrh,
kSub,
kSubs,
kSubw,
kSvc,
kSxtab,
kSxtab16,
kSxtah,
kSxtb,
kSxtb16,
kSxth,
kTbb,
kTbh,
kTeq,
kTst,
kUadd16,
kUadd8,
kUasx,
kUbfx,
kUdf,
kUdiv,
kUhadd16,
kUhadd8,
kUhasx,
kUhsax,
kUhsub16,
kUhsub8,
kUmaal,
kUmlal,
kUmlals,
kUmull,
kUmulls,
kUqadd16,
kUqadd8,
kUqasx,
kUqsax,
kUqsub16,
kUqsub8,
kUsad8,
kUsada8,
kUsat,
kUsat16,
kUsax,
kUsub16,
kUsub8,
kUxtab,
kUxtab16,
kUxtah,
kUxtb,
kUxtb16,
kUxth,
kVaba,
kVabal,
kVabd,
kVabdl,
kVabs,
kVacge,
kVacgt,
kVacle,
kVaclt,
kVadd,
kVaddhn,
kVaddl,
kVaddw,
kVand,
kVbic,
kVbif,
kVbit,
kVbsl,
kVceq,
kVcge,
kVcgt,
kVcle,
kVcls,
kVclt,
kVclz,
kVcmp,
kVcmpe,
kVcnt,
kVcvt,
kVcvta,
kVcvtb,
kVcvtm,
kVcvtn,
kVcvtp,
kVcvtr,
kVcvtt,
kVdiv,
kVdup,
kVeor,
kVext,
kVfma,
kVfms,
kVfnma,
kVfnms,
kVhadd,
kVhsub,
kVld1,
kVld2,
kVld3,
kVld4,
kVldm,
kVldmdb,
kVldmia,
kVldr,
kVmax,
kVmaxnm,
kVmin,
kVminnm,
kVmla,
kVmlal,
kVmls,
kVmlsl,
kVmov,
kVmovl,
kVmovn,
kVmrs,
kVmsr,
kVmul,
kVmull,
kVmvn,
kVneg,
kVnmla,
kVnmls,
kVnmul,
kVorn,
kVorr,
kVpadal,
kVpadd,
kVpaddl,
kVpmax,
kVpmin,
kVpop,
kVpush,
kVqabs,
kVqadd,
kVqdmlal,
kVqdmlsl,
kVqdmulh,
kVqdmull,
kVqmovn,
kVqmovun,
kVqneg,
kVqrdmulh,
kVqrshl,
kVqrshrn,
kVqrshrun,
kVqshl,
kVqshlu,
kVqshrn,
kVqshrun,
kVqsub,
kVraddhn,
kVrecpe,
kVrecps,
kVrev16,
kVrev32,
kVrev64,
kVrhadd,
kVrinta,
kVrintm,
kVrintn,
kVrintp,
kVrintr,
kVrintx,
kVrintz,
kVrshl,
kVrshr,
kVrshrn,
kVrsqrte,
kVrsqrts,
kVrsra,
kVrsubhn,
kVseleq,
kVselge,
kVselgt,
kVselvs,
kVshl,
kVshll,
kVshr,
kVshrn,
kVsli,
kVsqrt,
kVsra,
kVsri,
kVst1,
kVst2,
kVst3,
kVst4,
kVstm,
kVstmdb,
kVstmia,
kVstr,
kVsub,
kVsubhn,
kVsubl,
kVsubw,
kVswp,
kVtbl,
kVtbx,
kVtrn,
kVtst,
kVuzp,
kVzip,
kYield
};
const char* ToCString(InstructionType type);
// End of generated code.
inline InstructionAttribute operator|(InstructionAttribute left,
InstructionAttribute right) {
return static_cast<InstructionAttribute>(static_cast<uint32_t>(left) |
static_cast<uint32_t>(right));
}
} // namespace aarch32
} // namespace vixl
#endif // VIXL_CONSTANTS_AARCH32_H_

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,411 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH32_LABEL_AARCH32_H_
#define VIXL_AARCH32_LABEL_AARCH32_H_
extern "C" {
#include <stdint.h>
}
#include <algorithm>
#include <cstddef>
#include <iomanip>
#include <list>
#include "invalset-vixl.h"
#include "pool-manager.h"
#include "utils-vixl.h"
#include "constants-aarch32.h"
#include "instructions-aarch32.h"
namespace vixl {
namespace aarch32 {
class MacroAssembler;
class Location : public LocationBase<int32_t> {
friend class Assembler;
friend class MacroAssembler;
public:
// Unbound location that can be used with the assembler bind() method and
// with the assembler methods for generating instructions, but will never
// be handled by the pool manager.
Location()
: LocationBase<int32_t>(kRawLocation, 1 /* dummy size*/),
referenced_(false) {}
typedef int32_t Offset;
~Location() {
#ifdef VIXL_DEBUG
if (IsReferenced() && !IsBound()) {
VIXL_ABORT_WITH_MSG("Location, label or literal used but not bound.\n");
}
#endif
}
bool IsReferenced() const { return referenced_; }
private:
class EmitOperator {
public:
explicit EmitOperator(InstructionSet isa) : isa_(isa) {
#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
USE(isa_);
VIXL_ASSERT(isa == A32);
#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
USE(isa_);
VIXL_ASSERT(isa == T32);
#endif
}
virtual ~EmitOperator() {}
virtual uint32_t Encode(uint32_t /*instr*/,
Location::Offset /*pc*/,
const Location* /*label*/) const {
return 0;
}
#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
bool IsUsingT32() const { return false; }
#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
bool IsUsingT32() const { return true; }
#else
bool IsUsingT32() const { return isa_ == T32; }
#endif
private:
InstructionSet isa_;
};
protected:
class ForwardRef : public ForwardReference<int32_t> {
public:
// Default constructor for InvalSet.
ForwardRef() : ForwardReference<int32_t>(0, 0, 0, 0, 1), op_(NULL) {}
ForwardRef(const Location::EmitOperator* op,
int32_t location,
int size,
int32_t min_object_location,
int32_t max_object_location,
int object_alignment = 1)
: ForwardReference<int32_t>(location,
size,
min_object_location,
max_object_location,
object_alignment),
op_(op) {}
const Location::EmitOperator* op() const { return op_; }
// We must provide comparison operators to work with InvalSet.
bool operator==(const ForwardRef& other) const {
return GetLocation() == other.GetLocation();
}
bool operator<(const ForwardRef& other) const {
return GetLocation() < other.GetLocation();
}
bool operator<=(const ForwardRef& other) const {
return GetLocation() <= other.GetLocation();
}
bool operator>(const ForwardRef& other) const {
return GetLocation() > other.GetLocation();
}
private:
const Location::EmitOperator* op_;
};
static const int kNPreallocatedElements = 4;
// The following parameters will not affect ForwardRefList in practice, as we
// resolve all references at once and clear the list, so we do not need to
// remove individual elements by invalidating them.
static const int32_t kInvalidLinkKey = INT32_MAX;
static const size_t kReclaimFrom = 512;
static const size_t kReclaimFactor = 2;
typedef InvalSet<ForwardRef,
kNPreallocatedElements,
int32_t,
kInvalidLinkKey,
kReclaimFrom,
kReclaimFactor>
ForwardRefListBase;
typedef InvalSetIterator<ForwardRefListBase> ForwardRefListIteratorBase;
class ForwardRefList : public ForwardRefListBase {
public:
ForwardRefList() : ForwardRefListBase() {}
using ForwardRefListBase::Back;
using ForwardRefListBase::Front;
};
class ForwardRefListIterator : public ForwardRefListIteratorBase {
public:
explicit ForwardRefListIterator(Location* location)
: ForwardRefListIteratorBase(&location->forward_) {}
// TODO: Remove these and use the STL-like interface instead. We'll need a
// const_iterator implemented for this.
using ForwardRefListIteratorBase::Advance;
using ForwardRefListIteratorBase::Current;
};
// For InvalSet::GetKey() and InvalSet::SetKey().
friend class InvalSet<ForwardRef,
kNPreallocatedElements,
int32_t,
kInvalidLinkKey,
kReclaimFrom,
kReclaimFactor>;
private:
virtual void ResolveReferences(internal::AssemblerBase* assembler)
VIXL_OVERRIDE;
void SetReferenced() { referenced_ = true; }
bool HasForwardReferences() const { return !forward_.empty(); }
ForwardRef GetLastForwardReference() const {
VIXL_ASSERT(HasForwardReferences());
return forward_.Back();
}
// Add forward reference to this object. Called from the assembler.
void AddForwardRef(int32_t instr_location,
const EmitOperator& op,
const ReferenceInfo* info);
// Check if we need to add padding when binding this object, in order to
// meet the minimum location requirement.
bool Needs16BitPadding(int location) const;
void EncodeLocationFor(internal::AssemblerBase* assembler,
int32_t from,
const Location::EmitOperator* encoder);
// True if the label has been used at least once.
bool referenced_;
protected:
// Types passed to LocationBase. Must be distinct for unbound Locations (not
// relevant for bound locations, as they don't have a corresponding
// PoolObject).
static const int kRawLocation = 0; // Will not be used by the pool manager.
static const int kVeneerType = 1;
static const int kLiteralType = 2;
// Contains the references to the unbound label
ForwardRefList forward_;
// To be used only by derived classes.
Location(uint32_t type, int size, int alignment)
: LocationBase<int32_t>(type, size, alignment), referenced_(false) {}
// To be used only by derived classes.
explicit Location(Offset location)
: LocationBase<int32_t>(location), referenced_(false) {}
virtual int GetMaxAlignment() const VIXL_OVERRIDE;
virtual int GetMinLocation() const VIXL_OVERRIDE;
private:
// Included to make the class concrete, however should never be called.
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE {
USE(masm);
VIXL_UNREACHABLE();
}
};
class Label : public Location {
static const int kVeneerSize = 4;
// Use an alignment of 1 for all architectures. Even though we can bind an
// unused label, because of the way the MacroAssembler works we can always be
// sure to have the correct buffer alignment for the instruction set we are
// using, so we do not need to enforce additional alignment requirements
// here.
// TODO: Consider modifying the interface of the pool manager to pass an
// optional additional alignment to Bind() in order to handle cases where the
// buffer could be unaligned.
static const int kVeneerAlignment = 1;
public:
Label() : Location(kVeneerType, kVeneerSize, kVeneerAlignment) {}
explicit Label(Offset location) : Location(location) {}
private:
virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
return false;
}
virtual bool ShouldDeletePoolObjectOnPlacement() const VIXL_OVERRIDE {
return false;
}
virtual void UpdatePoolObject(PoolObject<int32_t>* object) VIXL_OVERRIDE;
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
virtual bool UsePoolObjectEmissionMargin() const VIXL_OVERRIDE {
return true;
}
virtual int32_t GetPoolObjectEmissionMargin() const VIXL_OVERRIDE {
VIXL_ASSERT(UsePoolObjectEmissionMargin() == true);
return 1 * KBytes;
}
};
class RawLiteral : public Location {
// Some load instructions require alignment to 4 bytes. Since we do
// not know what instructions will reference a literal after we place
// it, we enforce a 4 byte alignment for literals that are 4 bytes or
// larger.
static const int kLiteralAlignment = 4;
public:
enum PlacementPolicy { kPlacedWhenUsed, kManuallyPlaced };
enum DeletionPolicy {
kDeletedOnPlacementByPool,
kDeletedOnPoolDestruction,
kManuallyDeleted
};
RawLiteral(const void* addr,
int size,
PlacementPolicy placement_policy = kPlacedWhenUsed,
DeletionPolicy deletion_policy = kManuallyDeleted)
: Location(kLiteralType,
size,
(size < kLiteralAlignment) ? size : kLiteralAlignment),
addr_(addr),
manually_placed_(placement_policy == kManuallyPlaced),
deletion_policy_(deletion_policy) {
// We can't have manually placed literals that are not manually deleted.
VIXL_ASSERT(!IsManuallyPlaced() ||
(GetDeletionPolicy() == kManuallyDeleted));
}
RawLiteral(const void* addr, int size, DeletionPolicy deletion_policy)
: Location(kLiteralType,
size,
(size < kLiteralAlignment) ? size : kLiteralAlignment),
addr_(addr),
manually_placed_(false),
deletion_policy_(deletion_policy) {}
const void* GetDataAddress() const { return addr_; }
int GetSize() const { return GetPoolObjectSizeInBytes(); }
bool IsManuallyPlaced() const { return manually_placed_; }
private:
DeletionPolicy GetDeletionPolicy() const { return deletion_policy_; }
virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
return GetDeletionPolicy() == kDeletedOnPlacementByPool;
}
virtual bool ShouldBeDeletedOnPoolManagerDestruction() const VIXL_OVERRIDE {
return GetDeletionPolicy() == kDeletedOnPoolDestruction;
}
virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;
// Data address before it's moved into the code buffer.
const void* const addr_;
// When this flag is true, the label will be placed manually.
bool manually_placed_;
// Specifies when the literal is removed from memory. It can be deleted:
//   when it is moved into the code buffer: kDeletedOnPlacementByPool
//   when the pool itself is deleted:       kDeletedOnPoolDestruction
//   or left to the application:            kManuallyDeleted.
DeletionPolicy deletion_policy_;
friend class MacroAssembler;
};
template <typename T>
class Literal : public RawLiteral {
public:
explicit Literal(const T& value,
PlacementPolicy placement_policy = kPlacedWhenUsed,
DeletionPolicy deletion_policy = kManuallyDeleted)
: RawLiteral(&value_, sizeof(T), placement_policy, deletion_policy),
value_(value) {}
explicit Literal(const T& value, DeletionPolicy deletion_policy)
: RawLiteral(&value_, sizeof(T), deletion_policy), value_(value) {}
void UpdateValue(const T& value, CodeBuffer* buffer) {
value_ = value;
if (IsBound()) {
buffer->UpdateData(GetLocation(), GetDataAddress(), GetSize());
}
}
private:
T value_;
};
class StringLiteral : public RawLiteral {
public:
explicit StringLiteral(const char* str,
PlacementPolicy placement_policy = kPlacedWhenUsed,
DeletionPolicy deletion_policy = kManuallyDeleted)
: RawLiteral(str,
static_cast<int>(strlen(str) + 1),
placement_policy,
deletion_policy) {
VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
}
explicit StringLiteral(const char* str, DeletionPolicy deletion_policy)
: RawLiteral(str, static_cast<int>(strlen(str) + 1), deletion_policy) {
VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
}
};
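// Illustrative usage sketch (not part of this header): assuming the aarch32
// MacroAssembler's Ldr(Register, RawLiteral*) overload and an instance `masm`,
// a pool-managed literal can be created and referenced like this:
//
//   Literal<uint32_t> magic(0x12345678);
//   masm.Ldr(r0, &magic);  // pc-relative load; the pool manager decides where
//                          // the 4-byte literal is eventually placed.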
} // namespace aarch32
// Required InvalSet template specialisations.
#define INVAL_SET_TEMPLATE_PARAMETERS \
aarch32::Location::ForwardRef, aarch32::Location::kNPreallocatedElements, \
int32_t, aarch32::Location::kInvalidLinkKey, \
aarch32::Location::kReclaimFrom, aarch32::Location::kReclaimFactor
template <>
inline int32_t InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::GetKey(
const aarch32::Location::ForwardRef& element) {
return element.GetLocation();
}
template <>
inline void InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::SetKey(
aarch32::Location::ForwardRef* element, int32_t key) {
element->SetLocationToInvalidateOnly(key);
}
#undef INVAL_SET_TEMPLATE_PARAMETERS
} // namespace vixl
#endif // VIXL_AARCH32_LABEL_AARCH32_H_

File diff suppressed because it is too large.


@@ -0,0 +1,927 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH32_OPERANDS_AARCH32_H_
#define VIXL_AARCH32_OPERANDS_AARCH32_H_
#include "aarch32/instructions-aarch32.h"
namespace vixl {
namespace aarch32 {
// Operand represents generic set of arguments to pass to an instruction.
//
// Usage: <instr> <Rd> , <Operand>
//
// where <instr> is the instruction to use (e.g., Mov(), Rsb(), etc.)
// <Rd> is the destination register
// <Operand> is the rest of the arguments to the instruction
//
// <Operand> can be one of:
//
// #<imm> - an unsigned 32-bit immediate value
// <Rm>, <shift> <#amount> - immediate shifted register
// <Rm>, <shift> <Rs> - register shifted register
//
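// Illustrative examples (not part of this header), assuming a MacroAssembler
// instance `masm` and its usual Mov/Add entry points:
//
//   masm.Mov(r0, 42);                       // #<imm>
//   masm.Add(r0, r1, Operand(r2, LSL, 2));  // <Rm>, <shift> #<amount>
//   masm.Mov(r0, Operand(r1, LSR, r2));     // <Rm>, <shift> <Rs>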
class Operand {
public:
// { #<immediate> }
// where <immediate> is uint32_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(uint32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoReg),
shift_(LSL),
amount_(0),
rs_(NoReg) {}
Operand(int32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoReg),
shift_(LSL),
amount_(0),
rs_(NoReg) {}
// rm
// where rm is the base register
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(Register rm) // NOLINT(runtime/explicit)
: imm_(0),
rm_(rm),
shift_(LSL),
amount_(0),
rs_(NoReg) {
VIXL_ASSERT(rm_.IsValid());
}
// rm, <shift>
// where rm is the base register, and
// <shift> is RRX
Operand(Register rm, Shift shift)
: imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(NoReg) {
VIXL_ASSERT(rm_.IsValid());
VIXL_ASSERT(shift_.IsRRX());
}
// rm, <shift> #<amount>
// where rm is the base register, and
// <shift> is one of {LSL, LSR, ASR, ROR}, and
// <amount> is uint6_t.
Operand(Register rm, Shift shift, uint32_t amount)
: imm_(0), rm_(rm), shift_(shift), amount_(amount), rs_(NoReg) {
VIXL_ASSERT(rm_.IsValid());
VIXL_ASSERT(!shift_.IsRRX());
#ifdef VIXL_DEBUG
switch (shift_.GetType()) {
case LSL:
VIXL_ASSERT(amount_ <= 31);
break;
case ROR:
VIXL_ASSERT(amount_ <= 31);
break;
case LSR:
case ASR:
VIXL_ASSERT(amount_ <= 32);
break;
case RRX:
default:
VIXL_UNREACHABLE();
break;
}
#endif
}
// rm, <shift> rs
// where rm is the base register, and
// <shift> is one of {LSL, LSR, ASR, ROR}, and
// rs is the shifted register
Operand(Register rm, Shift shift, Register rs)
: imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(rs) {
VIXL_ASSERT(rm_.IsValid() && rs_.IsValid());
VIXL_ASSERT(!shift_.IsRRX());
}
// Factory methods creating operands from any integral or pointer type. The
// source must fit into 32 bits.
template <typename T>
static Operand From(T immediate) {
#if __cplusplus >= 201103L
VIXL_STATIC_ASSERT_MESSAGE(std::is_integral<T>::value,
"An integral type is required to build an "
"immediate operand.");
#endif
// Allow either a signed or an unsigned 32-bit integer to be passed, but store it
// as a uint32_t. The signedness information will be lost. We have to add a
// static_cast to make sure the compiler does not complain about implicit 64
// to 32 narrowing. It's perfectly acceptable for the user to pass a 64-bit
// value, as long as it can be encoded in 32 bits.
VIXL_ASSERT(IsInt32(immediate) || IsUint32(immediate));
return Operand(static_cast<uint32_t>(immediate));
}
template <typename T>
static Operand From(T* address) {
uintptr_t address_as_integral = reinterpret_cast<uintptr_t>(address);
VIXL_ASSERT(IsUint32(address_as_integral));
return Operand(static_cast<uint32_t>(address_as_integral));
}
bool IsImmediate() const { return !rm_.IsValid(); }
bool IsPlainRegister() const {
return rm_.IsValid() && !shift_.IsRRX() && !rs_.IsValid() && (amount_ == 0);
}
bool IsImmediateShiftedRegister() const {
return rm_.IsValid() && !rs_.IsValid();
}
bool IsRegisterShiftedRegister() const {
return rm_.IsValid() && rs_.IsValid();
}
uint32_t GetImmediate() const {
VIXL_ASSERT(IsImmediate());
return imm_;
}
int32_t GetSignedImmediate() const {
VIXL_ASSERT(IsImmediate());
int32_t result;
memcpy(&result, &imm_, sizeof(result));
return result;
}
Register GetBaseRegister() const {
VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister());
return rm_;
}
Shift GetShift() const {
VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister());
return shift_;
}
uint32_t GetShiftAmount() const {
VIXL_ASSERT(IsImmediateShiftedRegister());
return amount_;
}
Register GetShiftRegister() const {
VIXL_ASSERT(IsRegisterShiftedRegister());
return rs_;
}
uint32_t GetTypeEncodingValue() const {
return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue();
}
private:
// Forbid implicitly creating operands around types that cannot be encoded
// into a uint32_t without loss.
#if __cplusplus >= 201103L
Operand(int64_t) = delete; // NOLINT(runtime/explicit)
Operand(uint64_t) = delete; // NOLINT(runtime/explicit)
Operand(float) = delete; // NOLINT(runtime/explicit)
Operand(double) = delete; // NOLINT(runtime/explicit)
#else
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(int64_t) { // NOLINT(runtime/explicit)
VIXL_UNREACHABLE();
}
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(uint64_t) { // NOLINT(runtime/explicit)
VIXL_UNREACHABLE();
}
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(float) { // NOLINT
VIXL_UNREACHABLE();
}
VIXL_NO_RETURN_IN_DEBUG_MODE Operand(double) { // NOLINT
VIXL_UNREACHABLE();
}
#endif
uint32_t imm_;
Register rm_;
Shift shift_;
uint32_t amount_;
Register rs_;
};
std::ostream& operator<<(std::ostream& os, const Operand& operand);
class NeonImmediate {
template <typename T>
struct DataTypeIdentity {
T data_type_;
};
public:
// { #<immediate> }
// where <immediate> is 32 bit number.
// This is allowed to be an implicit constructor because NeonImmediate is
// a wrapper class that doesn't normally perform any type conversion.
NeonImmediate(uint32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I32) {}
NeonImmediate(int immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I32) {}
// { #<immediate> }
// where <immediate> is a 64 bit number
// This is allowed to be an implicit constructor because NeonImmediate is
// a wrapper class that doesn't normally perform any type conversion.
NeonImmediate(int64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I64) {}
NeonImmediate(uint64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(I64) {}
// { #<immediate> }
// where <immediate> is a non zero floating point number which can be encoded
// as an 8 bit floating point (checked by the constructor).
// This is allowed to be an implicit constructor because NeonImmediate is
// a wrapper class that doesn't normally perform any type conversion.
NeonImmediate(float immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(F32) {}
NeonImmediate(double immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
immediate_type_(F64) {}
NeonImmediate(const NeonImmediate& src)
: imm_(src.imm_), immediate_type_(src.immediate_type_) {}
template <typename T>
T GetImmediate() const {
return GetImmediate(DataTypeIdentity<T>());
}
template <typename T>
T GetImmediate(const DataTypeIdentity<T>&) const {
VIXL_ASSERT(sizeof(T) <= sizeof(uint32_t));
VIXL_ASSERT(CanConvert<T>());
if (immediate_type_.Is(I64))
return static_cast<T>(imm_.u64_ & static_cast<T>(-1));
if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0;
return static_cast<T>(imm_.u32_ & static_cast<T>(-1));
}
uint64_t GetImmediate(const DataTypeIdentity<uint64_t>&) const {
VIXL_ASSERT(CanConvert<uint64_t>());
if (immediate_type_.Is(I32)) return imm_.u32_;
if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0;
return imm_.u64_;
}
float GetImmediate(const DataTypeIdentity<float>&) const {
VIXL_ASSERT(CanConvert<float>());
if (immediate_type_.Is(F64)) return static_cast<float>(imm_.d_);
return imm_.f_;
}
double GetImmediate(const DataTypeIdentity<double>&) const {
VIXL_ASSERT(CanConvert<double>());
if (immediate_type_.Is(F32)) return static_cast<double>(imm_.f_);
return imm_.d_;
}
bool IsInteger32() const { return immediate_type_.Is(I32); }
bool IsInteger64() const { return immediate_type_.Is(I64); }
bool IsInteger() const { return IsInteger32() | IsInteger64(); }
bool IsFloat() const { return immediate_type_.Is(F32); }
bool IsDouble() const { return immediate_type_.Is(F64); }
bool IsFloatZero() const {
if (immediate_type_.Is(F32)) return imm_.f_ == 0.0f;
if (immediate_type_.Is(F64)) return imm_.d_ == 0.0;
return false;
}
template <typename T>
bool CanConvert() const {
return CanConvert(DataTypeIdentity<T>());
}
template <typename T>
bool CanConvert(const DataTypeIdentity<T>&) const {
VIXL_ASSERT(sizeof(T) < sizeof(uint32_t));
return (immediate_type_.Is(I32) && ((imm_.u32_ >> (8 * sizeof(T))) == 0)) ||
(immediate_type_.Is(I64) && ((imm_.u64_ >> (8 * sizeof(T))) == 0)) ||
(immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) ||
(immediate_type_.Is(F64) && (imm_.d_ == 0.0));
}
bool CanConvert(const DataTypeIdentity<uint32_t>&) const {
return immediate_type_.Is(I32) ||
(immediate_type_.Is(I64) && ((imm_.u64_ >> 32) == 0)) ||
(immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) ||
(immediate_type_.Is(F64) && (imm_.d_ == 0.0));
}
bool CanConvert(const DataTypeIdentity<uint64_t>&) const {
return IsInteger() || CanConvert<uint32_t>();
}
bool CanConvert(const DataTypeIdentity<float>&) const {
return IsFloat() || IsDouble();
}
bool CanConvert(const DataTypeIdentity<double>&) const {
return IsFloat() || IsDouble();
}
friend std::ostream& operator<<(std::ostream& os,
const NeonImmediate& operand);
private:
union NeonImmediateType {
uint64_t u64_;
double d_;
uint32_t u32_;
float f_;
NeonImmediateType(uint64_t u) : u64_(u) {}
NeonImmediateType(int64_t u) : u64_(u) {}
NeonImmediateType(uint32_t u) : u32_(u) {}
NeonImmediateType(int32_t u) : u32_(u) {}
NeonImmediateType(double d) : d_(d) {}
NeonImmediateType(float f) : f_(f) {}
NeonImmediateType(const NeonImmediateType& ref) : u64_(ref.u64_) {}
} imm_;
DataType immediate_type_;
};
std::ostream& operator<<(std::ostream& os, const NeonImmediate& operand);
class NeonOperand {
public:
NeonOperand(int32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(int64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(uint64_t immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(float immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(double immediate) // NOLINT(runtime/explicit)
: imm_(immediate),
rm_(NoDReg) {}
NeonOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: imm_(imm),
rm_(NoDReg) {}
NeonOperand(const VRegister& rm) // NOLINT(runtime/explicit)
: imm_(0),
rm_(rm) {
VIXL_ASSERT(rm_.IsValid());
}
bool IsImmediate() const { return !rm_.IsValid(); }
bool IsRegister() const { return rm_.IsValid(); }
bool IsFloatZero() const {
VIXL_ASSERT(IsImmediate());
return imm_.IsFloatZero();
}
const NeonImmediate& GetNeonImmediate() const { return imm_; }
VRegister GetRegister() const {
VIXL_ASSERT(IsRegister());
return rm_;
}
protected:
NeonImmediate imm_;
VRegister rm_;
};
std::ostream& operator<<(std::ostream& os, const NeonOperand& operand);
// SOperand represents either an immediate or a SRegister.
class SOperand : public NeonOperand {
public:
// #<immediate>
// where <immediate> is 32bit int
// This is allowed to be an implicit constructor because SOperand is
// a wrapper class that doesn't normally perform any type conversion.
SOperand(int32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
SOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
// #<immediate>
// where <immediate> is 32bit float
SOperand(float immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
// where <immediate> is 64bit float
SOperand(double immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
SOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: NeonOperand(imm) {}
// rm
// This is allowed to be an implicit constructor because SOperand is
// a wrapper class that doesn't normally perform any type conversion.
SOperand(SRegister rm) // NOLINT(runtime/explicit)
: NeonOperand(rm) {}
SRegister GetRegister() const {
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kSRegister));
return SRegister(rm_.GetCode());
}
};
std::ostream& operator<<(std::ostream& os, const SOperand& operand);
// DOperand represents either an immediate or a DRegister.
class DOperand : public NeonOperand {
public:
// #<immediate>
// where <immediate> is uint32_t.
// This is allowed to be an implicit constructor because DOperand is
// a wrapper class that doesn't normally perform any type conversion.
DOperand(int32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(int64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(uint64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
// #<immediate>
// where <immediate> is a non zero floating point number which can be encoded
// as an 8 bit floating point (checked by the constructor).
// This is allowed to be an implicit constructor because DOperand is
// a wrapper class that doesn't normally perform any type conversion.
DOperand(float immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(double immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
DOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: NeonOperand(imm) {}
// rm
// This is allowed to be an implicit constructor because DOperand is
// a wrapper class that doesn't normally perform any type conversion.
DOperand(DRegister rm) // NOLINT(runtime/explicit)
: NeonOperand(rm) {}
DRegister GetRegister() const {
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kDRegister));
return DRegister(rm_.GetCode());
}
};
std::ostream& operator<<(std::ostream& os, const DOperand& operand);
// QOperand represents either an immediate or a QRegister.
class QOperand : public NeonOperand {
public:
// #<immediate>
// where <immediate> is uint32_t.
// This is allowed to be an implicit constructor because QOperand is
// a wrapper class that doesn't normally perform any type conversion.
QOperand(int32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(uint32_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(int64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(uint64_t immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(float immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(double immediate) // NOLINT(runtime/explicit)
: NeonOperand(immediate) {}
QOperand(const NeonImmediate& imm) // NOLINT(runtime/explicit)
: NeonOperand(imm) {}
// rm
// This is allowed to be an implicit constructor because QOperand is
// a wrapper class that doesn't normally perform any type conversion.
QOperand(QRegister rm) // NOLINT(runtime/explicit)
: NeonOperand(rm) {
VIXL_ASSERT(rm_.IsValid());
}
QRegister GetRegister() const {
VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kQRegister));
return QRegister(rm_.GetCode());
}
};
std::ostream& operator<<(std::ostream& os, const QOperand& operand);
class ImmediateVFP : public EncodingValue {
template <typename T>
struct FloatType {
typedef T base_type;
};
public:
explicit ImmediateVFP(const NeonImmediate& neon_imm) {
if (neon_imm.IsFloat()) {
const float imm = neon_imm.GetImmediate<float>();
if (VFP::IsImmFP32(imm)) {
SetEncodingValue(VFP::FP32ToImm8(imm));
}
} else if (neon_imm.IsDouble()) {
const double imm = neon_imm.GetImmediate<double>();
if (VFP::IsImmFP64(imm)) {
SetEncodingValue(VFP::FP64ToImm8(imm));
}
}
}
template <typename T>
static T Decode(uint32_t v) {
return Decode(v, FloatType<T>());
}
static float Decode(uint32_t imm8, const FloatType<float>&) {
return VFP::Imm8ToFP32(imm8);
}
static double Decode(uint32_t imm8, const FloatType<double>&) {
return VFP::Imm8ToFP64(imm8);
}
};
class ImmediateVbic : public EncodingValueAndImmediate {
public:
ImmediateVbic(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVand : public ImmediateVbic {
public:
ImmediateVand(DataType dt, const NeonImmediate neon_imm)
: ImmediateVbic(dt, neon_imm) {
if (IsValid()) {
SetEncodedImmediate(~GetEncodedImmediate() & 0xff);
}
}
};
class ImmediateVmov : public EncodingValueAndImmediate {
public:
ImmediateVmov(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVmvn : public EncodingValueAndImmediate {
public:
ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVorr : public EncodingValueAndImmediate {
public:
ImmediateVorr(DataType dt, const NeonImmediate& neon_imm);
static DataType DecodeDt(uint32_t cmode);
static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};
class ImmediateVorn : public ImmediateVorr {
public:
ImmediateVorn(DataType dt, const NeonImmediate& neon_imm)
: ImmediateVorr(dt, neon_imm) {
if (IsValid()) {
SetEncodedImmediate(~GetEncodedImmediate() & 0xff);
}
}
};
// MemOperand represents the addressing mode of a load or store instruction.
//
// Usage: <instr> <Rt> , <MemOperand>
//
// where <instr> is the instruction to use (e.g., Ldr(), Str(), etc.),
// <Rt> is general purpose register to be transferred,
// <MemOperand> is the rest of the arguments to the instruction
//
// <MemOperand> can be in one of 3 addressing modes:
//
// [ <Rn>, <offset> ] == offset addressing
// [ <Rn>, <offset> ]! == pre-indexed addressing
// [ <Rn> ], <offset> == post-indexed addressing
//
// where <offset> can be one of:
// - an immediate constant, such as <imm8>, <imm12>
// - an index register <Rm>
// - a shifted index register <Rm>, <shift> #<amount>
//
// The index register may have an associated {+/-} sign,
// which, if omitted, defaults to +.
//
// We have two constructors for the offset:
//
// One with a signed value offset parameter. The value of sign_ is
// "sign_of(constructor's offset parameter) and the value of offset_ is
// "constructor's offset parameter".
//
// The other with a sign and a positive value offset parameters. The value of
// sign_ is "constructor's sign parameter" and the value of offset_ is
// "constructor's sign parameter * constructor's offset parameter".
//
// The value of offset_ reflects the effective offset. For an offset_ of 0,
// sign_ can be positive or negative. Otherwise, sign_ always agrees with
// the sign of offset_.
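// Illustrative examples (not part of this header), assuming a MacroAssembler
// instance `masm`:
//
//   masm.Ldr(r0, MemOperand(r1, 4));                  // [r1, #4]   offset
//   masm.Ldr(r0, MemOperand(r1, 4, PreIndex));        // [r1, #4]!  pre-indexed
//   masm.Str(r0, MemOperand(r1, 4, PostIndex));       // [r1], #4   post-indexed
//   masm.Ldr(r0, MemOperand(r1, minus, r2, LSL, 2));  // [r1, -r2, lsl #2]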
class MemOperand {
public:
// rn
// where rn is the general purpose base register only
explicit MemOperand(Register rn, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(NoReg),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode | kMemOperandRegisterOnly) {
VIXL_ASSERT(rn_.IsValid());
}
// rn, #<imm>
// where rn is the general purpose base register,
// <imm> is a 32-bit offset to add to rn
//
// Note: if rn is PC, then this form is equivalent to a "label"
// Note: the second constructor allows minus zero (-0).
MemOperand(Register rn, int32_t offset, AddrMode addrmode = Offset)
: rn_(rn),
offset_(offset),
sign_((offset < 0) ? minus : plus),
rm_(NoReg),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid());
}
MemOperand(Register rn, Sign sign, int32_t offset, AddrMode addrmode = Offset)
: rn_(rn),
offset_(sign.IsPlus() ? offset : -offset),
sign_(sign),
rm_(NoReg),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid());
// With this constructor, the sign must only be specified by "sign".
VIXL_ASSERT(offset >= 0);
}
// rn, {+/-}rm
// where rn is the general purpose base register,
// {+/-} is the sign of the index register,
// rm is the general purpose index register,
MemOperand(Register rn, Sign sign, Register rm, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(sign),
rm_(rm),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
}
// rn, rm
// where rn is the general purpose base register,
// rm is the general purpose index register,
MemOperand(Register rn, Register rm, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(rm),
shift_(LSL),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
}
// rn, {+/-}rm, <shift>
// where rn is the general purpose base register,
// {+/-} is the sign of the index register,
// rm is the general purpose index register,
// <shift> is RRX, applied to value from rm
MemOperand(Register rn,
Sign sign,
Register rm,
Shift shift,
AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(sign),
rm_(rm),
shift_(shift),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
VIXL_ASSERT(shift_.IsRRX());
}
// rn, rm, <shift>
// where rn is the general purpose base register,
// rm is the general purpose index register,
// <shift> is RRX, applied to value from rm
MemOperand(Register rn, Register rm, Shift shift, AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(rm),
shift_(shift),
shift_amount_(0),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
VIXL_ASSERT(shift_.IsRRX());
}
// rn, {+/-}rm, <shift> #<amount>
// where rn is the general purpose base register,
// {+/-} is the sign of the index register,
// rm is the general purpose index register,
// <shift> is one of {LSL, LSR, ASR, ROR}, applied to value from rm
// <shift_amount> is optional size to apply to value from rm
MemOperand(Register rn,
Sign sign,
Register rm,
Shift shift,
uint32_t shift_amount,
AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(sign),
rm_(rm),
shift_(shift),
shift_amount_(shift_amount),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
CheckShift();
}
// rn, rm, <shift> #<amount>
// where rn is the general purpose base register,
// rm is the general purpose index register,
// <shift> is one of {LSL, LSR, ASR, ROR}, applied to value from rm
// <shift_amount> is the shift amount to apply to the value from rm
MemOperand(Register rn,
Register rm,
Shift shift,
uint32_t shift_amount,
AddrMode addrmode = Offset)
: rn_(rn),
offset_(0),
sign_(plus),
rm_(rm),
shift_(shift),
shift_amount_(shift_amount),
addrmode_(addrmode) {
VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
CheckShift();
}
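// For illustration, the register-offset forms above can be used as follows
// (a sketch; r0/r1 stand for any valid Registers in this API):
//
//   MemOperand(r0, r1);                    // [r0, r1]
//   MemOperand(r0, minus, r1);             // [r0, -r1]
//   MemOperand(r0, r1, LSL, 2);            // [r0, r1, lsl #2]
//   MemOperand(r0, r1, LSL, 2, PreIndex);  // [r0, r1, lsl #2]!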
Register GetBaseRegister() const { return rn_; }
int32_t GetOffsetImmediate() const { return offset_; }
bool IsOffsetImmediateWithinRange(int min,
int max,
int multiple_of = 1) const {
return (offset_ >= min) && (offset_ <= max) &&
((offset_ % multiple_of) == 0);
}
Sign GetSign() const { return sign_; }
Register GetOffsetRegister() const { return rm_; }
Shift GetShift() const { return shift_; }
unsigned GetShiftAmount() const { return shift_amount_; }
AddrMode GetAddrMode() const {
return static_cast<AddrMode>(addrmode_ & kMemOperandAddrModeMask);
}
bool IsRegisterOnly() const {
return (addrmode_ & kMemOperandRegisterOnly) != 0;
}
bool IsImmediate() const { return !rm_.IsValid(); }
bool IsImmediateZero() const { return !rm_.IsValid() && (offset_ == 0); }
bool IsPlainRegister() const {
return rm_.IsValid() && shift_.IsLSL() && (shift_amount_ == 0);
}
bool IsShiftedRegister() const { return rm_.IsValid(); }
bool IsImmediateOffset() const {
return (GetAddrMode() == Offset) && !rm_.IsValid();
}
bool IsImmediateZeroOffset() const {
return (GetAddrMode() == Offset) && !rm_.IsValid() && (offset_ == 0);
}
bool IsRegisterOffset() const {
return (GetAddrMode() == Offset) && rm_.IsValid() && shift_.IsLSL() &&
(shift_amount_ == 0);
}
bool IsShiftedRegisterOffset() const {
return (GetAddrMode() == Offset) && rm_.IsValid();
}
uint32_t GetTypeEncodingValue() const {
return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue();
}
bool IsOffset() const { return GetAddrMode() == Offset; }
bool IsPreIndex() const { return GetAddrMode() == PreIndex; }
bool IsPostIndex() const { return GetAddrMode() == PostIndex; }
bool IsShiftValid() const { return shift_.IsValidAmount(shift_amount_); }
private:
static const int kMemOperandRegisterOnly = 0x1000;
static const int kMemOperandAddrModeMask = 0xfff;
void CheckShift() {
#ifdef VIXL_DEBUG
// Disallow any zero shift other than RRX #0 and LSL #0.
if ((shift_amount_ == 0) && shift_.IsRRX()) return;
if ((shift_amount_ == 0) && !shift_.IsLSL()) {
VIXL_ABORT_WITH_MSG(
"A shift by 0 is only accepted in "
"the case of lsl and will be treated as "
"no shift.\n");
}
switch (shift_.GetType()) {
case LSL:
VIXL_ASSERT(shift_amount_ <= 31);
break;
case ROR:
VIXL_ASSERT(shift_amount_ <= 31);
break;
case LSR:
case ASR:
VIXL_ASSERT(shift_amount_ <= 32);
break;
case RRX:
default:
VIXL_UNREACHABLE();
break;
}
#endif
}
Register rn_;
int32_t offset_;
Sign sign_;
Register rm_;
Shift shift_;
uint32_t shift_amount_;
uint32_t addrmode_;
};
std::ostream& operator<<(std::ostream& os, const MemOperand& operand);
class AlignedMemOperand : public MemOperand {
public:
AlignedMemOperand(Register rn, Alignment align, AddrMode addrmode = Offset)
: MemOperand(rn, addrmode), align_(align) {
VIXL_ASSERT(addrmode != PreIndex);
}
AlignedMemOperand(Register rn,
Alignment align,
Register rm,
AddrMode addrmode)
: MemOperand(rn, rm, addrmode), align_(align) {
VIXL_ASSERT(addrmode != PreIndex);
}
Alignment GetAlignment() const { return align_; }
private:
Alignment align_;
};
std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand);
} // namespace aarch32
} // namespace vixl
#endif // VIXL_AARCH32_OPERANDS_AARCH32_H_

View file

@@ -0,0 +1,167 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The ABI features are only supported with C++11 or later.
#if __cplusplus >= 201103L
// This should not be defined manually.
#define VIXL_HAS_ABI_SUPPORT
#elif defined(VIXL_HAS_ABI_SUPPORT)
#error "The ABI support requires C++11 or later."
#endif
#ifdef VIXL_HAS_ABI_SUPPORT
#ifndef VIXL_AARCH64_ABI_AARCH64_H_
#define VIXL_AARCH64_ABI_AARCH64_H_
#include <algorithm>
#include <type_traits>
#include "../globals-vixl.h"
#include "instructions-aarch64.h"
#include "operands-aarch64.h"
namespace vixl {
namespace aarch64 {
// Class describing the AArch64 procedure call standard, as defined in "ARM
// Procedure Call Standard for the ARM 64-bit Architecture (AArch64)",
// release 1.0 (AAPCS below).
//
// The stages in the comments match the description in that document.
//
// Stage B does not apply to arguments handled by this class.
class ABI {
public:
explicit ABI(Register stack_pointer = sp) : stack_pointer_(stack_pointer) {
// Stage A - Initialization
Reset();
}
void Reset() {
NGRN_ = 0;
NSRN_ = 0;
stack_offset_ = 0;
}
int GetStackSpaceRequired() { return stack_offset_; }
// The logic is described in section 5.5 of the AAPCS.
template <typename T>
GenericOperand GetReturnGenericOperand() const {
ABI abi(stack_pointer_);
GenericOperand result = abi.GetNextParameterGenericOperand<T>();
VIXL_ASSERT(result.IsCPURegister());
return result;
}
// The logic is described in section 5.4.2 of the AAPCS.
// The `GenericOperand` returned describes the location reserved for the
// argument from the point of view of the callee.
template <typename T>
GenericOperand GetNextParameterGenericOperand() {
const bool is_floating_point_type = std::is_floating_point<T>::value;
const bool is_integral_type =
std::is_integral<T>::value || std::is_enum<T>::value;
const bool is_pointer_type = std::is_pointer<T>::value;
int type_alignment = std::alignment_of<T>::value;
// We only support basic types.
VIXL_ASSERT(is_floating_point_type || is_integral_type || is_pointer_type);
// To ensure we get the correct type of operand when simulating on a 32-bit
// host, force the size of pointer types to the native AArch64 pointer size.
unsigned size = is_pointer_type ? 8 : sizeof(T);
// The size of the 'operand' reserved for the argument.
unsigned operand_size = AlignUp(size, kWRegSizeInBytes);
if (size > 8) {
VIXL_UNIMPLEMENTED();
return GenericOperand();
}
// Stage C.1
if (is_floating_point_type && (NSRN_ < 8)) {
return GenericOperand(FPRegister(NSRN_++, size * kBitsPerByte));
}
// Stages C.2, C.3, and C.4: Unsupported. Caught by the assertions above.
// Stages C.5 and C.6
if (is_floating_point_type) {
VIXL_STATIC_ASSERT(
!is_floating_point_type ||
(std::is_same<T, float>::value || std::is_same<T, double>::value));
int offset = stack_offset_;
stack_offset_ += 8;
return GenericOperand(MemOperand(stack_pointer_, offset), operand_size);
}
// Stage C.7
if ((is_integral_type || is_pointer_type) && (size <= 8) && (NGRN_ < 8)) {
return GenericOperand(Register(NGRN_++, operand_size * kBitsPerByte));
}
// Stage C.8
if (type_alignment == 16) {
NGRN_ = AlignUp(NGRN_, 2);
}
// Stage C.9
if (is_integral_type && (size == 16) && (NGRN_ < 7)) {
VIXL_UNIMPLEMENTED();
return GenericOperand();
}
// Stage C.10: Unsupported. Caught by the assertions above.
// Stage C.11
NGRN_ = 8;
// Stage C.12
stack_offset_ = AlignUp(stack_offset_, std::max(type_alignment, 8));
// Stage C.13: Unsupported. Caught by the assertions above.
// Stage C.14
VIXL_ASSERT(size <= 8u);
size = std::max(size, 8u);
int offset = stack_offset_;
stack_offset_ += size;
return GenericOperand(MemOperand(stack_pointer_, offset), operand_size);
}
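// A usage sketch (the register mapping in the comments assumes the
// operands-aarch64.h register definitions): locating the arguments of a call
// such as `float Fn(int32_t a, float b)`:
//
//   ABI abi;
//   GenericOperand a = abi.GetNextParameterGenericOperand<int32_t>();  // w0
//   GenericOperand b = abi.GetNextParameterGenericOperand<float>();    // s0
//   GenericOperand r = abi.GetReturnGenericOperand<float>();           // s0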
private:
Register stack_pointer_;
// Next General-purpose Register Number.
int NGRN_;
// Next SIMD and Floating-point Register Number.
int NSRN_;
// The acronym "NSAA" used in the standard refers to the "Next Stacked
// Argument Address". Here we deal with offsets from the stack pointer.
int stack_offset_;
};
template <>
inline GenericOperand ABI::GetReturnGenericOperand<void>() const {
return GenericOperand();
}
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_ABI_AARCH64_H_
#endif // VIXL_HAS_ABI_SUPPORT

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,86 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CPU_AARCH64_H
#define VIXL_CPU_AARCH64_H
#include "../globals-vixl.h"
#include "instructions-aarch64.h"
namespace vixl {
namespace aarch64 {
class CPU {
public:
// Initialise CPU support.
static void SetUp();
// Ensures the data at a given address and with a given size is the same for
// the I and D caches. I and D caches are not automatically coherent on ARM
// so this operation is required before any dynamically generated code can
// safely run.
static void EnsureIAndDCacheCoherency(void *address, size_t length);
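// For example (a sketch, assuming code has been emitted into `buffer` and
// spans `size` bytes):
//   CPU::EnsureIAndDCacheCoherency(buffer, size);
// must be called before branching to `buffer`.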
// Handle tagged pointers.
template <typename T>
static T SetPointerTag(T pointer, uint64_t tag) {
VIXL_ASSERT(IsUintN(kAddressTagWidth, tag));
// Use C-style casts to get static_cast behaviour for integral types (T),
// and reinterpret_cast behaviour for other types.
uint64_t raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
return (T)raw;
}
template <typename T>
static uint64_t GetPointerTag(T pointer) {
// Use C-style casts to get static_cast behaviour for integral types (T),
// and reinterpret_cast behaviour for other types.
uint64_t raw = (uint64_t)pointer;
VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
return (raw & kAddressTagMask) >> kAddressTagOffset;
}
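// A sketch of the tag round trip (the pointer value itself is arbitrary):
//   int* p = ...;                               // some untagged pointer
//   int* tagged = CPU::SetPointerTag(p, 0x5a);  // top byte becomes 0x5a
//   uint64_t tag = CPU::GetPointerTag(tagged);  // 0x5a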
private:
// Return the content of the cache type register.
static uint32_t GetCacheType();
// I and D cache line size in bytes.
static unsigned icache_line_size_;
static unsigned dcache_line_size_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_CPU_AARCH64_H

View file

@@ -0,0 +1,125 @@
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Arm Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
#define VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
#include <iostream>
#include "../cpu-features.h"
#include "decoder-aarch64.h"
namespace vixl {
namespace aarch64 {
// This visitor records the CPU features that each decoded instruction requires.
// It provides:
// - the set of CPU features required by the most recently decoded instruction,
// - a cumulative set of encountered CPU features,
// - an optional list of 'available' CPU features.
//
// Primarily, this allows the Disassembler and Simulator to share the same CPU
// features logic. However, it can be used standalone to scan code blocks for
// CPU features.
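//
// A usage sketch (assuming an Instruction* `instr` to examine):
//
//   Decoder decoder;
//   CPUFeaturesAuditor auditor(&decoder);  // registers itself as a visitor
//   decoder.Decode(instr);
//   CPUFeatures needed = auditor.GetInstructionFeatures();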
class CPUFeaturesAuditor : public DecoderVisitor {
public:
// Construction arguments:
// - If a decoder is specified, the CPUFeaturesAuditor automatically
// registers itself as a visitor. Otherwise, this can be done manually.
//
// - If an `available` features list is provided, it is used as a hint in
// cases where instructions may be provided by multiple separate features.
// An example of this is FP&SIMD loads and stores: some of these are used
// in both FP and integer SIMD code. If exactly one of those features is
// in `available` when one of these instructions is encountered, then the
// auditor will record that feature. Otherwise, it will record _both_
// features.
explicit CPUFeaturesAuditor(
Decoder* decoder, const CPUFeatures& available = CPUFeatures::None())
: available_(available), decoder_(decoder) {
if (decoder_ != NULL) decoder_->AppendVisitor(this);
}
explicit CPUFeaturesAuditor(
const CPUFeatures& available = CPUFeatures::None())
: available_(available), decoder_(NULL) {}
virtual ~CPUFeaturesAuditor() {
if (decoder_ != NULL) decoder_->RemoveVisitor(this);
}
void ResetSeenFeatures() {
seen_ = CPUFeatures::None();
last_instruction_ = CPUFeatures::None();
}
// Query or set available CPUFeatures.
const CPUFeatures& GetAvailableFeatures() const { return available_; }
void SetAvailableFeatures(const CPUFeatures& available) {
available_ = available;
}
// Query CPUFeatures seen since construction (or the last call to `ResetSeenFeatures()`).
const CPUFeatures& GetSeenFeatures() const { return seen_; }
// Query CPUFeatures from the last instruction visited by this auditor.
const CPUFeatures& GetInstructionFeatures() const {
return last_instruction_;
}
bool InstructionIsAvailable() const {
return available_.Has(last_instruction_);
}
// The common CPUFeatures interface operates on the available_ list.
CPUFeatures* GetCPUFeatures() { return &available_; }
void SetCPUFeatures(const CPUFeatures& available) {
SetAvailableFeatures(available);
}
// Declare all Visitor functions.
#define DECLARE(A) \
virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
class RecordInstructionFeaturesScope;
void LoadStoreHelper(const Instruction* instr);
void LoadStorePairHelper(const Instruction* instr);
CPUFeatures seen_;
CPUFeatures last_instruction_;
CPUFeatures available_;
Decoder* decoder_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_

View file

@@ -0,0 +1,290 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_DECODER_AARCH64_H_
#define VIXL_AARCH64_DECODER_AARCH64_H_
#include <list>
#include "../globals-vixl.h"
#include "instructions-aarch64.h"
// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST_THAT_RETURN(V) \
V(AddSubExtended) \
V(AddSubImmediate) \
V(AddSubShifted) \
V(AddSubWithCarry) \
V(AtomicMemory) \
V(Bitfield) \
V(CompareBranch) \
V(ConditionalBranch) \
V(ConditionalCompareImmediate) \
V(ConditionalCompareRegister) \
V(ConditionalSelect) \
V(Crypto2RegSHA) \
V(Crypto3RegSHA) \
V(CryptoAES) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(Exception) \
V(Extract) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPFixedPointConvert) \
V(FPImmediate) \
V(FPIntegerConvert) \
V(LoadLiteral) \
V(LoadStoreExclusive) \
V(LoadStorePairNonTemporal) \
V(LoadStorePairOffset) \
V(LoadStorePairPostIndex) \
V(LoadStorePairPreIndex) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnscaledOffset) \
V(LoadStoreUnsignedOffset) \
V(LogicalImmediate) \
V(LogicalShifted) \
V(MoveWideImmediate) \
V(NEON2RegMisc) \
V(NEON2RegMiscFP16) \
V(NEON3Different) \
V(NEON3Same) \
V(NEON3SameExtra) \
V(NEON3SameFP16) \
V(NEONAcrossLanes) \
V(NEONByIndexedElement) \
V(NEONCopy) \
V(NEONExtract) \
V(NEONLoadStoreMultiStruct) \
V(NEONLoadStoreMultiStructPostIndex) \
V(NEONLoadStoreSingleStruct) \
V(NEONLoadStoreSingleStructPostIndex) \
V(NEONModifiedImmediate) \
V(NEONPerm) \
V(NEONScalar2RegMisc) \
V(NEONScalar2RegMiscFP16) \
V(NEONScalar3Diff) \
V(NEONScalar3Same) \
V(NEONScalar3SameExtra) \
V(NEONScalar3SameFP16) \
V(NEONScalarByIndexedElement) \
V(NEONScalarCopy) \
V(NEONScalarPairwise) \
V(NEONScalarShiftImmediate) \
V(NEONShiftImmediate) \
V(NEONTable) \
V(PCRelAddressing) \
V(System) \
V(TestBranch) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister)
#define VISITOR_LIST_THAT_DONT_RETURN(V) \
V(Unallocated) \
V(Unimplemented)
#define VISITOR_LIST(V) \
VISITOR_LIST_THAT_RETURN(V) \
VISITOR_LIST_THAT_DONT_RETURN(V)
namespace vixl {
namespace aarch64 {
// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
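// For illustration, a minimal instruction-counting visitor could be sketched
// as follows (one override per VISITOR_LIST entry):
//
//   class CountingVisitor : public DecoderVisitor {
//    public:
//     CountingVisitor() : count_(0) {}
//     virtual void VisitAddSubImmediate(const Instruction*) VIXL_OVERRIDE {
//       count_++;
//     }
//     // ... likewise for every other Visit<Form> function ...
//     int count_;
//   };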
class DecoderVisitor {
public:
enum VisitorConstness { kConstVisitor, kNonConstVisitor };
explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
: constness_(constness) {}
virtual ~DecoderVisitor() {}
#define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
VISITOR_LIST(DECLARE)
#undef DECLARE
bool IsConstVisitor() const { return constness_ == kConstVisitor; }
Instruction* MutableInstruction(const Instruction* instr) {
VIXL_ASSERT(!IsConstVisitor());
return const_cast<Instruction*>(instr);
}
private:
const VisitorConstness constness_;
};
class Decoder {
public:
Decoder() {}
// Top-level wrappers around the actual decoding function.
void Decode(const Instruction* instr) {
std::list<DecoderVisitor*>::iterator it;
for (it = visitors_.begin(); it != visitors_.end(); it++) {
VIXL_ASSERT((*it)->IsConstVisitor());
}
DecodeInstruction(instr);
}
void Decode(Instruction* instr) {
DecodeInstruction(const_cast<const Instruction*>(instr));
}
// Decode all instructions from start (inclusive) to end (exclusive).
template <typename T>
void Decode(T start, T end) {
for (T instr = start; instr < end; instr = instr->GetNextInstruction()) {
Decode(instr);
}
}
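// For example (a sketch; code_start/code_end are assumed to delimit a buffer
// of generated instructions):
//   decoder.Decode(Instruction::CastConst(code_start),
//                  Instruction::CastConst(code_end));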
// Register a new visitor class with the decoder.
// Decode() will call the corresponding visitor method from all registered
// visitor classes when decoding reaches the leaf node of the instruction
// decode tree.
// Visitors are called in order.
// A visitor can be registered multiple times.
//
// d.AppendVisitor(V1);
// d.AppendVisitor(V2);
// d.PrependVisitor(V2);
// d.AppendVisitor(V3);
//
// d.Decode(i);
//
// will call in order visitor methods in V2, V1, V2, V3.
void AppendVisitor(DecoderVisitor* visitor);
void PrependVisitor(DecoderVisitor* visitor);
// These helpers register `new_visitor` before or after the first instance of
// `registered_visitor` in the list.
// So if
// V1, V2, V1, V2
// are registered in this order in the decoder, calls to
// d.InsertVisitorAfter(V3, V1);
// d.InsertVisitorBefore(V4, V2);
// will yield the order
// V1, V3, V4, V2, V1, V2
//
// For more complex modifications of the order of registered visitors, one can
// directly access and modify the list of visitors via the `visitors()`
// accessor.
void InsertVisitorBefore(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
void InsertVisitorAfter(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
// Remove all instances of a previously registered visitor class from the list
// of visitors stored by the decoder.
void RemoveVisitor(DecoderVisitor* visitor);
#define DECLARE(A) void Visit##A(const Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
std::list<DecoderVisitor*>* visitors() { return &visitors_; }
private:
// Decodes an instruction and calls the visitor functions registered with the
// Decoder class.
void DecodeInstruction(const Instruction* instr);
// Decode the PC relative addressing instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x0.
void DecodePCRelAddressing(const Instruction* instr);
// Decode the add/subtract immediate instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x1.
void DecodeAddSubImmediate(const Instruction* instr);
// Decode the branch, system command, and exception generation parts of
// the instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
void DecodeBranchSystemException(const Instruction* instr);
// Decode the load and store parts of the instruction tree, and call
// the corresponding visitors.
// On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
void DecodeLoadStore(const Instruction* instr);
// Decode the logical immediate and move wide immediate parts of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x2.
void DecodeLogical(const Instruction* instr);
// Decode the bitfield and extraction parts of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x3.
void DecodeBitfieldExtract(const Instruction* instr);
// Decode the data processing parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
void DecodeDataProcessing(const Instruction* instr);
// Decode the floating point parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0xE, 0xF}.
void DecodeFP(const Instruction* instr);
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 29:25 = 0x6.
void DecodeNEONLoadStore(const Instruction* instr);
// Decode the Advanced SIMD (NEON) vector data processing part of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 28:25 = 0x7.
void DecodeNEONVectorDataProcessing(const Instruction* instr);
// Decode the Advanced SIMD (NEON) scalar data processing part of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 28:25 = 0xF.
void DecodeNEONScalarDataProcessing(const Instruction* instr);
private:
// Visitors are registered in a list.
std::list<DecoderVisitor*> visitors_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_DECODER_AARCH64_H_

View file

@@ -0,0 +1,217 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_DISASM_AARCH64_H
#define VIXL_AARCH64_DISASM_AARCH64_H
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "cpu-features-auditor-aarch64.h"
#include "decoder-aarch64.h"
#include "instructions-aarch64.h"
#include "operands-aarch64.h"
namespace vixl {
namespace aarch64 {
class Disassembler : public DecoderVisitor {
public:
Disassembler();
Disassembler(char* text_buffer, int buffer_size);
virtual ~Disassembler();
char* GetOutput();
// Declare all Visitor functions.
#define DECLARE(A) \
virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
VISITOR_LIST(DECLARE)
#undef DECLARE
protected:
virtual void ProcessOutput(const Instruction* instr);
// Default output functions. The functions below implement a default way of
// printing elements in the disassembly. A sub-class can override these to
// customize the disassembly output.
// Prints the name of a register.
// TODO: This currently doesn't allow renaming of V registers.
virtual void AppendRegisterNameToOutput(const Instruction* instr,
const CPURegister& reg);
// Prints a PC-relative offset. This is used for example when disassembling
// branches to immediate offsets.
virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
int64_t offset);
// Prints an address, in the general case. It can be code or data. This is
// used for example to print the target address of an ADR instruction.
virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
const void* addr);
// Prints the address of some code.
// This is used for example to print the target address of a branch to an
// immediate offset.
// A sub-class can for example override this method to lookup the address and
// print an appropriate name.
virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
const void* addr);
// Prints the address of some data.
// This is used for example to print the source address of a load literal
// instruction.
virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
const void* addr);
// Same as the above, but for addresses that are not relative to the code
// buffer. They are currently not used by VIXL.
virtual void AppendAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendCodeAddressToOutput(const Instruction* instr,
const void* addr);
virtual void AppendDataAddressToOutput(const Instruction* instr,
const void* addr);
public:
// Get/Set the offset that should be added to code addresses when printing
// code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
// helpers.
// Below is an example of how a branch immediate instruction in memory at
// address 0xb010200 would disassemble with different offsets.
// Base address | Disassembly
// 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc)
// 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc)
// 0xb010200 | 0x0: b #+0xcc (addr 0xcc)
void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
int64_t CodeRelativeAddress(const void* instr);
private:
void Format(const Instruction* instr,
const char* mnemonic,
const char* format);
void Substitute(const Instruction* instr, const char* string);
int SubstituteField(const Instruction* instr, const char* format);
int SubstituteRegisterField(const Instruction* instr, const char* format);
int SubstituteImmediateField(const Instruction* instr, const char* format);
int SubstituteLiteralField(const Instruction* instr, const char* format);
int SubstituteBitfieldImmediateField(const Instruction* instr,
const char* format);
int SubstituteShiftField(const Instruction* instr, const char* format);
int SubstituteExtendField(const Instruction* instr, const char* format);
int SubstituteConditionField(const Instruction* instr, const char* format);
int SubstitutePCRelAddressField(const Instruction* instr, const char* format);
int SubstituteBranchTargetField(const Instruction* instr, const char* format);
int SubstituteLSRegOffsetField(const Instruction* instr, const char* format);
int SubstitutePrefetchField(const Instruction* instr, const char* format);
int SubstituteBarrierField(const Instruction* instr, const char* format);
int SubstituteSysOpField(const Instruction* instr, const char* format);
int SubstituteCrField(const Instruction* instr, const char* format);
bool RdIsZROrSP(const Instruction* instr) const {
return (instr->GetRd() == kZeroRegCode);
}
bool RnIsZROrSP(const Instruction* instr) const {
return (instr->GetRn() == kZeroRegCode);
}
bool RmIsZROrSP(const Instruction* instr) const {
return (instr->GetRm() == kZeroRegCode);
}
bool RaIsZROrSP(const Instruction* instr) const {
return (instr->GetRa() == kZeroRegCode);
}
bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
int64_t code_address_offset() const { return code_address_offset_; }
protected:
void ResetOutput();
void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);
void set_code_address_offset(int64_t code_address_offset) {
code_address_offset_ = code_address_offset;
}
char* buffer_;
uint32_t buffer_pos_;
uint32_t buffer_size_;
bool own_buffer_;
int64_t code_address_offset_;
};
class PrintDisassembler : public Disassembler {
public:
explicit PrintDisassembler(FILE* stream)
: cpu_features_auditor_(NULL),
cpu_features_prefix_("// Needs: "),
cpu_features_suffix_(""),
stream_(stream) {}
// Convenience helpers for quick disassembly, without having to manually
// create a decoder.
void DisassembleBuffer(const Instruction* start, uint64_t size);
void DisassembleBuffer(const Instruction* start, const Instruction* end);
void Disassemble(const Instruction* instr);
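// For example (a sketch, assuming `code` points to `size` bytes of emitted
// AArch64 code):
//   PrintDisassembler disasm(stdout);
//   disasm.DisassembleBuffer(Instruction::CastConst(code), size);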
// If a CPUFeaturesAuditor is specified, it will be used to annotate
// disassembly. The CPUFeaturesAuditor is expected to visit the instructions
// _before_ the disassembler, such that the CPUFeatures information is
// available when the disassembler is called.
void RegisterCPUFeaturesAuditor(CPUFeaturesAuditor* auditor) {
cpu_features_auditor_ = auditor;
}
// Set the prefix to appear before the CPU features annotations.
void SetCPUFeaturesPrefix(const char* prefix) {
VIXL_ASSERT(prefix != NULL);
cpu_features_prefix_ = prefix;
}
// Set the suffix to appear after the CPU features annotations.
void SetCPUFeaturesSuffix(const char* suffix) {
VIXL_ASSERT(suffix != NULL);
cpu_features_suffix_ = suffix;
}
protected:
virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE;
CPUFeaturesAuditor* cpu_features_auditor_;
const char* cpu_features_prefix_;
const char* cpu_features_suffix_;
private:
FILE* stream_;
};
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_DISASM_AARCH64_H

View file

@@ -0,0 +1,865 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
#define VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "constants-aarch64.h"
namespace vixl {
namespace aarch64 {
// ISA constants. --------------------------------------------------------------
typedef uint32_t Instr;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MBytes;
// This is the nominal page size (as used by the adrp instruction); the actual
// size of the memory pages allocated by the kernel is likely to differ.
const unsigned kPageSize = 4 * KBytes;
const unsigned kPageSizeLog2 = 12;
const unsigned kBRegSize = 8;
const unsigned kBRegSizeLog2 = 3;
const unsigned kBRegSizeInBytes = kBRegSize / 8;
const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
const unsigned kHRegSize = 16;
const unsigned kHRegSizeLog2 = 4;
const unsigned kHRegSizeInBytes = kHRegSize / 8;
const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
const unsigned kQRegSize = 128;
const unsigned kQRegSizeLog2 = 7;
const unsigned kQRegSizeInBytes = kQRegSize / 8;
const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
const uint64_t kWRegMask = UINT64_C(0xffffffff);
const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kHRegMask = UINT64_C(0xffff);
const uint64_t kSRegMask = UINT64_C(0xffffffff);
const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSSignMask = UINT64_C(0x80000000);
const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
const uint64_t kWSignMask = UINT64_C(0x80000000);
const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
const uint64_t kByteMask = UINT64_C(0xff);
const uint64_t kHalfWordMask = UINT64_C(0xffff);
const uint64_t kWordMask = UINT64_C(0xffffffff);
const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
const uint64_t kHMaxUInt = UINT64_C(0xffff);
// Define k*MinInt with "-k*MaxInt - 1", because the hexadecimal representation
// (e.g. "INT32_C(0x80000000)") has implementation-defined behaviour.
const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
const int64_t kXMinInt = -kXMaxInt - 1;
const int32_t kWMaxInt = INT32_C(0x7fffffff);
const int32_t kWMinInt = -kWMaxInt - 1;
const int16_t kHMaxInt = INT16_C(0x7fff);
const int16_t kHMinInt = -kHMaxInt - 1;
const unsigned kFpRegCode = 29;
const unsigned kLinkRegCode = 30;
const unsigned kSpRegCode = 31;
const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;
const unsigned kAddressTagOffset = 56;
const unsigned kAddressTagWidth = 8;
const uint64_t kAddressTagMask = ((UINT64_C(1) << kAddressTagWidth) - 1)
<< kAddressTagOffset;
VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
const uint64_t kTTBRMask = UINT64_C(1) << 55;
// Make these moved float constants backwards compatible
// with explicit vixl::aarch64:: namespace references.
using vixl::kDoubleMantissaBits;
using vixl::kDoubleExponentBits;
using vixl::kFloatMantissaBits;
using vixl::kFloatExponentBits;
using vixl::kFloat16MantissaBits;
using vixl::kFloat16ExponentBits;
using vixl::kFP16PositiveInfinity;
using vixl::kFP16NegativeInfinity;
using vixl::kFP32PositiveInfinity;
using vixl::kFP32NegativeInfinity;
using vixl::kFP64PositiveInfinity;
using vixl::kFP64NegativeInfinity;
using vixl::kFP16DefaultNaN;
using vixl::kFP32DefaultNaN;
using vixl::kFP64DefaultNaN;
unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);
enum ImmBranchType {
UnknownBranchType = 0,
CondBranchType = 1,
UncondBranchType = 2,
CompareBranchType = 3,
TestBranchType = 4
};
enum AddrMode { Offset, PreIndex, PostIndex };
enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
// Instructions. ---------------------------------------------------------------
class Instruction {
public:
Instr GetInstructionBits() const {
return *(reinterpret_cast<const Instr*>(this));
}
VIXL_DEPRECATED("GetInstructionBits", Instr InstructionBits() const) {
return GetInstructionBits();
}
void SetInstructionBits(Instr new_instr) {
*(reinterpret_cast<Instr*>(this)) = new_instr;
}
int ExtractBit(int pos) const { return (GetInstructionBits() >> pos) & 1; }
VIXL_DEPRECATED("ExtractBit", int Bit(int pos) const) {
return ExtractBit(pos);
}
uint32_t ExtractBits(int msb, int lsb) const {
return ExtractUnsignedBitfield32(msb, lsb, GetInstructionBits());
}
VIXL_DEPRECATED("ExtractBits", uint32_t Bits(int msb, int lsb) const) {
return ExtractBits(msb, lsb);
}
int32_t ExtractSignedBits(int msb, int lsb) const {
int32_t bits = *(reinterpret_cast<const int32_t*>(this));
return ExtractSignedBitfield32(msb, lsb, bits);
}
VIXL_DEPRECATED("ExtractSignedBits",
int32_t SignedBits(int msb, int lsb) const) {
return ExtractSignedBits(msb, lsb);
}
Instr Mask(uint32_t mask) const {
VIXL_ASSERT(mask != 0);
return GetInstructionBits() & mask;
}
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int32_t Get##Name() const { return this->Func(HighBit, LowBit); } \
VIXL_DEPRECATED("Get" #Name, int32_t Name() const) { return Get##Name(); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmPCRelLo and ImmPCRelHi.
int GetImmPCRel() const {
uint32_t hi = static_cast<uint32_t>(GetImmPCRelHi());
uint32_t lo = GetImmPCRelLo();
uint32_t offset = (hi << ImmPCRelLo_width) | lo;
int width = ImmPCRelLo_width + ImmPCRelHi_width;
return ExtractSignedBitfield32(width - 1, 0, offset);
}
VIXL_DEPRECATED("GetImmPCRel", int ImmPCRel() const) { return GetImmPCRel(); }
uint64_t GetImmLogical() const;
VIXL_DEPRECATED("GetImmLogical", uint64_t ImmLogical() const) {
return GetImmLogical();
}
unsigned GetImmNEONabcdefgh() const;
VIXL_DEPRECATED("GetImmNEONabcdefgh", unsigned ImmNEONabcdefgh() const) {
return GetImmNEONabcdefgh();
}
Float16 GetImmFP16() const;
float GetImmFP32() const;
VIXL_DEPRECATED("GetImmFP32", float ImmFP32() const) { return GetImmFP32(); }
double GetImmFP64() const;
VIXL_DEPRECATED("GetImmFP64", double ImmFP64() const) { return GetImmFP64(); }
Float16 GetImmNEONFP16() const;
float GetImmNEONFP32() const;
VIXL_DEPRECATED("GetImmNEONFP32", float ImmNEONFP32() const) {
return GetImmNEONFP32();
}
double GetImmNEONFP64() const;
VIXL_DEPRECATED("GetImmNEONFP64", double ImmNEONFP64() const) {
return GetImmNEONFP64();
}
unsigned GetSizeLS() const {
return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
}
VIXL_DEPRECATED("GetSizeLS", unsigned SizeLS() const) { return GetSizeLS(); }
unsigned GetSizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}
VIXL_DEPRECATED("GetSizeLSPair", unsigned SizeLSPair() const) {
return GetSizeLSPair();
}
int GetNEONLSIndex(int access_size_shift) const {
int64_t q = GetNEONQ();
int64_t s = GetNEONS();
int64_t size = GetNEONLSSize();
int64_t index = (q << 3) | (s << 2) | size;
return static_cast<int>(index >> access_size_shift);
}
VIXL_DEPRECATED("GetNEONLSIndex",
int NEONLSIndex(int access_size_shift) const) {
return GetNEONLSIndex(access_size_shift);
}
// Helpers.
bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
}
bool IsUncondBranchImm() const {
return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
}
bool IsCompareBranch() const {
return Mask(CompareBranchFMask) == CompareBranchFixed;
}
bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; }
bool IsImmBranch() const { return GetBranchType() != UnknownBranchType; }
bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}
bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}
bool IsAddSubImmediate() const {
return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
}
bool IsAddSubExtended() const {
return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
}
bool IsLoadOrStore() const {
return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
}
bool IsLoad() const;
bool IsStore() const;
bool IsLoadLiteral() const {
// This includes PRFM_lit.
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
bool IsMovn() const {
return (Mask(MoveWideImmediateMask) == MOVN_x) ||
(Mask(MoveWideImmediateMask) == MOVN_w);
}
static int GetImmBranchRangeBitwidth(ImmBranchType branch_type);
VIXL_DEPRECATED(
"GetImmBranchRangeBitwidth",
static int ImmBranchRangeBitwidth(ImmBranchType branch_type)) {
return GetImmBranchRangeBitwidth(branch_type);
}
static int32_t GetImmBranchForwardRange(ImmBranchType branch_type);
VIXL_DEPRECATED(
"GetImmBranchForwardRange",
static int32_t ImmBranchForwardRange(ImmBranchType branch_type)) {
return GetImmBranchForwardRange(branch_type);
}
static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode GetRdMode() const {
// The following instructions use sp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
// Logical (immediate) when not setting the flags.
// Otherwise, r31 is the zero register.
if (IsAddSubImmediate() || IsAddSubExtended()) {
if (Mask(AddSubSetFlagsBit)) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
if (IsLogicalImmediate()) {
// Of the logical (immediate) instructions, only ANDS (and its aliases)
// can set the flags. The others can all write into sp.
// Note that some logical operations are not available to
// immediate-operand instructions, so we have to combine two masks here.
if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
return Reg31IsZeroRegister;
}
VIXL_DEPRECATED("GetRdMode", Reg31Mode RdMode() const) { return GetRdMode(); }
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode GetRnMode() const {
// The following instructions use sp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
// Add/sub (extended).
// Otherwise, r31 is the zero register.
if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
return Reg31IsStackPointer;
}
return Reg31IsZeroRegister;
}
VIXL_DEPRECATED("GetRnMode", Reg31Mode RnMode() const) { return GetRnMode(); }
ImmBranchType GetBranchType() const {
if (IsCondBranchImm()) {
return CondBranchType;
} else if (IsUncondBranchImm()) {
return UncondBranchType;
} else if (IsCompareBranch()) {
return CompareBranchType;
} else if (IsTestBranch()) {
return TestBranchType;
} else {
return UnknownBranchType;
}
}
VIXL_DEPRECATED("GetBranchType", ImmBranchType BranchType() const) {
return GetBranchType();
}
// Find the target of this instruction. 'this' may be a branch or a
// PC-relative addressing instruction.
const Instruction* GetImmPCOffsetTarget() const;
VIXL_DEPRECATED("GetImmPCOffsetTarget",
const Instruction* ImmPCOffsetTarget() const) {
return GetImmPCOffsetTarget();
}
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(const Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(const Instruction* source);
// The range of a load literal instruction, expressed as 'instr +- range'.
// The range is actually the 'positive' range; the load instruction can
// target [instr - range - kInstructionSize, instr + range].
static const int kLoadLiteralImmBitwidth = 19;
static const int kLoadLiteralRange =
(1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;
// Calculate the address of a literal referred to by a load-literal
// instruction, and return it as the specified type.
//
// The literal itself is safely mutable only if the backing buffer is safely
// mutable.
template <typename T>
T GetLiteralAddress() const {
uint64_t base_raw = reinterpret_cast<uint64_t>(this);
int64_t offset = GetImmLLiteral() * static_cast<int>(kLiteralEntrySize);
uint64_t address_raw = base_raw + offset;
// Cast the address using a C-style cast. A reinterpret_cast would be
// appropriate, but it can't cast one integral type to another.
T address = (T)(address_raw);
// Assert that the address can be represented by the specified type.
VIXL_ASSERT((uint64_t)(address) == address_raw);
return address;
}
template <typename T>
VIXL_DEPRECATED("GetLiteralAddress", T LiteralAddress() const) {
return GetLiteralAddress<T>();
}
uint32_t GetLiteral32() const {
uint32_t literal;
memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
return literal;
}
VIXL_DEPRECATED("GetLiteral32", uint32_t Literal32() const) {
return GetLiteral32();
}
uint64_t GetLiteral64() const {
uint64_t literal;
memcpy(&literal, GetLiteralAddress<const void*>(), sizeof(literal));
return literal;
}
VIXL_DEPRECATED("GetLiteral64", uint64_t Literal64() const) {
return GetLiteral64();
}
float GetLiteralFP32() const { return RawbitsToFloat(GetLiteral32()); }
VIXL_DEPRECATED("GetLiteralFP32", float LiteralFP32() const) {
return GetLiteralFP32();
}
double GetLiteralFP64() const { return RawbitsToDouble(GetLiteral64()); }
VIXL_DEPRECATED("GetLiteralFP64", double LiteralFP64() const) {
return GetLiteralFP64();
}
Instruction* GetNextInstruction() { return this + kInstructionSize; }
const Instruction* GetNextInstruction() const {
return this + kInstructionSize;
}
VIXL_DEPRECATED("GetNextInstruction",
const Instruction* NextInstruction() const) {
return GetNextInstruction();
}
const Instruction* GetInstructionAtOffset(int64_t offset) const {
VIXL_ASSERT(IsWordAligned(this + offset));
return this + offset;
}
VIXL_DEPRECATED("GetInstructionAtOffset",
const Instruction* InstructionAtOffset(int64_t offset)
const) {
return GetInstructionAtOffset(offset);
}
template <typename T>
static Instruction* Cast(T src) {
return reinterpret_cast<Instruction*>(src);
}
template <typename T>
static const Instruction* CastConst(T src) {
return reinterpret_cast<const Instruction*>(src);
}
private:
int GetImmBranch() const;
static Float16 Imm8ToFloat16(uint32_t imm8);
static float Imm8ToFP32(uint32_t imm8);
static double Imm8ToFP64(uint32_t imm8);
void SetPCRelImmTarget(const Instruction* target);
void SetBranchImmTarget(const Instruction* target);
};
// Functions for handling NEON vector format information.
enum VectorFormat {
kFormatUndefined = 0xffffffff,
kFormat8B = NEON_8B,
kFormat16B = NEON_16B,
kFormat4H = NEON_4H,
kFormat8H = NEON_8H,
kFormat2S = NEON_2S,
kFormat4S = NEON_4S,
kFormat1D = NEON_1D,
kFormat2D = NEON_2D,
// Scalar formats. We add the scalar bit to distinguish between scalar and
// vector enumerations; the bit is always set in the encoding of scalar ops
// and always clear for vector ops. Although kFormatD and kFormat1D appear
// to be the same, their meaning is subtly different. The first is a scalar
// operation, the second a vector operation that only affects one lane.
kFormatB = NEON_B | NEONScalar,
kFormatH = NEON_H | NEONScalar,
kFormatS = NEON_S | NEONScalar,
kFormatD = NEON_D | NEONScalar,
// A value invented solely for FP16 scalar pairwise simulator trace tests.
kFormat2H = 0xfffffffe
};
const int kMaxLanesPerVector = 16;
VectorFormat VectorFormatHalfWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatHalfLanes(VectorFormat vform);
VectorFormat ScalarFormatFromLaneSize(int lanesize);
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatFillQ(VectorFormat vform);
VectorFormat ScalarFormatFromFormat(VectorFormat vform);
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
// TODO: Make the return types of these functions consistent.
unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
int LaneSizeInBytesFromFormat(VectorFormat vform);
int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
int LaneCountFromFormat(VectorFormat vform);
int MaxLaneCountFromFormat(VectorFormat vform);
bool IsVectorFormat(VectorFormat vform);
int64_t MaxIntFromFormat(VectorFormat vform);
int64_t MinIntFromFormat(VectorFormat vform);
uint64_t MaxUintFromFormat(VectorFormat vform);
// clang-format off
enum NEONFormat {
NF_UNDEF = 0,
NF_8B = 1,
NF_16B = 2,
NF_4H = 3,
NF_8H = 4,
NF_2S = 5,
NF_4S = 6,
NF_1D = 7,
NF_2D = 8,
NF_B = 9,
NF_H = 10,
NF_S = 11,
NF_D = 12
};
// clang-format on
static const unsigned kNEONFormatMaxBits = 6;
struct NEONFormatMap {
// The bit positions in the instruction to consider.
uint8_t bits[kNEONFormatMaxBits];
// Mapping from concatenated bits to format.
NEONFormat map[1 << kNEONFormatMaxBits];
};
class NEONFormatDecoder {
public:
enum SubstitutionMode { kPlaceholder, kFormat };
// Construct a format decoder with increasingly specific format maps for each
// substitution. If no format map is specified, the default is the integer
// format map.
explicit NEONFormatDecoder(const Instruction* instr) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(IntegerFormatMap());
}
NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(format);
}
NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format0,
const NEONFormatMap* format1) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(format0, format1);
}
NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format0,
const NEONFormatMap* format1,
const NEONFormatMap* format2) {
instrbits_ = instr->GetInstructionBits();
SetFormatMaps(format0, format1, format2);
}
// Set the format mapping for all or individual substitutions.
void SetFormatMaps(const NEONFormatMap* format0,
const NEONFormatMap* format1 = NULL,
const NEONFormatMap* format2 = NULL) {
VIXL_ASSERT(format0 != NULL);
formats_[0] = format0;
formats_[1] = (format1 == NULL) ? formats_[0] : format1;
formats_[2] = (format2 == NULL) ? formats_[1] : format2;
}
void SetFormatMap(unsigned index, const NEONFormatMap* format) {
VIXL_ASSERT(index < ArrayLength(formats_));
VIXL_ASSERT(format != NULL);
formats_[index] = format;
}
// Substitute %s in the input string with the placeholder string for each
// register, i.e. "'B", "'H", etc.
const char* SubstitutePlaceholders(const char* string) {
return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
}
// Substitute %s in the input string with a new string based on the
// substitution mode.
const char* Substitute(const char* string,
SubstitutionMode mode0 = kFormat,
SubstitutionMode mode1 = kFormat,
SubstitutionMode mode2 = kFormat) {
snprintf(form_buffer_,
sizeof(form_buffer_),
string,
GetSubstitute(0, mode0),
GetSubstitute(1, mode1),
GetSubstitute(2, mode2));
return form_buffer_;
}
// Append a "2" to a mnemonic string based of the state of the Q bit.
const char* Mnemonic(const char* mnemonic) {
if ((instrbits_ & NEON_Q) != 0) {
snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
return mne_buffer_;
}
return mnemonic;
}
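// For illustration (a sketch of how a disassembler might combine the two
// helpers above for a vector instruction `instr`):
//   NEONFormatDecoder nfd(instr);
//   const char* mnemonic = nfd.Mnemonic("saddl");  // "saddl" or "saddl2"
//   const char* form = nfd.Substitute("%s");       // e.g. "8b", "4s", ...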
VectorFormat GetVectorFormat(int format_index = 0) {
return GetVectorFormat(formats_[format_index]);
}
VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
static const VectorFormat vform[] = {kFormatUndefined,
kFormat8B,
kFormat16B,
kFormat4H,
kFormat8H,
kFormat2S,
kFormat4S,
kFormat1D,
kFormat2D,
kFormatB,
kFormatH,
kFormatS,
kFormatD};
VIXL_ASSERT(GetNEONFormat(format_map) < ArrayLength(vform));
return vform[GetNEONFormat(format_map)];
}
// Built in mappings for common cases.
// The integer format map uses three bits (Q, size<1:0>) to encode the
// "standard" set of NEON integer vector formats.
static const NEONFormatMap* IntegerFormatMap() {
static const NEONFormatMap map =
{{23, 22, 30},
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
return &map;
}
// The long integer format map uses two bits (size<1:0>) to encode the
// long set of NEON integer vector formats. These are used in narrow, wide
// and long operations.
static const NEONFormatMap* LongIntegerFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
return &map;
}
// The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
// formats: NF_2S, NF_4S, NF_2D.
static const NEONFormatMap* FPFormatMap() {
// The FP format map assumes two bits (Q, size<0>) are used to encode the
// NEON FP vector formats: NF_2S, NF_4S, NF_2D.
static const NEONFormatMap map = {{22, 30},
{NF_2S, NF_4S, NF_UNDEF, NF_2D}};
return &map;
}
// The FP16 format map uses one bit (Q) to encode the NEON vector format:
// NF_4H, NF_8H.
static const NEONFormatMap* FP16FormatMap() {
static const NEONFormatMap map = {{30}, {NF_4H, NF_8H}};
return &map;
}
// The load/store format map uses three bits (Q, 11, 10) to encode the
// set of NEON vector formats.
static const NEONFormatMap* LoadStoreFormatMap() {
static const NEONFormatMap map =
{{11, 10, 30},
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
return &map;
}
// The logical format map uses one bit (Q) to encode the NEON vector format:
// NF_8B, NF_16B.
static const NEONFormatMap* LogicalFormatMap() {
static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
return &map;
}
// The triangular format map uses between two and five bits to encode the NEON
// vector format:
// xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
// x1000->2S, x1001->4S, 10001->2D, all others undefined.
static const NEONFormatMap* TriangularFormatMap() {
static const NEONFormatMap map =
{{19, 18, 17, 16, 30},
{NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_UNDEF, NF_2D, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
return &map;
}
// The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
// formats: NF_B, NF_H, NF_S, NF_D.
static const NEONFormatMap* ScalarFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
return &map;
}
// The long scalar format map uses two bits (size<1:0>) to encode the longer
// NEON scalar formats: NF_H, NF_S, NF_D.
static const NEONFormatMap* LongScalarFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
return &map;
}
// The FP scalar format map assumes one bit (size<0>) is used to encode the
// NEON FP scalar formats: NF_S, NF_D.
static const NEONFormatMap* FPScalarFormatMap() {
static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
return &map;
}
// The FP scalar pairwise format map assumes two bits (U, size<0>) are used to
// encode the NEON FP scalar formats: NF_H, NF_S, NF_D.
static const NEONFormatMap* FPScalarPairwiseFormatMap() {
static const NEONFormatMap map = {{29, 22}, {NF_H, NF_UNDEF, NF_S, NF_D}};
return &map;
}
// The triangular scalar format map uses between one and four bits to encode
// the NEON scalar formats:
// xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
static const NEONFormatMap* TriangularScalarFormatMap() {
static const NEONFormatMap map = {{19, 18, 17, 16},
{NF_UNDEF,
NF_B,
NF_H,
NF_B,
NF_S,
NF_B,
NF_H,
NF_B,
NF_D,
NF_B,
NF_H,
NF_B,
NF_S,
NF_B,
NF_H,
NF_B}};
return &map;
}
private:
// Get a pointer to a string that represents the format or placeholder for
// the specified substitution index, based on the format map and instruction.
const char* GetSubstitute(int index, SubstitutionMode mode) {
if (mode == kFormat) {
return NEONFormatAsString(GetNEONFormat(formats_[index]));
}
VIXL_ASSERT(mode == kPlaceholder);
return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
}
// Get the NEONFormat enumerated value for bits obtained from the
// instruction based on the specified format mapping.
NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
return format_map->map[PickBits(format_map->bits)];
}
// Convert a NEONFormat into a string.
static const char* NEONFormatAsString(NEONFormat format) {
// clang-format off
static const char* formats[] = {
"undefined",
"8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
"b", "h", "s", "d"
};
// clang-format on
VIXL_ASSERT(format < ArrayLength(formats));
return formats[format];
}
// Convert a NEONFormat into a register placeholder string.
static const char* NEONFormatAsPlaceholder(NEONFormat format) {
VIXL_ASSERT((format == NF_B) || (format == NF_H) || (format == NF_S) ||
(format == NF_D) || (format == NF_UNDEF));
// clang-format off
static const char* formats[] = {
"undefined",
"undefined", "undefined", "undefined", "undefined",
"undefined", "undefined", "undefined", "undefined",
"'B", "'H", "'S", "'D"
};
// clang-format on
return formats[format];
}
// Select bits from instrbits_ defined by the bits array, concatenate them,
// and return the value.
uint8_t PickBits(const uint8_t bits[]) {
uint8_t result = 0;
for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
if (bits[b] == 0) break;
result <<= 1;
result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
}
return result;
}
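// As a worked example (for illustration only): with the integer format map,
// whose bits are {23, 22, 30}, an instruction with size<1:0> = 0b10
// (bits 23:22) and Q = 1 (bit 30) gives PickBits() == 0b101 == 5, which
// selects NF_4S from that map.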
Instr instrbits_;
const NEONFormatMap* formats_[3];
char form_buffer_[64];
char mne_buffer_[16];
};
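// Example use of NEONFormatDecoder (an illustrative sketch only; `instr` is
// assumed to point at a decoded NEON instruction, and the disassembly string
// is arbitrary):
//
//   NEONFormatDecoder nfd(instr);                  // Integer format map.
//   VectorFormat vform = nfd.GetVectorFormat();    // e.g. kFormat4S.
//   const char* text = nfd.Substitute("add 'Vd.%s, 'Vn.%s, 'Vm.%s");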
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_INSTRUCTIONS_AARCH64_H_

View file

@@ -0,0 +1,117 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_INSTRUMENT_AARCH64_H_
#define VIXL_AARCH64_INSTRUMENT_AARCH64_H_
#include "../globals-vixl.h"
#include "../utils-vixl.h"
#include "constants-aarch64.h"
#include "decoder-aarch64.h"
#include "instrument-aarch64.h"
namespace vixl {
namespace aarch64 {
const int kCounterNameMaxLength = 256;
const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
enum InstrumentState { InstrumentStateDisable = 0, InstrumentStateEnable = 1 };
enum CounterType {
Gauge = 0, // Gauge counters reset themselves after reading.
Cumulative = 1 // Cumulative counters keep their value after reading.
};
class Counter {
public:
explicit Counter(const char* name, CounterType type = Gauge);
void Increment();
void Enable();
void Disable();
bool IsEnabled();
uint64_t GetCount();
VIXL_DEPRECATED("GetCount", uint64_t count()) { return GetCount(); }
const char* GetName();
VIXL_DEPRECATED("GetName", const char* name()) { return GetName(); }
CounterType GetType();
VIXL_DEPRECATED("GetType", CounterType type()) { return GetType(); }
private:
char name_[kCounterNameMaxLength];
uint64_t count_;
bool enabled_;
CounterType type_;
};
class Instrument : public DecoderVisitor {
public:
explicit Instrument(
const char* datafile = NULL,
uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
~Instrument();
void Enable();
void Disable();
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
void Update();
void DumpCounters();
void DumpCounterNames();
void DumpEventMarker(unsigned marker);
void HandleInstrumentationEvent(unsigned event);
Counter* GetCounter(const char* name);
void InstrumentLoadStore(const Instruction* instr);
void InstrumentLoadStorePair(const Instruction* instr);
std::list<Counter*> counters_;
FILE* output_stream_;
// Counter information is dumped every sample_period_ instructions decoded.
// If sample_period_ is 0, a final counter value is produced only when the
// Instrument object is destroyed.
uint64_t sample_period_;
};
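// Example use (an illustrative sketch only; the output file name and the
// sampling period shown here are arbitrary):
//
//   Decoder decoder;
//   Instrument instrument("vixl_stats.csv", 1 << 22);
//   decoder.AppendVisitor(&instrument);
//   decoder.Decode(first_instr);   // Updates the counters for one instruction.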
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_INSTRUMENT_AARCH64_H_

File diff suppressed because it is too large

View file

@@ -0,0 +1,993 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_
#define VIXL_AARCH64_OPERANDS_AARCH64_H_
#include "instructions-aarch64.h"
namespace vixl {
namespace aarch64 {
typedef uint64_t RegList;
static const int kRegListSizeInBits = sizeof(RegList) * 8;
// Registers.
// Some CPURegister methods can return Register or VRegister types, so we need
// to declare them in advance.
class Register;
class VRegister;
class CPURegister {
public:
enum RegisterType {
// The kInvalid value is used to detect uninitialized static instances,
// which are always zero-initialized before any constructors are called.
kInvalid = 0,
kRegister,
kVRegister,
kFPRegister = kVRegister,
kNoRegister
};
CPURegister() : code_(0), size_(0), type_(kNoRegister) {
VIXL_ASSERT(!IsValid());
VIXL_ASSERT(IsNone());
}
CPURegister(unsigned code, unsigned size, RegisterType type)
: code_(code), size_(size), type_(type) {
VIXL_ASSERT(IsValidOrNone());
}
unsigned GetCode() const {
VIXL_ASSERT(IsValid());
return code_;
}
VIXL_DEPRECATED("GetCode", unsigned code() const) { return GetCode(); }
RegisterType GetType() const {
VIXL_ASSERT(IsValidOrNone());
return type_;
}
VIXL_DEPRECATED("GetType", RegisterType type() const) { return GetType(); }
RegList GetBit() const {
VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
}
VIXL_DEPRECATED("GetBit", RegList Bit() const) { return GetBit(); }
int GetSizeInBytes() const {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(size_ % 8 == 0);
return size_ / 8;
}
VIXL_DEPRECATED("GetSizeInBytes", int SizeInBytes() const) {
return GetSizeInBytes();
}
int GetSizeInBits() const {
VIXL_ASSERT(IsValid());
return size_;
}
VIXL_DEPRECATED("GetSizeInBits", unsigned size() const) {
return GetSizeInBits();
}
VIXL_DEPRECATED("GetSizeInBits", int SizeInBits() const) {
return GetSizeInBits();
}
bool Is8Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 8;
}
bool Is16Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 16;
}
bool Is32Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 32;
}
bool Is64Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 64;
}
bool Is128Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 128;
}
bool IsValid() const {
if (IsValidRegister() || IsValidVRegister()) {
VIXL_ASSERT(!IsNone());
return true;
} else {
// This assert is hit when the register has not been properly initialized.
// One cause for this can be an initialisation order fiasco. See
// https://isocpp.org/wiki/faq/ctors#static-init-order for some details.
VIXL_ASSERT(IsNone());
return false;
}
}
bool IsValidRegister() const {
return IsRegister() && ((size_ == kWRegSize) || (size_ == kXRegSize)) &&
((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
}
bool IsValidVRegister() const {
return IsVRegister() && ((size_ == kBRegSize) || (size_ == kHRegSize) ||
(size_ == kSRegSize) || (size_ == kDRegSize) ||
(size_ == kQRegSize)) &&
(code_ < kNumberOfVRegisters);
}
bool IsValidFPRegister() const {
return IsFPRegister() && (code_ < kNumberOfVRegisters);
}
bool IsNone() const {
// kNoRegister types should always have size 0 and code 0.
VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
return type_ == kNoRegister;
}
bool Aliases(const CPURegister& other) const {
VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
return (code_ == other.code_) && (type_ == other.type_);
}
bool Is(const CPURegister& other) const {
VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
return Aliases(other) && (size_ == other.size_);
}
bool IsZero() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kZeroRegCode);
}
bool IsSP() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kSPRegInternalCode);
}
bool IsRegister() const { return type_ == kRegister; }
bool IsVRegister() const { return type_ == kVRegister; }
bool IsFPRegister() const { return IsS() || IsD(); }
bool IsW() const { return IsValidRegister() && Is32Bits(); }
bool IsX() const { return IsValidRegister() && Is64Bits(); }
// These assertions ensure that the size and type of the register are as
// described. They do not consider the number of lanes that make up a vector.
// So, for example, Is8B() implies IsD(), and Is1D() implies IsD(), but IsD()
// does not imply Is1D() or Is8B().
// Check the number of lanes, i.e. the format of the vector, using methods such
// as Is8B(), Is1D(), etc. in the VRegister class.
bool IsV() const { return IsVRegister(); }
bool IsB() const { return IsV() && Is8Bits(); }
bool IsH() const { return IsV() && Is16Bits(); }
bool IsS() const { return IsV() && Is32Bits(); }
bool IsD() const { return IsV() && Is64Bits(); }
bool IsQ() const { return IsV() && Is128Bits(); }
// Semantic type for sdot and udot instructions.
bool IsS4B() const { return IsS(); }
const VRegister& S4B() const { return S(); }
const Register& W() const;
const Register& X() const;
const VRegister& V() const;
const VRegister& B() const;
const VRegister& H() const;
const VRegister& S() const;
const VRegister& D() const;
const VRegister& Q() const;
bool IsSameType(const CPURegister& other) const {
return type_ == other.type_;
}
bool IsSameSizeAndType(const CPURegister& other) const {
return (size_ == other.size_) && IsSameType(other);
}
protected:
unsigned code_;
int size_;
RegisterType type_;
private:
bool IsValidOrNone() const { return IsValid() || IsNone(); }
};
class Register : public CPURegister {
public:
Register() : CPURegister() {}
explicit Register(const CPURegister& other)
: CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()) {
VIXL_ASSERT(IsValidRegister());
}
Register(unsigned code, unsigned size) : CPURegister(code, size, kRegister) {}
bool IsValid() const {
VIXL_ASSERT(IsRegister() || IsNone());
return IsValidRegister();
}
static const Register& GetWRegFromCode(unsigned code);
VIXL_DEPRECATED("GetWRegFromCode",
static const Register& WRegFromCode(unsigned code)) {
return GetWRegFromCode(code);
}
static const Register& GetXRegFromCode(unsigned code);
VIXL_DEPRECATED("GetXRegFromCode",
static const Register& XRegFromCode(unsigned code)) {
return GetXRegFromCode(code);
}
private:
static const Register wregisters[];
static const Register xregisters[];
};
namespace internal {
template <int size_in_bits>
class FixedSizeRegister : public Register {
public:
FixedSizeRegister() : Register() {}
explicit FixedSizeRegister(unsigned code) : Register(code, size_in_bits) {
VIXL_ASSERT(IsValidRegister());
}
explicit FixedSizeRegister(const Register& other)
: Register(other.GetCode(), size_in_bits) {
VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
VIXL_ASSERT(IsValidRegister());
}
explicit FixedSizeRegister(const CPURegister& other)
: Register(other.GetCode(), other.GetSizeInBits()) {
VIXL_ASSERT(other.GetType() == kRegister);
VIXL_ASSERT(other.GetSizeInBits() == size_in_bits);
VIXL_ASSERT(IsValidRegister());
}
bool IsValid() const {
return Register::IsValid() && (GetSizeInBits() == size_in_bits);
}
};
} // namespace internal
typedef internal::FixedSizeRegister<kXRegSize> XRegister;
typedef internal::FixedSizeRegister<kWRegSize> WRegister;
class VRegister : public CPURegister {
public:
VRegister() : CPURegister(), lanes_(1) {}
explicit VRegister(const CPURegister& other)
: CPURegister(other.GetCode(), other.GetSizeInBits(), other.GetType()),
lanes_(1) {
VIXL_ASSERT(IsValidVRegister());
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
VRegister(unsigned code, unsigned size, unsigned lanes = 1)
: CPURegister(code, size, kVRegister), lanes_(lanes) {
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
VRegister(unsigned code, VectorFormat format)
: CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister),
lanes_(IsVectorFormat(format) ? LaneCountFromFormat(format) : 1) {
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
bool IsValid() const {
VIXL_ASSERT(IsVRegister() || IsNone());
return IsValidVRegister();
}
static const VRegister& GetBRegFromCode(unsigned code);
VIXL_DEPRECATED("GetBRegFromCode",
static const VRegister& BRegFromCode(unsigned code)) {
return GetBRegFromCode(code);
}
static const VRegister& GetHRegFromCode(unsigned code);
VIXL_DEPRECATED("GetHRegFromCode",
static const VRegister& HRegFromCode(unsigned code)) {
return GetHRegFromCode(code);
}
static const VRegister& GetSRegFromCode(unsigned code);
VIXL_DEPRECATED("GetSRegFromCode",
static const VRegister& SRegFromCode(unsigned code)) {
return GetSRegFromCode(code);
}
static const VRegister& GetDRegFromCode(unsigned code);
VIXL_DEPRECATED("GetDRegFromCode",
static const VRegister& DRegFromCode(unsigned code)) {
return GetDRegFromCode(code);
}
static const VRegister& GetQRegFromCode(unsigned code);
VIXL_DEPRECATED("GetQRegFromCode",
static const VRegister& QRegFromCode(unsigned code)) {
return GetQRegFromCode(code);
}
static const VRegister& GetVRegFromCode(unsigned code);
VIXL_DEPRECATED("GetVRegFromCode",
static const VRegister& VRegFromCode(unsigned code)) {
return GetVRegFromCode(code);
}
VRegister V8B() const { return VRegister(code_, kDRegSize, 8); }
VRegister V16B() const { return VRegister(code_, kQRegSize, 16); }
VRegister V2H() const { return VRegister(code_, kSRegSize, 2); }
VRegister V4H() const { return VRegister(code_, kDRegSize, 4); }
VRegister V8H() const { return VRegister(code_, kQRegSize, 8); }
VRegister V2S() const { return VRegister(code_, kDRegSize, 2); }
VRegister V4S() const { return VRegister(code_, kQRegSize, 4); }
VRegister V2D() const { return VRegister(code_, kQRegSize, 2); }
VRegister V1D() const { return VRegister(code_, kDRegSize, 1); }
bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); }
bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); }
bool Is2H() const { return (Is32Bits() && (lanes_ == 2)); }
bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); }
bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); }
bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); }
bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); }
bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); }
bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); }
// For consistency, we assert the number of lanes of these scalar registers,
// even though there are no vectors of equivalent total size with which they
// could alias.
bool Is1B() const {
VIXL_ASSERT(!(Is8Bits() && IsVector()));
return Is8Bits();
}
bool Is1H() const {
VIXL_ASSERT(!(Is16Bits() && IsVector()));
return Is16Bits();
}
bool Is1S() const {
VIXL_ASSERT(!(Is32Bits() && IsVector()));
return Is32Bits();
}
// Semantic type for sdot and udot instructions.
bool Is1S4B() const { return Is1S(); }
bool IsLaneSizeB() const { return GetLaneSizeInBits() == kBRegSize; }
bool IsLaneSizeH() const { return GetLaneSizeInBits() == kHRegSize; }
bool IsLaneSizeS() const { return GetLaneSizeInBits() == kSRegSize; }
bool IsLaneSizeD() const { return GetLaneSizeInBits() == kDRegSize; }
int GetLanes() const { return lanes_; }
VIXL_DEPRECATED("GetLanes", int lanes() const) { return GetLanes(); }
bool IsScalar() const { return lanes_ == 1; }
bool IsVector() const { return lanes_ > 1; }
bool IsSameFormat(const VRegister& other) const {
return (size_ == other.size_) && (lanes_ == other.lanes_);
}
unsigned GetLaneSizeInBytes() const { return GetSizeInBytes() / lanes_; }
VIXL_DEPRECATED("GetLaneSizeInBytes", unsigned LaneSizeInBytes() const) {
return GetLaneSizeInBytes();
}
unsigned GetLaneSizeInBits() const { return GetLaneSizeInBytes() * 8; }
VIXL_DEPRECATED("GetLaneSizeInBits", unsigned LaneSizeInBits() const) {
return GetLaneSizeInBits();
}
private:
static const VRegister bregisters[];
static const VRegister hregisters[];
static const VRegister sregisters[];
static const VRegister dregisters[];
static const VRegister qregisters[];
static const VRegister vregisters[];
int lanes_;
};
// Backward compatibility for FPRegisters.
typedef VRegister FPRegister;
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and VRegister
// variants are provided for convenience.
const Register NoReg;
const VRegister NoVReg;
const FPRegister NoFPReg; // For backward compatibility.
const CPURegister NoCPUReg;
#define DEFINE_REGISTERS(N) \
const WRegister w##N(N); \
const XRegister x##N(N);
AARCH64_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
const WRegister wsp(kSPRegInternalCode);
const XRegister sp(kSPRegInternalCode);
#define DEFINE_VREGISTERS(N) \
const VRegister b##N(N, kBRegSize); \
const VRegister h##N(N, kHRegSize); \
const VRegister s##N(N, kSRegSize); \
const VRegister d##N(N, kDRegSize); \
const VRegister q##N(N, kQRegSize); \
const VRegister v##N(N, kQRegSize);
AARCH64_REGISTER_CODE_LIST(DEFINE_VREGISTERS)
#undef DEFINE_VREGISTERS
// Register aliases.
const XRegister ip0 = x16;
const XRegister ip1 = x17;
const XRegister lr = x30;
const XRegister xzr = x31;
const WRegister wzr = w31;
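// For illustration (a sketch only): the same register code can be viewed at
// several sizes and vector arrangements, e.g.
//   x0.W()      // w0, the 32-bit view of general-purpose register 0.
//   v0.V4S()    // v0 arranged as four 32-bit lanes (a 128-bit Q register).
//   v0.S()      // s0, the 32-bit scalar view of the same vector register.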
// AreAliased returns true if any of the named registers overlap. Arguments
// set to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoReg,
const CPURegister& reg4 = NoReg,
const CPURegister& reg5 = NoReg,
const CPURegister& reg6 = NoReg,
const CPURegister& reg7 = NoReg,
const CPURegister& reg8 = NoReg);
// AreSameSizeAndType returns true if all of the specified registers have the
// same size, and are of the same type. The system stack pointer may be
// specified. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreSameSizeAndType(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoCPUReg,
const CPURegister& reg4 = NoCPUReg,
const CPURegister& reg5 = NoCPUReg,
const CPURegister& reg6 = NoCPUReg,
const CPURegister& reg7 = NoCPUReg,
const CPURegister& reg8 = NoCPUReg);
// AreEven returns true if all of the specified registers have even register
// indices. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreEven(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoReg,
const CPURegister& reg4 = NoReg,
const CPURegister& reg5 = NoReg,
const CPURegister& reg6 = NoReg,
const CPURegister& reg7 = NoReg,
const CPURegister& reg8 = NoReg);
// AreConsecutive returns true if all of the specified registers are
// consecutive in the register file. Arguments set to NoReg are ignored, as are
// any subsequent arguments. At least one argument (reg1) must be valid
// (not NoCPUReg).
bool AreConsecutive(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoCPUReg,
const CPURegister& reg4 = NoCPUReg);
// AreSameFormat returns true if all of the specified VRegisters have the same
// vector format. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoVReg).
bool AreSameFormat(const VRegister& reg1,
const VRegister& reg2,
const VRegister& reg3 = NoVReg,
const VRegister& reg4 = NoVReg);
// AreConsecutive returns true if all of the specified VRegisters are
// consecutive in the register file. Arguments set to NoReg are ignored, as are
// any subsequent arguments. At least one argument (reg1) must be valid
// (not NoVReg).
bool AreConsecutive(const VRegister& reg1,
const VRegister& reg2,
const VRegister& reg3 = NoVReg,
const VRegister& reg4 = NoVReg);
// Lists of registers.
class CPURegList {
public:
explicit CPURegList(CPURegister reg1,
CPURegister reg2 = NoCPUReg,
CPURegister reg3 = NoCPUReg,
CPURegister reg4 = NoCPUReg)
: list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()),
size_(reg1.GetSizeInBits()),
type_(reg1.GetType()) {
VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
VIXL_ASSERT(IsValid());
}
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
: list_(list), size_(size), type_(type) {
VIXL_ASSERT(IsValid());
}
CPURegList(CPURegister::RegisterType type,
unsigned size,
unsigned first_reg,
unsigned last_reg)
: size_(size), type_(type) {
VIXL_ASSERT(
((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
((type == CPURegister::kVRegister) &&
(last_reg < kNumberOfVRegisters)));
VIXL_ASSERT(last_reg >= first_reg);
list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
list_ &= ~((UINT64_C(1) << first_reg) - 1);
VIXL_ASSERT(IsValid());
}
CPURegister::RegisterType GetType() const {
VIXL_ASSERT(IsValid());
return type_;
}
VIXL_DEPRECATED("GetType", CPURegister::RegisterType type() const) {
return GetType();
}
// Combine another CPURegList into this one. Registers that already exist in
// this list are left unchanged. The type and size of the registers in the
// 'other' list must match those in this list.
void Combine(const CPURegList& other) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
list_ |= other.GetList();
}
// Remove every register in the other CPURegList from this one. Registers that
// do not exist in this list are ignored. The type and size of the registers
// in the 'other' list must match those in this list.
void Remove(const CPURegList& other) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
list_ &= ~other.GetList();
}
// Variants of Combine and Remove which take a single register.
void Combine(const CPURegister& other) {
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetSizeInBits() == size_);
Combine(other.GetCode());
}
void Remove(const CPURegister& other) {
VIXL_ASSERT(other.GetType() == type_);
VIXL_ASSERT(other.GetSizeInBits() == size_);
Remove(other.GetCode());
}
// Variants of Combine and Remove which take a single register by its code;
// the type and size of the register is inferred from this list.
void Combine(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ |= (UINT64_C(1) << code);
}
void Remove(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ &= ~(UINT64_C(1) << code);
}
static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
VIXL_ASSERT(list_1.type_ == list_2.type_);
VIXL_ASSERT(list_1.size_ == list_2.size_);
return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
}
static CPURegList Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3);
static CPURegList Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4);
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2) {
VIXL_ASSERT(list_1.type_ == list_2.type_);
VIXL_ASSERT(list_1.size_ == list_2.size_);
return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
}
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3);
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4);
bool Overlaps(const CPURegList& other) const {
return (type_ == other.type_) && ((list_ & other.list_) != 0);
}
RegList GetList() const {
VIXL_ASSERT(IsValid());
return list_;
}
VIXL_DEPRECATED("GetList", RegList list() const) { return GetList(); }
void SetList(RegList new_list) {
VIXL_ASSERT(IsValid());
list_ = new_list;
}
VIXL_DEPRECATED("SetList", void set_list(RegList new_list)) {
return SetList(new_list);
}
// Remove all callee-saved registers from the list. This can be useful when
// preparing registers for an AAPCS64 function call, for example.
void RemoveCalleeSaved();
CPURegister PopLowestIndex();
CPURegister PopHighestIndex();
// AAPCS64 callee-saved registers.
static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);
// AAPCS64 caller-saved registers. Note that this includes lr.
// TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
// 64-bits being caller-saved.
static CPURegList GetCallerSaved(unsigned size = kXRegSize);
static CPURegList GetCallerSavedV(unsigned size = kDRegSize);
bool IsEmpty() const {
VIXL_ASSERT(IsValid());
return list_ == 0;
}
bool IncludesAliasOf(const CPURegister& other) const {
VIXL_ASSERT(IsValid());
return (type_ == other.GetType()) && ((other.GetBit() & list_) != 0);
}
bool IncludesAliasOf(int code) const {
VIXL_ASSERT(IsValid());
return (((static_cast<RegList>(1) << code) & list_) != 0);
}
int GetCount() const {
VIXL_ASSERT(IsValid());
return CountSetBits(list_);
}
VIXL_DEPRECATED("GetCount", int Count()) const { return GetCount(); }
int GetRegisterSizeInBits() const {
VIXL_ASSERT(IsValid());
return size_;
}
VIXL_DEPRECATED("GetRegisterSizeInBits", int RegisterSizeInBits() const) {
return GetRegisterSizeInBits();
}
int GetRegisterSizeInBytes() const {
int size_in_bits = GetRegisterSizeInBits();
VIXL_ASSERT((size_in_bits % 8) == 0);
return size_in_bits / 8;
}
VIXL_DEPRECATED("GetRegisterSizeInBytes", int RegisterSizeInBytes() const) {
return GetRegisterSizeInBytes();
}
unsigned GetTotalSizeInBytes() const {
VIXL_ASSERT(IsValid());
return GetRegisterSizeInBytes() * GetCount();
}
VIXL_DEPRECATED("GetTotalSizeInBytes", unsigned TotalSizeInBytes() const) {
return GetTotalSizeInBytes();
}
private:
RegList list_;
int size_;
CPURegister::RegisterType type_;
bool IsValid() const;
};
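// For illustration (a sketch only): building a CPURegList and iterating over
// it in ascending code order.
//   CPURegList list(x0, x1, x2);
//   list.Combine(x3);
//   while (!list.IsEmpty()) {
//     CPURegister reg = list.PopLowestIndex();   // x0, x1, x2, x3 in turn.
//   }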
// AAPCS64 callee-saved registers.
extern const CPURegList kCalleeSaved;
extern const CPURegList kCalleeSavedV;
// AAPCS64 caller-saved registers. Note that this includes lr.
extern const CPURegList kCallerSaved;
extern const CPURegList kCallerSavedV;
// Operand.
class Operand {
public:
// #<immediate>
// where <immediate> is int64_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(int64_t immediate = 0); // NOLINT(runtime/explicit)
// rm, {<shift> #<shift_amount>}
// where <shift> is one of {LSL, LSR, ASR, ROR}.
// <shift_amount> is uint6_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(Register reg,
Shift shift = LSL,
unsigned shift_amount = 0); // NOLINT(runtime/explicit)
// rm, {<extend> {#<shift_amount>}}
// where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
// <shift_amount> is uint2_t.
explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
bool IsImmediate() const;
bool IsPlainRegister() const;
bool IsShiftedRegister() const;
bool IsExtendedRegister() const;
bool IsZero() const;
// This returns an LSL shift (<= 4) operand as an equivalent extend operand,
// which helps in the encoding of instructions that use the stack pointer.
Operand ToExtendedRegister() const;
int64_t GetImmediate() const {
VIXL_ASSERT(IsImmediate());
return immediate_;
}
VIXL_DEPRECATED("GetImmediate", int64_t immediate() const) {
return GetImmediate();
}
int64_t GetEquivalentImmediate() const {
return IsZero() ? 0 : GetImmediate();
}
Register GetRegister() const {
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
return reg_;
}
VIXL_DEPRECATED("GetRegister", Register reg() const) { return GetRegister(); }
Register GetBaseRegister() const { return GetRegister(); }
Shift GetShift() const {
VIXL_ASSERT(IsShiftedRegister());
return shift_;
}
VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }
Extend GetExtend() const {
VIXL_ASSERT(IsExtendedRegister());
return extend_;
}
VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }
unsigned GetShiftAmount() const {
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
return shift_amount_;
}
VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
return GetShiftAmount();
}
private:
int64_t immediate_;
Register reg_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
};
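// For illustration, the three forms an Operand can take (a sketch only):
//   Operand(42);            // #42, a 64-bit immediate.
//   Operand(x1, LSL, 4);    // x1, shifted left by four bits.
//   Operand(w2, UXTB);      // w2, zero-extended from its low byte.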
// MemOperand represents the addressing mode of a load or store instruction.
class MemOperand {
public:
// Creates an invalid `MemOperand`.
MemOperand();
explicit MemOperand(Register base,
int64_t offset = 0,
AddrMode addrmode = Offset);
MemOperand(Register base,
Register regoffset,
Shift shift = LSL,
unsigned shift_amount = 0);
MemOperand(Register base,
Register regoffset,
Extend extend,
unsigned shift_amount = 0);
MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset);
const Register& GetBaseRegister() const { return base_; }
VIXL_DEPRECATED("GetBaseRegister", const Register& base() const) {
return GetBaseRegister();
}
const Register& GetRegisterOffset() const { return regoffset_; }
VIXL_DEPRECATED("GetRegisterOffset", const Register& regoffset() const) {
return GetRegisterOffset();
}
int64_t GetOffset() const { return offset_; }
VIXL_DEPRECATED("GetOffset", int64_t offset() const) { return GetOffset(); }
AddrMode GetAddrMode() const { return addrmode_; }
VIXL_DEPRECATED("GetAddrMode", AddrMode addrmode() const) {
return GetAddrMode();
}
Shift GetShift() const { return shift_; }
VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }
Extend GetExtend() const { return extend_; }
VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }
unsigned GetShiftAmount() const { return shift_amount_; }
VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
return GetShiftAmount();
}
bool IsImmediateOffset() const;
bool IsRegisterOffset() const;
bool IsPreIndex() const;
bool IsPostIndex() const;
void AddOffset(int64_t offset);
bool IsValid() const {
return base_.IsValid() &&
((addrmode_ == Offset) || (addrmode_ == PreIndex) ||
(addrmode_ == PostIndex)) &&
((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) &&
((offset_ == 0) || !regoffset_.IsValid());
}
bool Equals(const MemOperand& other) const {
return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) &&
(offset_ == other.offset_) && (addrmode_ == other.addrmode_) &&
(shift_ == other.shift_) && (extend_ == other.extend_) &&
(shift_amount_ == other.shift_amount_);
}
private:
Register base_;
Register regoffset_;
int64_t offset_;
AddrMode addrmode_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
};
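// For illustration, common addressing modes expressed as MemOperands (a
// sketch only):
//   MemOperand(x0, 8);               // [x0, #8]           (immediate offset)
//   MemOperand(x0, 8, PreIndex);     // [x0, #8]!          (pre-index)
//   MemOperand(x0, 8, PostIndex);    // [x0], #8           (post-index)
//   MemOperand(x0, w1, UXTW, 2);     // [x0, w1, uxtw #2]  (register offset)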
// This is an abstraction that can represent a register or memory location. The
// `MacroAssembler` provides helpers to move data between generic operands.
class GenericOperand {
public:
GenericOperand() { VIXL_ASSERT(!IsValid()); }
GenericOperand(const CPURegister& reg); // NOLINT(runtime/explicit)
GenericOperand(const MemOperand& mem_op,
size_t mem_op_size = 0); // NOLINT(runtime/explicit)
bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); }
bool Equals(const GenericOperand& other) const;
bool IsCPURegister() const {
VIXL_ASSERT(IsValid());
return cpu_register_.IsValid();
}
bool IsRegister() const {
return IsCPURegister() && cpu_register_.IsRegister();
}
bool IsVRegister() const {
return IsCPURegister() && cpu_register_.IsVRegister();
}
bool IsSameCPURegisterType(const GenericOperand& other) {
return IsCPURegister() && other.IsCPURegister() &&
GetCPURegister().IsSameType(other.GetCPURegister());
}
bool IsMemOperand() const {
VIXL_ASSERT(IsValid());
return mem_op_.IsValid();
}
CPURegister GetCPURegister() const {
VIXL_ASSERT(IsCPURegister());
return cpu_register_;
}
MemOperand GetMemOperand() const {
VIXL_ASSERT(IsMemOperand());
return mem_op_;
}
size_t GetMemOperandSizeInBytes() const {
VIXL_ASSERT(IsMemOperand());
return mem_op_size_;
}
size_t GetSizeInBytes() const {
return IsCPURegister() ? cpu_register_.GetSizeInBytes()
: GetMemOperandSizeInBytes();
}
size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; }
private:
CPURegister cpu_register_;
MemOperand mem_op_;
// The size of the memory region pointed to, in bytes.
// We only support sizes up to X/D register sizes.
size_t mem_op_size_;
};
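// For illustration (a sketch only): both a register and a stack slot can be
// wrapped in a GenericOperand, letting MacroAssembler helpers move data
// between them.
//   GenericOperand in_register(x0);
//   GenericOperand on_stack(MemOperand(sp, 16), kXRegSizeInBytes);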
}  // namespace aarch64
}  // namespace vixl
#endif // VIXL_AARCH64_OPERANDS_AARCH64_H_

File diff suppressed because it is too large

View file

@@ -0,0 +1,192 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_
#define VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_
#include "instructions-aarch64.h"
namespace vixl {
namespace aarch64 {
// Debug instructions.
//
// VIXL's macro-assembler and simulator support a few pseudo instructions to
// make debugging easier. These pseudo instructions do not exist on real
// hardware.
//
// TODO: Also consider allowing these pseudo-instructions to be disabled in the
// simulator, so that users can check that the input is valid native code.
// (This isn't possible in all cases. Printf won't work, for example.)
//
// Each debug pseudo instruction is represented by a HLT instruction. The HLT
// immediate field is used to identify the type of debug pseudo instruction.
enum DebugHltOpcode {
kUnreachableOpcode = 0xdeb0,
kPrintfOpcode,
kTraceOpcode,
kLogOpcode,
kRuntimeCallOpcode,
kSetCPUFeaturesOpcode,
kEnableCPUFeaturesOpcode,
kDisableCPUFeaturesOpcode,
kSaveCPUFeaturesOpcode,
kRestoreCPUFeaturesOpcode,
// Aliases.
kDebugHltFirstOpcode = kUnreachableOpcode,
kDebugHltLastOpcode = kLogOpcode
};
VIXL_DEPRECATED("DebugHltOpcode", typedef DebugHltOpcode DebugHltOpcodes);
// Each pseudo instruction uses a custom encoding for additional arguments, as
// described below.
// Unreachable - kUnreachableOpcode
//
// Instruction which should never be executed. This is used as a guard in parts
// of the code that should not be reachable, such as in data encoded inline in
// the instructions.
// Printf - kPrintfOpcode
// - arg_count: The number of arguments.
// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
//
// Simulate a call to printf.
//
// Floating-point and integer arguments are passed in separate sets of registers
// in AAPCS64 (even for varargs functions), so it is not possible to determine
// the type of each argument without some information about the values that were
// passed in. This information could be retrieved from the printf format string,
// but the format string is not trivial to parse so we encode the relevant
// information with the HLT instruction.
//
// Also, the following registers are populated (as if for a native AArch64
// call):
// x0: The format string
// x1-x7: Optional arguments, if type == CPURegister::kRegister
// d0-d7: Optional arguments, if type == CPURegister::kFPRegister
const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
const unsigned kPrintfLength = 3 * kInstructionSize;
const unsigned kPrintfMaxArgCount = 4;
// The argument pattern is a set of two-bit-fields, each with one of the
// following values:
enum PrintfArgPattern {
kPrintfArgW = 1,
kPrintfArgX = 2,
// There is no kPrintfArgS because floats are always converted to doubles in C
// varargs calls.
kPrintfArgD = 3
};
static const unsigned kPrintfArgPatternBits = 2;
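// For illustration (a sketch only; `masm` is assumed to be a MacroAssembler
// targeting the simulator): a call such as
//   masm.Printf("%d %f\n", w0, d1);
// is emitted with arg_count = 2 and
//   arg_pattern = kPrintfArgW | (kPrintfArgD << kPrintfArgPatternBits);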
// Trace - kTraceOpcode
// - parameter: TraceParameter stored as a uint32_t
// - command: TraceCommand stored as a uint32_t
//
// Allow for trace management in the generated code. This enables or disables
// automatic tracing of the specified information for every simulated
// instruction.
const unsigned kTraceParamsOffset = 1 * kInstructionSize;
const unsigned kTraceCommandOffset = 2 * kInstructionSize;
const unsigned kTraceLength = 3 * kInstructionSize;
// Trace parameters.
enum TraceParameters {
LOG_DISASM = 1 << 0, // Log disassembly.
LOG_REGS = 1 << 1, // Log general purpose registers.
LOG_VREGS = 1 << 2, // Log NEON and floating-point registers.
LOG_SYSREGS = 1 << 3, // Log the flags and system registers.
LOG_WRITE = 1 << 4, // Log writes to memory.
LOG_BRANCH = 1 << 5, // Log taken branches.
LOG_NONE = 0,
LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYSREGS,
LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE | LOG_BRANCH
};
// Trace commands.
enum TraceCommand { TRACE_ENABLE = 1, TRACE_DISABLE = 2 };
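// For illustration (a sketch only; `masm` is assumed to be a MacroAssembler
// targeting the simulator):
//   masm.Trace(LOG_ALL, TRACE_ENABLE);    // Start tracing everything.
//   ...                                   // Code to be traced.
//   masm.Trace(LOG_ALL, TRACE_DISABLE);   // Stop tracing.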
// Log - kLogOpcode
// - parameter: TraceParameter stored as a uint32_t
//
// Print the specified information once. This mechanism is separate from Trace.
// In particular, _all_ of the specified registers are printed, rather than just
// the registers that the instruction writes.
//
// Any combination of the TraceParameters values can be used, except that
// LOG_DISASM is not supported for Log.
const unsigned kLogParamsOffset = 1 * kInstructionSize;
const unsigned kLogLength = 2 * kInstructionSize;
// Runtime call simulation - kRuntimeCallOpcode
enum RuntimeCallType { kCallRuntime, kTailCallRuntime };
const unsigned kRuntimeCallWrapperOffset = 1 * kInstructionSize;
// The size of a pointer on the host.
const unsigned kRuntimeCallAddressSize = sizeof(uintptr_t);
const unsigned kRuntimeCallFunctionOffset =
kRuntimeCallWrapperOffset + kRuntimeCallAddressSize;
const unsigned kRuntimeCallTypeOffset =
kRuntimeCallFunctionOffset + kRuntimeCallAddressSize;
const unsigned kRuntimeCallLength = kRuntimeCallTypeOffset + sizeof(uint32_t);
// Enable or disable CPU features - kSetCPUFeaturesOpcode
// - kEnableCPUFeaturesOpcode
// - kDisableCPUFeaturesOpcode
// - parameter[...]: A list of `CPUFeatures::Feature`s, encoded as
// ConfigureCPUFeaturesElementType and terminated with CPUFeatures::kNone.
// - [Padding to align to kInstructionSize.]
//
// 'Set' completely overwrites the existing CPU features.
// 'Enable' and 'Disable' update the existing CPU features.
//
// These mechanisms allow users to strictly check the use of CPU features in
// different regions of code.
//
// These have no effect on the set of 'seen' features (as reported by
// CPUFeaturesAuditor::HasSeen(...)).
typedef uint8_t ConfigureCPUFeaturesElementType;
const unsigned kConfigureCPUFeaturesListOffset = 1 * kInstructionSize;
// Save or restore CPU features - kSaveCPUFeaturesOpcode
// - kRestoreCPUFeaturesOpcode
//
// These pseudo-instructions provide a stack-like mechanism for preserving the
// CPU features, or for restoring the last-preserved features. They take no
// arguments.
//
// These have no effect on the set of 'seen' features (as reported by
// CPUFeaturesAuditor::HasSeen(...)).
} // namespace aarch64
} // namespace vixl
#endif // VIXL_AARCH64_SIMULATOR_CONSTANTS_AARCH64_H_

View file

@@ -0,0 +1,101 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_ASSEMBLER_BASE_H
#define VIXL_ASSEMBLER_BASE_H
#include "code-buffer-vixl.h"
namespace vixl {
class CodeBufferCheckScope;
namespace internal {
class AssemblerBase {
public:
AssemblerBase() : allow_assembler_(false) {}
explicit AssemblerBase(size_t capacity)
: buffer_(capacity), allow_assembler_(false) {}
AssemblerBase(byte* buffer, size_t capacity)
: buffer_(buffer, capacity), allow_assembler_(false) {}
virtual ~AssemblerBase() {}
// Finalize a code buffer of generated instructions. This function must be
// called before executing or copying code from the buffer.
void FinalizeCode() { GetBuffer()->SetClean(); }
ptrdiff_t GetCursorOffset() const { return GetBuffer().GetCursorOffset(); }
// Return the address of the cursor.
template <typename T>
T GetCursorAddress() const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetBuffer().GetOffsetAddress<T>(GetCursorOffset());
}
size_t GetSizeOfCodeGenerated() const { return GetCursorOffset(); }
// Accessors.
CodeBuffer* GetBuffer() { return &buffer_; }
const CodeBuffer& GetBuffer() const { return buffer_; }
bool AllowAssembler() const { return allow_assembler_; }
protected:
void SetAllowAssembler(bool allow) { allow_assembler_ = allow; }
// CodeBufferCheckScope must be able to temporarily allow the assembler.
friend class vixl::CodeBufferCheckScope;
// Buffer where the code is emitted.
CodeBuffer buffer_;
private:
bool allow_assembler_;
public:
// Deprecated public interface.
// Return the address of an offset in the buffer.
template <typename T>
VIXL_DEPRECATED("GetBuffer().GetOffsetAddress<T>(offset)",
T GetOffsetAddress(ptrdiff_t offset) const) {
return GetBuffer().GetOffsetAddress<T>(offset);
}
// Return the address of the start of the buffer.
template <typename T>
VIXL_DEPRECATED("GetBuffer().GetStartAddress<T>()",
T GetStartAddress() const) {
return GetBuffer().GetOffsetAddress<T>(0);
}
};
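// For illustration (a sketch only; `masm` is assumed to be an assembler
// derived from AssemblerBase, such as the AArch64 Assembler):
//   masm.FinalizeCode();                               // Mark the buffer clean.
//   byte* code = masm.GetBuffer()->GetStartAddress<byte*>();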
} // namespace internal
} // namespace vixl
#endif // VIXL_ASSEMBLER_BASE_H

View file

@@ -0,0 +1,191 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CODE_BUFFER_H
#define VIXL_CODE_BUFFER_H
#include <cstring>
#include "globals-vixl.h"
#include "utils-vixl.h"
namespace vixl {
class CodeBuffer {
public:
static const size_t kDefaultCapacity = 4 * KBytes;
explicit CodeBuffer(size_t capacity = kDefaultCapacity);
CodeBuffer(byte* buffer, size_t capacity);
~CodeBuffer();
void Reset();
#ifdef VIXL_CODE_BUFFER_MMAP
void SetExecutable();
void SetWritable();
#else
// These require page-aligned memory blocks, which we can only guarantee with
// mmap.
VIXL_NO_RETURN_IN_DEBUG_MODE void SetExecutable() { VIXL_UNIMPLEMENTED(); }
VIXL_NO_RETURN_IN_DEBUG_MODE void SetWritable() { VIXL_UNIMPLEMENTED(); }
#endif
ptrdiff_t GetOffsetFrom(ptrdiff_t offset) const {
ptrdiff_t cursor_offset = cursor_ - buffer_;
VIXL_ASSERT((offset >= 0) && (offset <= cursor_offset));
return cursor_offset - offset;
}
VIXL_DEPRECATED("GetOffsetFrom",
ptrdiff_t OffsetFrom(ptrdiff_t offset) const) {
return GetOffsetFrom(offset);
}
ptrdiff_t GetCursorOffset() const { return GetOffsetFrom(0); }
VIXL_DEPRECATED("GetCursorOffset", ptrdiff_t CursorOffset() const) {
return GetCursorOffset();
}
void Rewind(ptrdiff_t offset) {
byte* rewound_cursor = buffer_ + offset;
VIXL_ASSERT((buffer_ <= rewound_cursor) && (rewound_cursor <= cursor_));
cursor_ = rewound_cursor;
}
template <typename T>
T GetOffsetAddress(ptrdiff_t offset) const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
VIXL_ASSERT((offset >= 0) && (offset <= (cursor_ - buffer_)));
return reinterpret_cast<T>(buffer_ + offset);
}
// Return the address of the start or end of the emitted code.
template <typename T>
T GetStartAddress() const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(0);
}
template <typename T>
T GetEndAddress() const {
VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
return GetOffsetAddress<T>(GetSizeInBytes());
}
size_t GetRemainingBytes() const {
VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_)));
return (buffer_ + capacity_) - cursor_;
}
VIXL_DEPRECATED("GetRemainingBytes", size_t RemainingBytes() const) {
return GetRemainingBytes();
}
size_t GetSizeInBytes() const {
VIXL_ASSERT((cursor_ >= buffer_) && (cursor_ <= (buffer_ + capacity_)));
return cursor_ - buffer_;
}
// A code buffer can emit:
// * 8, 16, 32 or 64-bit data: constant.
// * 16 or 32-bit data: instruction.
// * string: debug info.
void Emit8(uint8_t data) { Emit(data); }
void Emit16(uint16_t data) { Emit(data); }
void Emit32(uint32_t data) { Emit(data); }
void Emit64(uint64_t data) { Emit(data); }
void EmitString(const char* string);
void EmitData(const void* data, size_t size);
template <typename T>
void Emit(T value) {
VIXL_ASSERT(HasSpaceFor(sizeof(value)));
dirty_ = true;
memcpy(cursor_, &value, sizeof(value));
cursor_ += sizeof(value);
}
void UpdateData(size_t offset, const void* data, size_t size);
// Align to 32 bits.
void Align();
// Ensure there is enough space for and emit 'n' zero bytes.
void EmitZeroedBytes(int n);
bool Is16bitAligned() const { return IsAligned<2>(cursor_); }
bool Is32bitAligned() const { return IsAligned<4>(cursor_); }
size_t GetCapacity() const { return capacity_; }
VIXL_DEPRECATED("GetCapacity", size_t capacity() const) {
return GetCapacity();
}
bool IsManaged() const { return managed_; }
void Grow(size_t new_capacity);
bool IsDirty() const { return dirty_; }
void SetClean() { dirty_ = false; }
bool HasSpaceFor(size_t amount) const {
return GetRemainingBytes() >= amount;
}
void EnsureSpaceFor(size_t amount, bool* has_grown) {
bool is_full = !HasSpaceFor(amount);
if (is_full) Grow(capacity_ * 2 + amount);
VIXL_ASSERT(has_grown != NULL);
*has_grown = is_full;
}
void EnsureSpaceFor(size_t amount) {
bool dummy;
EnsureSpaceFor(amount, &dummy);
}
private:
// Backing store of the buffer.
byte* buffer_;
// If true the backing store is allocated and deallocated by the buffer. The
// backing store can then grow on demand. If false the backing store is
// provided by the user and cannot be resized internally.
bool managed_;
// Pointer to the next location to be written.
byte* cursor_;
// True if there has been any write since the buffer was created or cleaned.
bool dirty_;
// Capacity in bytes of the backing store.
size_t capacity_;
};
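// For illustration (a sketch only): emitting data into a managed buffer.
//   CodeBuffer buffer(4096);
//   buffer.Emit32(0xd503201f);    // The encoding of an AArch64 NOP.
//   buffer.SetClean();
//   uint32_t* code = buffer.GetStartAddress<uint32_t*>();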
} // namespace vixl
#endif // VIXL_CODE_BUFFER_H

View file

@@ -0,0 +1,322 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CODE_GENERATION_SCOPES_H_
#define VIXL_CODE_GENERATION_SCOPES_H_
#include "assembler-base-vixl.h"
#include "macro-assembler-interface.h"
namespace vixl {
// This scope will:
// - Allow code emission from the specified `Assembler`.
// - Optionally reserve space in the `CodeBuffer` (if it is managed by VIXL).
// - Optionally, on destruction, check the size of the generated code.
// (The size can be either exact or a maximum size.)
class CodeBufferCheckScope {
public:
// Tell whether or not the scope needs to ensure the associated CodeBuffer
// has enough space for the requested size.
enum BufferSpacePolicy {
kReserveBufferSpace,
kDontReserveBufferSpace,
// Deprecated, but kept for backward compatibility.
kCheck = kReserveBufferSpace,
kNoCheck = kDontReserveBufferSpace
};
// Tell whether or not the scope should assert the amount of code emitted
// within the scope is consistent with the requested amount.
enum SizePolicy {
kNoAssert, // Do not check the size of the code emitted.
kExactSize, // The code emitted must be exactly size bytes.
kMaximumSize // The code emitted must be at most size bytes.
};
// This constructor implicitly calls `Open` to initialise the scope
// (`assembler` must not be `NULL`), so it is ready to use immediately after
// it has been constructed.
CodeBufferCheckScope(internal::AssemblerBase* assembler,
size_t size,
BufferSpacePolicy check_policy = kReserveBufferSpace,
SizePolicy size_policy = kMaximumSize)
: assembler_(NULL), initialised_(false) {
Open(assembler, size, check_policy, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
CodeBufferCheckScope() : assembler_(NULL), initialised_(false) {
// Nothing to do.
}
virtual ~CodeBufferCheckScope() { Close(); }
// This function performs the actual initialisation work.
void Open(internal::AssemblerBase* assembler,
size_t size,
BufferSpacePolicy check_policy = kReserveBufferSpace,
SizePolicy size_policy = kMaximumSize) {
VIXL_ASSERT(!initialised_);
VIXL_ASSERT(assembler != NULL);
assembler_ = assembler;
if (check_policy == kReserveBufferSpace) {
assembler->GetBuffer()->EnsureSpaceFor(size);
}
#ifdef VIXL_DEBUG
limit_ = assembler_->GetSizeOfCodeGenerated() + size;
assert_policy_ = size_policy;
previous_allow_assembler_ = assembler_->AllowAssembler();
assembler_->SetAllowAssembler(true);
#else
USE(size_policy);
#endif
initialised_ = true;
}
// This function performs the cleaning-up work. It must succeed even if the
// scope has not been opened. It is safe to call multiple times.
void Close() {
#ifdef VIXL_DEBUG
if (!initialised_) {
return;
}
assembler_->SetAllowAssembler(previous_allow_assembler_);
switch (assert_policy_) {
case kNoAssert:
break;
case kExactSize:
VIXL_ASSERT(assembler_->GetSizeOfCodeGenerated() == limit_);
break;
case kMaximumSize:
VIXL_ASSERT(assembler_->GetSizeOfCodeGenerated() <= limit_);
break;
default:
VIXL_UNREACHABLE();
}
#endif
initialised_ = false;
}
protected:
internal::AssemblerBase* assembler_;
SizePolicy assert_policy_;
size_t limit_;
bool previous_allow_assembler_;
bool initialised_;
};
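// A usage sketch for the scope above (editor's illustration; `assembler`
// stands for any concrete assembler derived from internal::AssemblerBase):
//
//   {
//     // Reserve space for 8 bytes up front and assert, in debug mode, that
//     // at most 8 bytes are emitted before the scope is destroyed.
//     CodeBufferCheckScope scope(&assembler,
//                                8,
//                                CodeBufferCheckScope::kReserveBufferSpace,
//                                CodeBufferCheckScope::kMaximumSize);
//     // ... emit at most 8 bytes of code ...
//   }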
// This scope will:
// - Do the same as `CodeBufferCheckScope`, but:
// - If managed by VIXL, always reserve space in the `CodeBuffer`.
// - Always check the size (exact or maximum) of the generated code on
// destruction.
// - Emit pools if the specified size would push them out of range.
// - Block pools emission for the duration of the scope.
// This scope allows the `Assembler` and `MacroAssembler` to be freely and
// safely mixed for its duration.
class EmissionCheckScope : public CodeBufferCheckScope {
public:
// This constructor implicitly calls `Open` (when `masm` is not `NULL`) to
// initialise the scope, so it is ready to use immediately after it has been
// constructed.
EmissionCheckScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kMaximumSize) {
Open(masm, size, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
EmissionCheckScope() {}
virtual ~EmissionCheckScope() { Close(); }
enum PoolPolicy {
// Do not forbid pool emission inside the scope. Pools will not be emitted
// on `Open` either.
kIgnorePools,
// Force pools to be generated on `Open` if necessary and block their
// emission inside the scope.
kBlockPools,
// Deprecated, but kept for backward compatibility.
kCheckPools = kBlockPools
};
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kMaximumSize) {
Open(masm, size, size_policy, kBlockPools);
}
void Close() {
if (!initialised_) {
return;
}
if (masm_ == NULL) {
// Nothing to do.
return;
}
// Perform the opposite of `Open`, which is:
// - Check the code generation limit was not exceeded.
// - Release the pools.
CodeBufferCheckScope::Close();
if (pool_policy_ == kBlockPools) {
masm_->ReleasePools();
}
VIXL_ASSERT(!initialised_);
}
protected:
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy,
PoolPolicy pool_policy) {
if (masm == NULL) {
// Nothing to do.
// We may reach this point in a context of conditional code generation.
// See `aarch64::MacroAssembler::MoveImmediateHelper()` for an example.
return;
}
masm_ = masm;
pool_policy_ = pool_policy;
if (pool_policy_ == kBlockPools) {
// To avoid duplicating the work to check that enough space is available
// in the buffer, do not use the more generic `EnsureEmitFor()`. It is
// done below when opening `CodeBufferCheckScope`.
masm->EnsureEmitPoolsFor(size);
masm->BlockPools();
}
// The buffer should be checked *after* we emit the pools.
CodeBufferCheckScope::Open(masm->AsAssemblerBase(),
size,
kReserveBufferSpace,
size_policy);
VIXL_ASSERT(initialised_);
}
// This constructor should only be used from code that is *currently
// generating* the pools, to avoid an infinite loop.
EmissionCheckScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy,
PoolPolicy pool_policy) {
Open(masm, size, size_policy, pool_policy);
}
MacroAssemblerInterface* masm_;
PoolPolicy pool_policy_;
};
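// A usage sketch (editor's illustration; `masm` stands for an object
// implementing MacroAssemblerInterface, such as an AArch32 or AArch64
// MacroAssembler):
//
//   {
//     // Pools are emitted up front if the 16 requested bytes would push them
//     // out of range, then blocked; on destruction the scope checks that at
//     // most 16 bytes were emitted.
//     EmissionCheckScope scope(&masm, 16);
//     // Assembler and MacroAssembler methods can be mixed freely here.
//   }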
// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope will:
// - Do the same as `EmissionCheckScope`.
// - Block access to the MacroAssemblerInterface (using run-time assertions).
class ExactAssemblyScope : public EmissionCheckScope {
public:
// This constructor implicitly calls `Open` (when `masm` is not `NULL`) to
// initialise the scope, so it is ready to use immediately after it has been
// constructed.
ExactAssemblyScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kExactSize) {
Open(masm, size, size_policy);
}
// This constructor does not implicitly initialise the scope. Instead, the
// user is required to explicitly call the `Open` function before using the
// scope.
ExactAssemblyScope() {}
virtual ~ExactAssemblyScope() { Close(); }
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy = kExactSize) {
Open(masm, size, size_policy, kBlockPools);
}
void Close() {
if (!initialised_) {
return;
}
if (masm_ == NULL) {
// Nothing to do.
return;
}
#ifdef VIXL_DEBUG
masm_->SetAllowMacroInstructions(previous_allow_macro_assembler_);
#else
USE(previous_allow_macro_assembler_);
#endif
EmissionCheckScope::Close();
}
protected:
// This protected constructor allows overriding the pool policy. It is
// available to allow this scope to be used in code that handles generation
// of pools.
ExactAssemblyScope(MacroAssemblerInterface* masm,
size_t size,
SizePolicy assert_policy,
PoolPolicy pool_policy) {
Open(masm, size, assert_policy, pool_policy);
}
void Open(MacroAssemblerInterface* masm,
size_t size,
SizePolicy size_policy,
PoolPolicy pool_policy) {
VIXL_ASSERT(size_policy != kNoAssert);
if (masm == NULL) {
// Nothing to do.
return;
}
// Rely on EmissionCheckScope::Open to initialise `masm_` and
// `pool_policy_`.
EmissionCheckScope::Open(masm, size, size_policy, pool_policy);
#ifdef VIXL_DEBUG
previous_allow_macro_assembler_ = masm->AllowMacroInstructions();
masm->SetAllowMacroInstructions(false);
#endif
}
private:
bool previous_allow_macro_assembler_;
};
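// A usage sketch (editor's illustration; `masm` stands for an AArch64
// MacroAssembler, `kInstructionSize` for the assumed 4-byte AArch64
// instruction size, and `nop()`/`ret()` for raw Assembler instructions):
//
//   {
//     ExactAssemblyScope scope(&masm, 2 * kInstructionSize,
//                              ExactAssemblyScope::kExactSize);
//     masm.nop();  // Only raw assembler instructions are allowed here;
//     masm.ret();  // macro-instructions would trigger a run-time assertion.
//   }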
} // namespace vixl
#endif // VIXL_CODE_GENERATION_SCOPES_H_


@@ -0,0 +1,160 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_COMPILER_INTRINSICS_H
#define VIXL_COMPILER_INTRINSICS_H
#include "globals-vixl.h"
namespace vixl {
// Helper to check whether the version of GCC used is greater than the specified
// requirement.
#define MAJOR 1000000
#define MINOR 1000
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
((__GNUC__ * (MAJOR) + __GNUC_MINOR__ * (MINOR) + __GNUC_PATCHLEVEL__) >= \
((major) * (MAJOR) + ((minor)) * (MINOR) + (patchlevel)))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
((__GNUC__ * (MAJOR) + __GNUC_MINOR__ * (MINOR)) >= \
((major) * (MAJOR) + ((minor)) * (MINOR) + (patchlevel)))
#else
#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0
#endif
#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS)
// clang-format off
#define COMPILER_HAS_BUILTIN_CLRSB (__has_builtin(__builtin_clrsb))
#define COMPILER_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz))
#define COMPILER_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
#define COMPILER_HAS_BUILTIN_FFS (__has_builtin(__builtin_ffs))
#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
// clang-format on
#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS)
// The documentation for these builtins is available at:
// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html
// clang-format off
# define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0))
# define COMPILER_HAS_BUILTIN_CLZ (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_CTZ (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_FFS (GCC_VERSION_OR_NEWER(3, 4, 0))
# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0))
// clang-format on
#else
// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually
// implemented C++ methods.
// clang-format off
#define COMPILER_HAS_BUILTIN_BSWAP false
#define COMPILER_HAS_BUILTIN_CLRSB false
#define COMPILER_HAS_BUILTIN_CLZ false
#define COMPILER_HAS_BUILTIN_CTZ false
#define COMPILER_HAS_BUILTIN_FFS false
#define COMPILER_HAS_BUILTIN_POPCOUNT false
// clang-format on
#endif
template <typename V>
inline bool IsPowerOf2(V value) {
return (value != 0) && ((value & (value - 1)) == 0);
}
// Declaration of fallback functions.
int CountLeadingSignBitsFallBack(int64_t value, int width);
int CountLeadingZerosFallBack(uint64_t value, int width);
int CountSetBitsFallBack(uint64_t value, int width);
int CountTrailingZerosFallBack(uint64_t value, int width);
// Implementation of intrinsics functions.
// TODO: The implementations could be improved for sizes different from 32bit
// and 64bit: we could mask the values and call the appropriate builtin.
template <typename V>
inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CLRSB
if (width == 32) {
return __builtin_clrsb(value);
} else if (width == 64) {
return __builtin_clrsbll(value);
}
#endif
return CountLeadingSignBitsFallBack(value, width);
}
template <typename V>
inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CLZ
if (width == 32) {
return (value == 0) ? 32 : __builtin_clz(static_cast<unsigned>(value));
} else if (width == 64) {
return (value == 0) ? 64 : __builtin_clzll(value);
}
#endif
return CountLeadingZerosFallBack(value, width);
}
template <typename V>
inline int CountSetBits(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_POPCOUNT
if (width == 32) {
return __builtin_popcount(static_cast<unsigned>(value));
} else if (width == 64) {
return __builtin_popcountll(value);
}
#endif
return CountSetBitsFallBack(value, width);
}
template <typename V>
inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) {
#if COMPILER_HAS_BUILTIN_CTZ
if (width == 32) {
return (value == 0) ? 32 : __builtin_ctz(static_cast<unsigned>(value));
} else if (width == 64) {
return (value == 0) ? 64 : __builtin_ctzll(value);
}
#endif
return CountTrailingZerosFallBack(value, width);
}
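// Usage sketch (editor's illustration):
//
//   IsPowerOf2(64);                        // true
//   CountLeadingZeros(UINT32_C(1));        // 31
//   CountLeadingSignBits(INT32_C(-1));     // 31
//   CountTrailingZeros(UINT64_C(0x100));   // 8
//   CountSetBits(UINT32_C(0xf0));          // 4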
} // namespace vixl
#endif // VIXL_COMPILER_INTRINSICS_H


@@ -0,0 +1,364 @@
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_CPU_FEATURES_H
#define VIXL_CPU_FEATURES_H
#include <ostream>
#include "globals-vixl.h"
namespace vixl {
// clang-format off
#define VIXL_CPU_FEATURE_LIST(V) \
/* If set, the OS traps and emulates MRS accesses to relevant (EL1) ID_* */ \
/* registers, so that the detailed feature registers can be read */ \
/* directly. */ \
V(kIDRegisterEmulation, "ID register emulation", "cpuid") \
\
V(kFP, "FP", "fp") \
V(kNEON, "NEON", "asimd") \
V(kCRC32, "CRC32", "crc32") \
/* Cryptographic support instructions. */ \
V(kAES, "AES", "aes") \
V(kSHA1, "SHA1", "sha1") \
V(kSHA2, "SHA2", "sha2") \
/* A form of PMULL{2} with a 128-bit (1Q) result. */ \
V(kPmull1Q, "Pmull1Q", "pmull") \
/* Atomic operations on memory: CAS, LDADD, STADD, SWP, etc. */ \
V(kAtomics, "Atomics", "atomics") \
/* Limited ordering regions: LDLAR, STLLR and their variants. */ \
V(kLORegions, "LORegions", NULL) \
/* Rounding doubling multiply add/subtract: SQRDMLAH and SQRDMLSH. */ \
V(kRDM, "RDM", "asimdrdm") \
/* SDOT and UDOT support (in NEON). */ \
V(kDotProduct, "DotProduct", "asimddp") \
/* Half-precision (FP16) support for FP and NEON, respectively. */ \
V(kFPHalf, "FPHalf", "fphp") \
V(kNEONHalf, "NEONHalf", "asimdhp") \
/* The RAS extension, including the ESB instruction. */ \
V(kRAS, "RAS", NULL) \
/* Data cache clean to the point of persistence: DC CVAP. */ \
V(kDCPoP, "DCPoP", "dcpop") \
/* Cryptographic support instructions. */ \
V(kSHA3, "SHA3", "sha3") \
V(kSHA512, "SHA512", "sha512") \
V(kSM3, "SM3", "sm3") \
V(kSM4, "SM4", "sm4") \
/* Pointer authentication for addresses. */ \
V(kPAuth, "PAuth", NULL) \
/* Pointer authentication for addresses uses QARMA. */ \
V(kPAuthQARMA, "PAuthQARMA", NULL) \
/* Generic authentication (using the PACGA instruction). */ \
V(kPAuthGeneric, "PAuthGeneric", NULL) \
/* Generic authentication uses QARMA. */ \
V(kPAuthGenericQARMA, "PAuthGenericQARMA", NULL) \
/* JavaScript-style FP <-> integer conversion instruction: FJCVTZS. */ \
V(kJSCVT, "JSCVT", "jscvt") \
/* RCpc-based model (for weaker release consistency): LDAPR and variants. */ \
V(kRCpc, "RCpc", "lrcpc") \
/* Complex number support for NEON: FCMLA and FCADD. */ \
V(kFcma, "Fcma", "fcma")
// clang-format on
class CPUFeaturesConstIterator;
// A representation of the set of features known to be supported by the target
// device. Each feature is represented by a simple boolean flag.
//
// - When the Assembler is asked to assemble an instruction, it asserts (in
// debug mode) that the necessary features are available.
//
// - TODO: The MacroAssembler relies on the Assembler's assertions, but in
// some cases it may be useful for macros to generate a fall-back sequence
// in case features are not available.
//
// - The Simulator assumes by default that all features are available, but it
// is possible to configure it to fail if the simulated code uses features
// that are not enabled.
//
// The Simulator also offers pseudo-instructions to allow features to be
// enabled and disabled dynamically. This is useful when you want to ensure
// that some features are constrained to certain areas of code.
//
// - The base Disassembler knows nothing about CPU features, but the
// PrintDisassembler can be configured to annotate its output with warnings
// about unavailable features. The Simulator uses this feature when
// instruction trace is enabled.
//
// - The Decoder-based components -- the Simulator and PrintDisassembler --
// rely on a CPUFeaturesAuditor visitor. This visitor keeps a list of
// features actually encountered so that a large block of code can be
// examined (either directly or through simulation), and the required
// features analysed later.
//
// Expected usage:
//
// // By default, VIXL uses CPUFeatures::AArch64LegacyBaseline(), for
// // compatibility with older versions of VIXL.
// MacroAssembler masm;
//
// // Generate code only for the current CPU.
// masm.SetCPUFeatures(CPUFeatures::InferFromOS());
//
// // Turn off feature checking entirely.
// masm.SetCPUFeatures(CPUFeatures::All());
//
// Feature set manipulation:
//
// CPUFeatures f; // The default constructor gives an empty set.
// // Individual features can be added (or removed).
// f.Combine(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kAES);
// f.Remove(CPUFeatures::kNEON);
//
// // Some helpers exist for extensions that provide several features.
// f.Remove(CPUFeatures::All());
// f.Combine(CPUFeatures::AArch64LegacyBaseline());
//
// // Chained construction is also possible.
// CPUFeatures g =
// f.With(CPUFeatures::kPmull1Q).Without(CPUFeatures::kCRC32);
//
// // Features can be queried. Where multiple features are given, they are
// // combined with logical AND.
// if (f.Has(CPUFeatures::kNEON)) { ... }
// if (f.Has(CPUFeatures::kFP, CPUFeatures::kNEON)) { ... }
// if (f.Has(g)) { ... }
// // If the empty set is requested, the result is always 'true'.
// VIXL_ASSERT(f.Has(CPUFeatures()));
//
// // For debug and reporting purposes, features can be enumerated (or
// // printed directly):
// std::cout << CPUFeatures::kNEON; // Prints something like "NEON".
// std::cout << f; // Prints something like "FP, NEON, CRC32".
class CPUFeatures {
public:
// clang-format off
// Individual features.
// These should be treated as opaque tokens. User code should not rely on
// specific numeric values or ordering.
enum Feature {
// Refer to VIXL_CPU_FEATURE_LIST (above) for the list of feature names that
// this class supports.
kNone = -1,
#define VIXL_DECLARE_FEATURE(SYMBOL, NAME, CPUINFO) SYMBOL,
VIXL_CPU_FEATURE_LIST(VIXL_DECLARE_FEATURE)
#undef VIXL_DECLARE_FEATURE
kNumberOfFeatures
};
// clang-format on
// By default, construct with no features enabled.
CPUFeatures() : features_(0) {}
// Construct with some features already enabled.
CPUFeatures(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone);
// Construct with all features enabled. This can be used to disable feature
// checking: `Has(...)` returns true regardless of the argument.
static CPUFeatures All();
// Construct an empty CPUFeatures. This is equivalent to the default
// constructor, but is provided for symmetry and convenience.
static CPUFeatures None() { return CPUFeatures(); }
// The presence of these features was assumed by versions of VIXL before this
// API was added, so using this set by default ensures API compatibility.
static CPUFeatures AArch64LegacyBaseline() {
return CPUFeatures(kFP, kNEON, kCRC32);
}
// Construct a new CPUFeatures object based on what the OS reports.
static CPUFeatures InferFromOS();
// Combine another CPUFeatures object into this one. Features that already
// exist in this set are left unchanged.
void Combine(const CPUFeatures& other);
// Combine specific features into this set. Features that already exist in
// this set are left unchanged.
void Combine(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone);
// Remove features in another CPUFeatures object from this one.
void Remove(const CPUFeatures& other);
// Remove specific features from this set.
void Remove(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone);
// Chaining helpers for convenient construction.
CPUFeatures With(const CPUFeatures& other) const;
CPUFeatures With(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone) const;
CPUFeatures Without(const CPUFeatures& other) const;
CPUFeatures Without(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone) const;
// Query features.
// Note that an empty query (like `Has(kNone)`) always returns true.
bool Has(const CPUFeatures& other) const;
bool Has(Feature feature0,
Feature feature1 = kNone,
Feature feature2 = kNone,
Feature feature3 = kNone) const;
// Return the number of enabled features.
size_t Count() const;
// Check for equivalence.
bool operator==(const CPUFeatures& other) const {
return Has(other) && other.Has(*this);
}
bool operator!=(const CPUFeatures& other) const { return !(*this == other); }
typedef CPUFeaturesConstIterator const_iterator;
const_iterator begin() const;
const_iterator end() const;
private:
// Each bit represents a feature. This field will be replaced as needed if
// features are added.
uint64_t features_;
friend std::ostream& operator<<(std::ostream& os,
const vixl::CPUFeatures& features);
};
std::ostream& operator<<(std::ostream& os, vixl::CPUFeatures::Feature feature);
std::ostream& operator<<(std::ostream& os, const vixl::CPUFeatures& features);
// This is not a proper C++ iterator type, but it simulates enough of
// ForwardIterator that simple loops can be written.
class CPUFeaturesConstIterator {
public:
CPUFeaturesConstIterator(const CPUFeatures* cpu_features = NULL,
CPUFeatures::Feature start = CPUFeatures::kNone)
: cpu_features_(cpu_features), feature_(start) {
VIXL_ASSERT(IsValid());
}
bool operator==(const CPUFeaturesConstIterator& other) const;
bool operator!=(const CPUFeaturesConstIterator& other) const {
return !(*this == other);
}
CPUFeatures::Feature operator++();
CPUFeatures::Feature operator++(int);
CPUFeatures::Feature operator*() const {
VIXL_ASSERT(IsValid());
return feature_;
}
// For proper support of C++'s simplest "Iterator" concept, this class would
// have to define member types (such as CPUFeaturesIterator::pointer) to make
// it appear as if it iterates over Feature objects in memory. That is, we'd
// need CPUFeatures::iterator to behave like std::vector<Feature>::iterator.
// This is at least partially possible -- the std::vector<bool> specialisation
// does something similar -- but it doesn't seem worthwhile for a
// special-purpose debug helper, so they are omitted here.
private:
const CPUFeatures* cpu_features_;
CPUFeatures::Feature feature_;
bool IsValid() const {
return ((cpu_features_ == NULL) && (feature_ == CPUFeatures::kNone)) ||
cpu_features_->Has(feature_);
}
};
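// An iteration sketch over the enabled features (editor's illustration):
//
//   CPUFeatures features = CPUFeatures::InferFromOS();
//   for (CPUFeatures::const_iterator it = features.begin();
//        it != features.end();
//        ++it) {
//     std::cout << *it << "\n";
//   }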
// A convenience scope for temporarily modifying a CPU features object. This
// allows features to be enabled for short sequences.
//
// Expected usage:
//
// {
// CPUFeaturesScope cpu(&masm, CPUFeatures::kCRC32);
// // This scope can now use CRC32, as well as anything else that was enabled
// // before the scope.
//
// ...
//
// // At the end of the scope, the original CPU features are restored.
// }
class CPUFeaturesScope {
public:
// Start a CPUFeaturesScope on any object that implements
// `CPUFeatures* GetCPUFeatures()`.
template <typename T>
explicit CPUFeaturesScope(T* cpu_features_wrapper,
CPUFeatures::Feature feature0 = CPUFeatures::kNone,
CPUFeatures::Feature feature1 = CPUFeatures::kNone,
CPUFeatures::Feature feature2 = CPUFeatures::kNone,
CPUFeatures::Feature feature3 = CPUFeatures::kNone)
: cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
old_features_(*cpu_features_) {
cpu_features_->Combine(feature0, feature1, feature2, feature3);
}
template <typename T>
CPUFeaturesScope(T* cpu_features_wrapper, const CPUFeatures& other)
: cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
old_features_(*cpu_features_) {
cpu_features_->Combine(other);
}
~CPUFeaturesScope() { *cpu_features_ = old_features_; }
// For advanced usage, the CPUFeatures object can be accessed directly.
// The scope will restore the original state when it ends.
CPUFeatures* GetCPUFeatures() const { return cpu_features_; }
void SetCPUFeatures(const CPUFeatures& cpu_features) {
*cpu_features_ = cpu_features;
}
private:
CPUFeatures* const cpu_features_;
const CPUFeatures old_features_;
};
} // namespace vixl
#endif // VIXL_CPU_FEATURES_H


@@ -0,0 +1,284 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_GLOBALS_H
#define VIXL_GLOBALS_H
// Get standard C99 macros for integer types.
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
extern "C" {
#include <inttypes.h>
#include <stdint.h>
}
#include <cassert>
#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include "platform-vixl.h"
#ifdef VIXL_NEGATIVE_TESTING
#include <sstream>
#include <stdexcept>
#include <string>
#endif
namespace vixl {
typedef uint8_t byte;
const int KBytes = 1024;
const int MBytes = 1024 * KBytes;
const int kBitsPerByte = 8;
template <int SizeInBits>
struct Unsigned;
template <>
struct Unsigned<32> {
typedef uint32_t type;
};
template <>
struct Unsigned<64> {
typedef uint64_t type;
};
} // namespace vixl
// Detect the host's pointer size.
#if (UINTPTR_MAX == UINT32_MAX)
#define VIXL_HOST_POINTER_32
#elif (UINTPTR_MAX == UINT64_MAX)
#define VIXL_HOST_POINTER_64
#else
#error "Unsupported host pointer size."
#endif
#ifdef VIXL_NEGATIVE_TESTING
#define VIXL_ABORT() \
do { \
std::ostringstream oss; \
oss << "Aborting in " << __FILE__ << ", line " << __LINE__ << std::endl; \
throw std::runtime_error(oss.str()); \
} while (false)
#define VIXL_ABORT_WITH_MSG(msg) \
do { \
std::ostringstream oss; \
oss << (msg) << "in " << __FILE__ << ", line " << __LINE__ << std::endl; \
throw std::runtime_error(oss.str()); \
} while (false)
#define VIXL_CHECK(condition) \
do { \
if (!(condition)) { \
std::ostringstream oss; \
oss << "Assertion failed (" #condition ")\nin "; \
oss << __FILE__ << ", line " << __LINE__ << std::endl; \
throw std::runtime_error(oss.str()); \
} \
} while (false)
#else
#define VIXL_ABORT() \
do { \
printf("Aborting in %s, line %i\n", __FILE__, __LINE__); \
abort(); \
} while (false)
#define VIXL_ABORT_WITH_MSG(msg) \
do { \
printf("%sin %s, line %i\n", (msg), __FILE__, __LINE__); \
abort(); \
} while (false)
#define VIXL_CHECK(condition) \
do { \
if (!(condition)) { \
printf("Assertion failed (%s)\nin %s, line %i\n", \
#condition, \
__FILE__, \
__LINE__); \
abort(); \
} \
} while (false)
#endif
#ifdef VIXL_DEBUG
#define VIXL_ASSERT(condition) VIXL_CHECK(condition)
#define VIXL_UNIMPLEMENTED() \
do { \
VIXL_ABORT_WITH_MSG("UNIMPLEMENTED "); \
} while (false)
#define VIXL_UNREACHABLE() \
do { \
VIXL_ABORT_WITH_MSG("UNREACHABLE "); \
} while (false)
#else
#define VIXL_ASSERT(condition) ((void)0)
#define VIXL_UNIMPLEMENTED() ((void)0)
#define VIXL_UNREACHABLE() ((void)0)
#endif
// This is not as powerful as template-based assertions, but it is simple.
// It assumes that the descriptions are unique. If this starts being a problem,
// we can switch to a different implementation.
#define VIXL_CONCAT(a, b) a##b
#if __cplusplus >= 201103L
#define VIXL_STATIC_ASSERT_LINE(line_unused, condition, message) \
static_assert(condition, message)
#else
#define VIXL_STATIC_ASSERT_LINE(line, condition, message_unused) \
typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
__attribute__((unused))
#endif
#define VIXL_STATIC_ASSERT(condition) \
VIXL_STATIC_ASSERT_LINE(__LINE__, condition, "")
#define VIXL_STATIC_ASSERT_MESSAGE(condition, message) \
VIXL_STATIC_ASSERT_LINE(__LINE__, condition, message)
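// Usage sketch (editor's illustration):
//
//   VIXL_STATIC_ASSERT(sizeof(void*) >= sizeof(uint32_t));
//   VIXL_STATIC_ASSERT_MESSAGE(vixl::kBitsPerByte == 8,
//                              "Unexpected byte width.");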
#define VIXL_WARNING(message) \
do { \
printf("WARNING in %s, line %i: %s", __FILE__, __LINE__, message); \
} while (false)
template <typename T1>
inline void USE(const T1&) {}
template <typename T1, typename T2>
inline void USE(const T1&, const T2&) {}
template <typename T1, typename T2, typename T3>
inline void USE(const T1&, const T2&, const T3&) {}
template <typename T1, typename T2, typename T3, typename T4>
inline void USE(const T1&, const T2&, const T3&, const T4&) {}
#define VIXL_ALIGNMENT_EXCEPTION() \
do { \
fprintf(stderr, "ALIGNMENT EXCEPTION\t"); \
VIXL_ABORT(); \
} while (0)
// The clang::fallthrough attribute is used along with the Wimplicit-fallthrough
// argument to annotate intentional fall-through between switch labels.
// For more information please refer to:
// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
#ifndef __has_warning
#define __has_warning(x) 0
#endif
// Fallthrough annotation for Clang and C++11(201103L).
#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
#define VIXL_FALLTHROUGH() [[clang::fallthrough]]
// Fallthrough annotation for GCC >= 7.
#elif __GNUC__ >= 7
#define VIXL_FALLTHROUGH() __attribute__((fallthrough))
#else
#define VIXL_FALLTHROUGH() \
do { \
} while (0)
#endif
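// Usage sketch (editor's illustration; `HandleZero()` and `HandleOne()` are
// hypothetical):
//
//   switch (value) {
//     case 0:
//       HandleZero();
//       VIXL_FALLTHROUGH();
//     case 1:
//       HandleOne();
//       break;
//   }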
#if __cplusplus >= 201103L
#define VIXL_NO_RETURN [[noreturn]]
#else
#define VIXL_NO_RETURN __attribute__((noreturn))
#endif
#ifdef VIXL_DEBUG
#define VIXL_NO_RETURN_IN_DEBUG_MODE VIXL_NO_RETURN
#else
#define VIXL_NO_RETURN_IN_DEBUG_MODE
#endif
#if __cplusplus >= 201103L
#define VIXL_OVERRIDE override
#else
#define VIXL_OVERRIDE
#endif
// Some functions might only be marked as "noreturn" for the DEBUG build. This
// macro should be used for such cases (for more details see what
// VIXL_UNREACHABLE expands to).
#ifdef VIXL_DEBUG
#define VIXL_DEBUG_NO_RETURN VIXL_NO_RETURN
#else
#define VIXL_DEBUG_NO_RETURN
#endif
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 1
#endif
#else
#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 0
#endif
#if VIXL_AARCH64_GENERATE_SIMULATOR_CODE
#warning "Generating Simulator instructions without Simulator support."
#endif
#endif
// We do not have a simulator for AArch32, although we can pretend we do so that
// tests that require running natively can be skipped.
#ifndef __arm__
#define VIXL_INCLUDE_SIMULATOR_AARCH32
#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 1
#endif
#else
#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 0
#endif
#endif
#ifdef USE_SIMULATOR
#error "Please see the release notes for USE_SIMULATOR."
#endif
// Target Architecture/ISA
#ifdef VIXL_INCLUDE_TARGET_A64
#define VIXL_INCLUDE_TARGET_AARCH64
#endif
#if defined(VIXL_INCLUDE_TARGET_A32) && defined(VIXL_INCLUDE_TARGET_T32)
#define VIXL_INCLUDE_TARGET_AARCH32
#elif defined(VIXL_INCLUDE_TARGET_A32)
#define VIXL_INCLUDE_TARGET_A32_ONLY
#else
#define VIXL_INCLUDE_TARGET_T32_ONLY
#endif
#endif // VIXL_GLOBALS_H


@@ -0,0 +1,915 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_INVALSET_H_
#define VIXL_INVALSET_H_
#include <cstring>
#include <algorithm>
#include <vector>
#include "globals-vixl.h"
namespace vixl {
// We define a custom data structure template and its iterator as `std`
// containers do not fit the performance requirements for some of our use cases.
//
// The structure behaves like an iterable unordered set with special properties
// and restrictions. "InvalSet" stands for "Invalidatable Set".
//
// Restrictions and requirements:
// - Adding an element already present in the set is illegal. In debug mode,
// this is checked at insertion time.
// - The templated class `ElementType` must provide comparison operators so that
// `std::sort()` can be used.
// - A key must be available to represent invalid elements.
// - Elements with an invalid key must compare higher than or equal to any
//   other element.
//
// Use cases and performance considerations:
// Our use cases have two characteristics that allow us to design this
// structure to provide fast insertion *and* fast search and deletion
// operations:
// - Elements are (generally) inserted in order (sorted according to their key).
// - A key is available to mark elements as invalid (deleted).
// The backing `std::vector` allows for fast insertions. When
// searching for an element we ensure the elements are sorted (this is generally
// the case) and perform a binary search. When deleting an element we do not
// free the associated memory immediately. Instead, an element to be deleted is
// marked with the 'invalid' key. Other methods of the container take care of
// ignoring entries marked as invalid.
// To avoid the overhead of the `std::vector` container when only a few
// entries are used, a number of elements are preallocated.
// 'ElementType' and 'KeyType' are respectively the types of the elements and
// their key. The structure only reclaims memory when safe to do so, if the
// number of elements that can be reclaimed is greater than `RECLAIM_FROM` and
// greater than `<total number of elements> / RECLAIM_FACTOR`.
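// A usage sketch (editor's illustration; `Entry`, `EntrySet` and the chosen
// template parameters are hypothetical, not part of VIXL):
//
//   struct Entry {
//     int32_t key;
//     bool operator<(const Entry& o) const { return key < o.key; }
//     bool operator<=(const Entry& o) const { return key <= o.key; }
//     bool operator>(const Entry& o) const { return key > o.key; }
//     bool operator==(const Entry& o) const { return key == o.key; }
//   };
//   // Preallocate 8 elements, use int32_t keys, and treat INT32_MAX as the
//   // invalid key so that invalidated elements compare highest. Memory is
//   // reclaimed once more than 8 invalidated elements make up over a quarter
//   // of the backing storage.
//   typedef InvalSet<Entry, 8, int32_t, INT32_MAX, 8, 4> EntrySet;
//   // The user provides the key accessors for the instantiation:
//   template <>
//   inline int32_t InvalSet<Entry, 8, int32_t, INT32_MAX, 8, 4>::GetKey(
//       const Entry& e) { return e.key; }
//   template <>
//   inline void InvalSet<Entry, 8, int32_t, INT32_MAX, 8, 4>::SetKey(
//       Entry* e, int32_t key) { e->key = key; }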
// clang-format off
#define TEMPLATE_INVALSET_P_DECL \
class ElementType, \
unsigned N_PREALLOCATED_ELEMENTS, \
class KeyType, \
KeyType INVALID_KEY, \
size_t RECLAIM_FROM, \
unsigned RECLAIM_FACTOR
// clang-format on
#define TEMPLATE_INVALSET_P_DEF \
ElementType, N_PREALLOCATED_ELEMENTS, KeyType, INVALID_KEY, RECLAIM_FROM, \
RECLAIM_FACTOR
template <class S>
class InvalSetIterator; // Forward declaration.
template <TEMPLATE_INVALSET_P_DECL>
class InvalSet {
public:
InvalSet();
~InvalSet();
static const size_t kNPreallocatedElements = N_PREALLOCATED_ELEMENTS;
static const KeyType kInvalidKey = INVALID_KEY;
// C++ STL iterator interface.
typedef InvalSetIterator<InvalSet<TEMPLATE_INVALSET_P_DEF> > iterator;
iterator begin();
iterator end();
// It is illegal to insert an element already present in the set.
void insert(const ElementType& element);
// Looks for the specified element in the set and - if found - deletes it.
// The return value is the number of elements erased: either 0 or 1.
size_t erase(const ElementType& element);
// This indicates the number of (valid) elements stored in this set.
size_t size() const;
// Returns true if no elements are stored in the set.
// Note that this does not mean that the backing storage is empty: it can still
// contain invalid elements.
bool empty() const;
void clear();
const ElementType GetMinElement();
// This returns the key of the minimum element in the set.
KeyType GetMinElementKey();
static bool IsValid(const ElementType& element);
static KeyType GetKey(const ElementType& element);
static void SetKey(ElementType* element, KeyType key);
typedef ElementType _ElementType;
typedef KeyType _KeyType;
protected:
// Returns a pointer to the element in vector_ if it was found, or NULL
// otherwise.
ElementType* Search(const ElementType& element);
// The argument *must* point to an element stored in *this* set.
// This function is not allowed to move elements in the backing vector
// storage.
void EraseInternal(ElementType* element);
// The elements in the range searched must be sorted.
ElementType* BinarySearch(const ElementType& element,
ElementType* start,
ElementType* end) const;
// Sort the elements.
enum SortType {
// The 'hard' version guarantees that invalid elements are moved to the end
// of the container.
kHardSort,
// The 'soft' version only guarantees that the elements will be sorted.
// Invalid elements may still be present anywhere in the set.
kSoftSort
};
void Sort(SortType sort_type);
// Delete the elements that have an invalid key. The complexity is linear
// with the size of the vector.
void Clean();
const ElementType Front() const;
const ElementType Back() const;
// Delete invalid trailing elements and return the last valid element in the
// set.
const ElementType CleanBack();
// Returns a pointer to the start or end of the backing storage.
const ElementType* StorageBegin() const;
const ElementType* StorageEnd() const;
ElementType* StorageBegin();
ElementType* StorageEnd();
// Returns the index of the element within the backing storage. The element
// must belong to the backing storage.
size_t GetElementIndex(const ElementType* element) const;
// Returns the element at the specified index in the backing storage.
const ElementType* GetElementAt(size_t index) const;
ElementType* GetElementAt(size_t index);
static const ElementType* GetFirstValidElement(const ElementType* from,
const ElementType* end);
void CacheMinElement();
const ElementType GetCachedMinElement() const;
bool ShouldReclaimMemory() const;
void ReclaimMemory();
bool IsUsingVector() const { return vector_ != NULL; }
void SetSorted(bool sorted) { sorted_ = sorted; }
// We cache some data commonly required by users to improve performance.
// We cannot cache pointers to elements as we do not control the backing
// storage.
bool valid_cached_min_;
size_t cached_min_index_; // Valid iff `valid_cached_min_` is true.
KeyType cached_min_key_; // Valid iff `valid_cached_min_` is true.
// Indicates whether the elements are sorted.
bool sorted_;
// This represents the number of (valid) elements in this set.
size_t size_;
// The backing storage is either the array of preallocated elements or the
// vector. The structure starts by using the preallocated elements, and
// transitions (permanently) to using the vector once more than
// kNPreallocatedElements are used.
// Elements are only invalidated when using the vector. The preallocated
// storage always only contains valid elements.
ElementType preallocated_[kNPreallocatedElements];
std::vector<ElementType>* vector_;
// Iterators acquire and release this monitor. While a set is acquired,
// certain operations are illegal to ensure that the iterator will
// correctly iterate over the elements in the set.
int monitor_;
#ifdef VIXL_DEBUG
int monitor() const { return monitor_; }
void Acquire() { monitor_++; }
void Release() {
monitor_--;
VIXL_ASSERT(monitor_ >= 0);
}
#endif
private:
// The copy constructor and assignment operator are not used and the defaults
// are unsafe, so disable them (without an implementation).
#if __cplusplus >= 201103L
InvalSet(const InvalSet& other) = delete;
InvalSet operator=(const InvalSet& other) = delete;
#else
InvalSet(const InvalSet& other);
InvalSet operator=(const InvalSet& other);
#endif
friend class InvalSetIterator<InvalSet<TEMPLATE_INVALSET_P_DEF> >;
};
template <class S>
class InvalSetIterator : public std::iterator<std::forward_iterator_tag,
typename S::_ElementType> {
private:
// Redefine types to mirror the associated set types.
typedef typename S::_ElementType ElementType;
typedef typename S::_KeyType KeyType;
public:
explicit InvalSetIterator(S* inval_set = NULL);
// This class implements the standard copy-swap idiom.
~InvalSetIterator();
InvalSetIterator(const InvalSetIterator<S>& other);
InvalSetIterator<S>& operator=(InvalSetIterator<S> other);
#if __cplusplus >= 201103L
InvalSetIterator(InvalSetIterator<S>&& other) noexcept;
#endif
friend void swap(InvalSetIterator<S>& a, InvalSetIterator<S>& b) {
using std::swap;
swap(a.using_vector_, b.using_vector_);
swap(a.index_, b.index_);
swap(a.inval_set_, b.inval_set_);
}
// Return true if the iterator is at the end of the set.
bool Done() const;
// Move this iterator to the end of the set.
void Finish();
// Delete the current element and advance the iterator to point to the next
// element.
void DeleteCurrentAndAdvance();
static bool IsValid(const ElementType& element);
static KeyType GetKey(const ElementType& element);
// Extra helpers to support the forward-iterator interface.
InvalSetIterator<S>& operator++(); // Pre-increment.
InvalSetIterator<S> operator++(int); // Post-increment.
bool operator==(const InvalSetIterator<S>& rhs) const;
bool operator!=(const InvalSetIterator<S>& rhs) const {
return !(*this == rhs);
}
ElementType& operator*() { return *Current(); }
const ElementType& operator*() const { return *Current(); }
ElementType* operator->() { return Current(); }
const ElementType* operator->() const { return Current(); }
protected:
void MoveToValidElement();
// Indicates if the iterator is looking at the vector or at the preallocated
// elements.
bool using_vector_;
// Used when looking at the preallocated elements, or in debug mode when using
// the vector to track how many times the iterator has advanced.
size_t index_;
typename std::vector<ElementType>::iterator iterator_;
S* inval_set_;
// TODO: These helpers are deprecated and will be removed in future versions
// of VIXL.
ElementType* Current() const;
void Advance();
};
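// An iteration-and-deletion sketch (editor's illustration; `EntrySet` refers
// to the hypothetical instantiation sketched above and `ShouldRemove()` is
// equally hypothetical):
//
//   EntrySet set;
//   // ... insert elements ...
//   for (EntrySet::iterator it = set.begin(); !it.Done();) {
//     if (ShouldRemove(*it)) {
//       it.DeleteCurrentAndAdvance();
//     } else {
//       ++it;
//     }
//   }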
template <TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::InvalSet()
: valid_cached_min_(false), sorted_(true), size_(0), vector_(NULL) {
#ifdef VIXL_DEBUG
monitor_ = 0;
#endif
}
template <TEMPLATE_INVALSET_P_DECL>
InvalSet<TEMPLATE_INVALSET_P_DEF>::~InvalSet() {
VIXL_ASSERT(monitor_ == 0);
delete vector_;
}
template <TEMPLATE_INVALSET_P_DECL>
typename InvalSet<TEMPLATE_INVALSET_P_DEF>::iterator
InvalSet<TEMPLATE_INVALSET_P_DEF>::begin() {
return iterator(this);
}
template <TEMPLATE_INVALSET_P_DECL>
typename InvalSet<TEMPLATE_INVALSET_P_DEF>::iterator
InvalSet<TEMPLATE_INVALSET_P_DEF>::end() {
iterator end(this);
end.Finish();
return end;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::insert(const ElementType& element) {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(IsValid(element));
VIXL_ASSERT(Search(element) == NULL);
SetSorted(empty() || (sorted_ && (element > CleanBack())));
if (IsUsingVector()) {
vector_->push_back(element);
} else {
if (size_ < kNPreallocatedElements) {
preallocated_[size_] = element;
} else {
// Transition to using the vector.
vector_ =
new std::vector<ElementType>(preallocated_, preallocated_ + size_);
vector_->push_back(element);
}
}
size_++;
if (valid_cached_min_ && (element < GetMinElement())) {
cached_min_index_ = IsUsingVector() ? vector_->size() - 1 : size_ - 1;
cached_min_key_ = GetKey(element);
valid_cached_min_ = true;
}
if (ShouldReclaimMemory()) {
ReclaimMemory();
}
}
template <TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::erase(const ElementType& element) {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(IsValid(element));
ElementType* local_element = Search(element);
if (local_element != NULL) {
EraseInternal(local_element);
return 1;
}
return 0;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::Search(
const ElementType& element) {
VIXL_ASSERT(monitor() == 0);
if (empty()) {
return NULL;
}
if (ShouldReclaimMemory()) {
ReclaimMemory();
}
if (!sorted_) {
Sort(kHardSort);
}
if (!valid_cached_min_) {
CacheMinElement();
}
return BinarySearch(element, GetElementAt(cached_min_index_), StorageEnd());
}
template <TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::size() const {
return size_;
}
template <TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::empty() const {
return size_ == 0;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::clear() {
VIXL_ASSERT(monitor() == 0);
size_ = 0;
if (IsUsingVector()) {
vector_->clear();
}
SetSorted(true);
valid_cached_min_ = false;
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::GetMinElement() {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(!empty());
CacheMinElement();
return *GetElementAt(cached_min_index_);
}
template <TEMPLATE_INVALSET_P_DECL>
KeyType InvalSet<TEMPLATE_INVALSET_P_DEF>::GetMinElementKey() {
VIXL_ASSERT(monitor() == 0);
if (valid_cached_min_) {
return cached_min_key_;
} else {
return GetKey(GetMinElement());
}
}
template <TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::IsValid(const ElementType& element) {
return GetKey(element) != kInvalidKey;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::EraseInternal(ElementType* element) {
// Note that this function must be safe even while an iterator has acquired
// this set.
VIXL_ASSERT(element != NULL);
size_t deleted_index = GetElementIndex(element);
if (IsUsingVector()) {
VIXL_ASSERT((&(vector_->front()) <= element) &&
(element <= &(vector_->back())));
SetKey(element, kInvalidKey);
} else {
VIXL_ASSERT((preallocated_ <= element) &&
(element < (preallocated_ + kNPreallocatedElements)));
ElementType* end = preallocated_ + kNPreallocatedElements;
size_t copy_size = sizeof(*element) * (end - element - 1);
memmove(element, element + 1, copy_size);
}
size_--;
if (valid_cached_min_ && (deleted_index == cached_min_index_)) {
if (sorted_ && !empty()) {
const ElementType* min = GetFirstValidElement(element, StorageEnd());
cached_min_index_ = GetElementIndex(min);
cached_min_key_ = GetKey(*min);
valid_cached_min_ = true;
} else {
valid_cached_min_ = false;
}
}
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::BinarySearch(
const ElementType& element, ElementType* start, ElementType* end) const {
if (start == end) {
return NULL;
}
VIXL_ASSERT(sorted_);
VIXL_ASSERT(start < end);
VIXL_ASSERT(!empty());
// Perform a binary search through the elements while ignoring invalid
// elements.
ElementType* elements = start;
size_t low = 0;
size_t high = (end - start) - 1;
while (low < high) {
// Find valid bounds.
while (!IsValid(elements[low]) && (low < high)) ++low;
while (!IsValid(elements[high]) && (low < high)) --high;
VIXL_ASSERT(low <= high);
// Avoid overflow when computing the middle index.
size_t middle = low + (high - low) / 2;
if ((middle == low) || (middle == high)) {
break;
}
while ((middle < high - 1) && !IsValid(elements[middle])) ++middle;
while ((low + 1 < middle) && !IsValid(elements[middle])) --middle;
if (!IsValid(elements[middle])) {
break;
}
if (elements[middle] < element) {
low = middle;
} else {
high = middle;
}
}
if (elements[low] == element) return &elements[low];
if (elements[high] == element) return &elements[high];
return NULL;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::Sort(SortType sort_type) {
if (sort_type == kSoftSort) {
if (sorted_) {
return;
}
}
VIXL_ASSERT(monitor() == 0);
if (empty()) {
return;
}
Clean();
std::sort(StorageBegin(), StorageEnd());
SetSorted(true);
cached_min_index_ = 0;
cached_min_key_ = GetKey(Front());
valid_cached_min_ = true;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::Clean() {
VIXL_ASSERT(monitor() == 0);
if (empty() || !IsUsingVector()) {
return;
}
// Manually iterate through the vector storage to discard invalid elements.
ElementType* start = &(vector_->front());
ElementType* end = start + vector_->size();
ElementType* c = start;
ElementType* first_invalid;
ElementType* first_valid;
ElementType* next_invalid;
while ((c < end) && IsValid(*c)) c++;
first_invalid = c;
while (c < end) {
while ((c < end) && !IsValid(*c)) c++;
first_valid = c;
while ((c < end) && IsValid(*c)) c++;
next_invalid = c;
ptrdiff_t n_moved_elements = (next_invalid - first_valid);
memmove(first_invalid, first_valid, n_moved_elements * sizeof(*c));
first_invalid = first_invalid + n_moved_elements;
c = next_invalid;
}
// Delete the trailing invalid elements.
vector_->erase(vector_->begin() + (first_invalid - start), vector_->end());
VIXL_ASSERT(vector_->size() == size_);
if (sorted_) {
valid_cached_min_ = true;
cached_min_index_ = 0;
cached_min_key_ = GetKey(*GetElementAt(0));
} else {
valid_cached_min_ = false;
}
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Front() const {
VIXL_ASSERT(!empty());
return IsUsingVector() ? vector_->front() : preallocated_[0];
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::Back() const {
VIXL_ASSERT(!empty());
return IsUsingVector() ? vector_->back() : preallocated_[size_ - 1];
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType InvalSet<TEMPLATE_INVALSET_P_DEF>::CleanBack() {
VIXL_ASSERT(monitor() == 0);
if (IsUsingVector()) {
// Delete the invalid trailing elements.
typename std::vector<ElementType>::reverse_iterator it = vector_->rbegin();
while (!IsValid(*it)) {
it++;
}
vector_->erase(it.base(), vector_->end());
}
return Back();
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() const {
return IsUsingVector() ? &(vector_->front()) : preallocated_;
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() const {
return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageBegin() {
return IsUsingVector() ? &(vector_->front()) : preallocated_;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::StorageEnd() {
return IsUsingVector() ? &(vector_->back()) + 1 : preallocated_ + size_;
}
template <TEMPLATE_INVALSET_P_DECL>
size_t InvalSet<TEMPLATE_INVALSET_P_DEF>::GetElementIndex(
const ElementType* element) const {
VIXL_ASSERT((StorageBegin() <= element) && (element < StorageEnd()));
return element - StorageBegin();
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::GetElementAt(
size_t index) const {
VIXL_ASSERT((IsUsingVector() && (index < vector_->size())) ||
(index < size_));
return StorageBegin() + index;
}
template <TEMPLATE_INVALSET_P_DECL>
ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::GetElementAt(size_t index) {
VIXL_ASSERT((IsUsingVector() && (index < vector_->size())) ||
(index < size_));
return StorageBegin() + index;
}
template <TEMPLATE_INVALSET_P_DECL>
const ElementType* InvalSet<TEMPLATE_INVALSET_P_DEF>::GetFirstValidElement(
const ElementType* from, const ElementType* end) {
while ((from < end) && !IsValid(*from)) {
from++;
}
return from;
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::CacheMinElement() {
VIXL_ASSERT(monitor() == 0);
VIXL_ASSERT(!empty());
if (valid_cached_min_) {
return;
}
if (sorted_) {
const ElementType* min = GetFirstValidElement(StorageBegin(), StorageEnd());
cached_min_index_ = GetElementIndex(min);
cached_min_key_ = GetKey(*min);
valid_cached_min_ = true;
} else {
Sort(kHardSort);
}
VIXL_ASSERT(valid_cached_min_);
}
template <TEMPLATE_INVALSET_P_DECL>
bool InvalSet<TEMPLATE_INVALSET_P_DEF>::ShouldReclaimMemory() const {
if (!IsUsingVector()) {
return false;
}
size_t n_invalid_elements = vector_->size() - size_;
return (n_invalid_elements > RECLAIM_FROM) &&
(n_invalid_elements > vector_->size() / RECLAIM_FACTOR);
}
template <TEMPLATE_INVALSET_P_DECL>
void InvalSet<TEMPLATE_INVALSET_P_DEF>::ReclaimMemory() {
VIXL_ASSERT(monitor() == 0);
Clean();
}
template <class S>
InvalSetIterator<S>::InvalSetIterator(S* inval_set)
: using_vector_((inval_set != NULL) && inval_set->IsUsingVector()),
index_(0),
inval_set_(inval_set) {
if (inval_set != NULL) {
inval_set->Sort(S::kSoftSort);
#ifdef VIXL_DEBUG
inval_set->Acquire();
#endif
if (using_vector_) {
iterator_ = typename std::vector<ElementType>::iterator(
inval_set_->vector_->begin());
}
MoveToValidElement();
}
}
template <class S>
InvalSetIterator<S>::~InvalSetIterator() {
#ifdef VIXL_DEBUG
if (inval_set_ != NULL) inval_set_->Release();
#endif
}
template <class S>
typename S::_ElementType* InvalSetIterator<S>::Current() const {
VIXL_ASSERT(!Done());
if (using_vector_) {
return &(*iterator_);
} else {
return &(inval_set_->preallocated_[index_]);
}
}
template <class S>
void InvalSetIterator<S>::Advance() {
++(*this);
}
template <class S>
bool InvalSetIterator<S>::Done() const {
if (using_vector_) {
bool done = (iterator_ == inval_set_->vector_->end());
VIXL_ASSERT(done == (index_ == inval_set_->size()));
return done;
} else {
return index_ == inval_set_->size();
}
}
template <class S>
void InvalSetIterator<S>::Finish() {
VIXL_ASSERT(inval_set_->sorted_);
if (using_vector_) {
iterator_ = inval_set_->vector_->end();
}
index_ = inval_set_->size();
}
template <class S>
void InvalSetIterator<S>::DeleteCurrentAndAdvance() {
if (using_vector_) {
inval_set_->EraseInternal(&(*iterator_));
MoveToValidElement();
} else {
inval_set_->EraseInternal(inval_set_->preallocated_ + index_);
}
}
template <class S>
bool InvalSetIterator<S>::IsValid(const ElementType& element) {
return S::IsValid(element);
}
template <class S>
typename S::_KeyType InvalSetIterator<S>::GetKey(const ElementType& element) {
return S::GetKey(element);
}
template <class S>
void InvalSetIterator<S>::MoveToValidElement() {
if (using_vector_) {
while ((iterator_ != inval_set_->vector_->end()) && !IsValid(*iterator_)) {
iterator_++;
}
} else {
VIXL_ASSERT(inval_set_->empty() || IsValid(inval_set_->preallocated_[0]));
// Nothing to do.
}
}
template <class S>
InvalSetIterator<S>::InvalSetIterator(const InvalSetIterator<S>& other)
: using_vector_(other.using_vector_),
index_(other.index_),
inval_set_(other.inval_set_) {
#ifdef VIXL_DEBUG
if (inval_set_ != NULL) inval_set_->Acquire();
#endif
}
#if __cplusplus >= 201103L
template <class S>
InvalSetIterator<S>::InvalSetIterator(InvalSetIterator<S>&& other) noexcept
: using_vector_(false),
index_(0),
inval_set_(NULL) {
swap(*this, other);
}
#endif
template <class S>
InvalSetIterator<S>& InvalSetIterator<S>::operator=(InvalSetIterator<S> other) {
swap(*this, other);
return *this;
}
template <class S>
bool InvalSetIterator<S>::operator==(const InvalSetIterator<S>& rhs) const {
bool equal = (inval_set_ == rhs.inval_set_);
// If the inval_set_ matches, using_vector_ must also match.
VIXL_ASSERT(!equal || (using_vector_ == rhs.using_vector_));
if (using_vector_) {
equal = equal && (iterator_ == rhs.iterator_);
// In debug mode, index_ is maintained even with using_vector_.
VIXL_ASSERT(!equal || (index_ == rhs.index_));
} else {
equal = equal && (index_ == rhs.index_);
#ifdef DEBUG
// If not using_vector_, iterator_ should be default-initialised.
typename std::vector<ElementType>::iterator default_iterator;
VIXL_ASSERT(iterator_ == default_iterator);
VIXL_ASSERT(rhs.iterator_ == default_iterator);
#endif
}
return equal;
}
template <class S>
InvalSetIterator<S>& InvalSetIterator<S>::operator++() {
// Pre-increment.
VIXL_ASSERT(!Done());
if (using_vector_) {
iterator_++;
#ifdef VIXL_DEBUG
index_++;
#endif
MoveToValidElement();
} else {
index_++;
}
return *this;
}
template <class S>
InvalSetIterator<S> InvalSetIterator<S>::operator++(int /* unused */) {
// Post-increment.
VIXL_ASSERT(!Done());
InvalSetIterator<S> old(*this);
++(*this);
return old;
}
#undef TEMPLATE_INVALSET_P_DECL
#undef TEMPLATE_INVALSET_P_DEF
} // namespace vixl
#endif // VIXL_INVALSET_H_
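The cleaning pass above slides each run of valid elements over the preceding invalid ones with memmove and then erases the invalid tail. As a point of comparison, here is a minimal standalone sketch (not taken from the VIXL sources, and assuming a C++11 compiler) of the same compaction idea on a plain vector, where "invalid" is modelled as a negative value:

#include <algorithm>
#include <vector>

// Compact a vector in place: keep the relative order of "valid" (here:
// non-negative) elements and drop the "invalid" (negative) ones, mirroring
// the memmove-based cleaning loop in InvalSet above.
static void CompactValid(std::vector<int>* v) {
  std::vector<int>::iterator new_end =
      std::remove_if(v->begin(), v->end(), [](int x) { return x < 0; });
  v->erase(new_end, v->end());  // delete the trailing invalid elements
}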

View file

@@ -0,0 +1,75 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_MACRO_ASSEMBLER_INTERFACE_H
#define VIXL_MACRO_ASSEMBLER_INTERFACE_H
#include "assembler-base-vixl.h"
namespace vixl {
class MacroAssemblerInterface {
public:
virtual internal::AssemblerBase* AsAssemblerBase() = 0;
virtual ~MacroAssemblerInterface() {}
virtual bool AllowMacroInstructions() const = 0;
virtual bool ArePoolsBlocked() const = 0;
protected:
virtual void SetAllowMacroInstructions(bool allow) = 0;
virtual void BlockPools() = 0;
virtual void ReleasePools() = 0;
virtual void EnsureEmitPoolsFor(size_t size) = 0;
// Emit the branch over a literal/veneer pool, and any necessary padding
// before it.
virtual void EmitPoolHeader() = 0;
// When this is called, the label used for branching over the pool is bound.
// This can also generate additional padding, which must correspond to the
// alignment_ value passed to the PoolManager (which needs to keep track of
// the exact size of the generated pool).
virtual void EmitPoolFooter() = 0;
// Emit n bytes of padding that does not have to be executable.
virtual void EmitPaddingBytes(int n) = 0;
// Emit n bytes of padding that has to be executable. Implementations must
// make sure this is a multiple of the instruction size.
virtual void EmitNopBytes(int n) = 0;
// The following scopes need access to the above method in order to implement
// pool blocking and temporarily disable the macro-assembler.
friend class ExactAssemblyScope;
friend class EmissionCheckScope;
template <typename T>
friend class PoolManager;
};
} // namespace vixl
#endif // VIXL_MACRO_ASSEMBLER_INTERFACE_H
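For context, a hedged sketch (not part of the VIXL sources) of what a concrete macro-assembler must provide to satisfy this interface. The class name and the no-op bodies are placeholders only; a real implementation would drive its code buffer through the assembler when emitting headers, footers and padding.

#include <cstddef>

#include "vixl/macro-assembler-interface.h"

namespace example {

// Hypothetical skeleton implementing every pure-virtual hook declared above.
class SketchMacroAssembler : public vixl::MacroAssemblerInterface {
 public:
  explicit SketchMacroAssembler(vixl::internal::AssemblerBase* assembler)
      : assembler_(assembler), allow_macro_(true), pool_block_count_(0) {}
  vixl::internal::AssemblerBase* AsAssemblerBase() override { return assembler_; }
  bool AllowMacroInstructions() const override { return allow_macro_; }
  bool ArePoolsBlocked() const override { return pool_block_count_ > 0; }

 protected:
  void SetAllowMacroInstructions(bool allow) override { allow_macro_ = allow; }
  void BlockPools() override { pool_block_count_++; }
  void ReleasePools() override { pool_block_count_--; }
  void EnsureEmitPoolsFor(size_t) override {}  // would check the pool checkpoint
  void EmitPoolHeader() override {}            // would emit the branch over the pool
  void EmitPoolFooter() override {}            // would bind the branch-over label
  void EmitPaddingBytes(int) override {}       // padding that need not be executable
  void EmitNopBytes(int) override {}           // executable padding (NOPs)

 private:
  vixl::internal::AssemblerBase* assembler_;
  bool allow_macro_;
  int pool_block_count_;
};

}  // namespace example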

View file

@@ -0,0 +1,39 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef PLATFORM_H
#define PLATFORM_H
// Define platform specific functionalities.
extern "C" {
#include <signal.h>
}
namespace vixl {
inline void HostBreakpoint() { raise(SIGINT); }
} // namespace vixl
#endif
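A tiny usage sketch (not from the VIXL sources): HostBreakpoint() simply raises SIGINT, so a debugger attached to the process stops at the call site.

#include "vixl/platform-vixl.h"

int main() {
  // ... reach the point of interest ...
  vixl::HostBreakpoint();  // raise(SIGINT); pauses here under a debugger
  return 0;
}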

View file

@@ -0,0 +1,522 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_POOL_MANAGER_IMPL_H_
#define VIXL_POOL_MANAGER_IMPL_H_
#include "pool-manager.h"
#include <algorithm>
#include "assembler-base-vixl.h"
namespace vixl {
template <typename T>
T PoolManager<T>::Emit(MacroAssemblerInterface* masm,
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
LocationBase<T>* new_object,
EmitOption option) {
// Make sure that the buffer still has the alignment we think it does.
VIXL_ASSERT(IsAligned(masm->AsAssemblerBase()
->GetBuffer()
->GetStartAddress<uintptr_t>(),
buffer_alignment_));
// We should not call this method when the pools are blocked.
VIXL_ASSERT(!IsBlocked());
if (objects_.empty()) return pc;
// Emit header.
if (option == kBranchRequired) {
masm->EmitPoolHeader();
// TODO: The pc at this point might not actually be aligned according to
// alignment_. This is to support the current AARCH32 MacroAssembler which
// does not have a fixed size instruction set. In practice, the pc will be
// aligned to the alignment that instructions need for the current instruction
// set, so we do not need to align it here. All other calculations do take
// the alignment into account, which only makes the checkpoint calculations
// more conservative when we use T32. Uncomment the following assertion if
// the AARCH32 MacroAssembler is modified to only support one ISA at a
// time.
// VIXL_ASSERT(pc == AlignUp(pc, alignment_));
pc += header_size_;
} else {
// If the header is optional, we might need to add some extra padding to
// meet the minimum location of the first object.
if (pc < objects_[0].min_location_) {
int32_t padding = objects_[0].min_location_ - pc;
masm->EmitNopBytes(padding);
pc += padding;
}
}
PoolObject<T>* existing_object = GetObjectIfTracked(new_object);
// Go through all objects and emit one by one.
for (objects_iter iter = objects_.begin(); iter != objects_.end();) {
PoolObject<T>& current = *iter;
if (ShouldSkipObject(&current,
pc,
num_bytes,
new_reference,
new_object,
existing_object)) {
++iter;
continue;
}
LocationBase<T>* label_base = current.label_base_;
T aligned_pc = AlignUp(pc, current.alignment_);
masm->EmitPaddingBytes(aligned_pc - pc);
pc = aligned_pc;
VIXL_ASSERT(pc >= current.min_location_);
VIXL_ASSERT(pc <= current.max_location_);
// First call SetLocation, which will also resolve the references, and then
// call EmitPoolObject, which might add a new reference.
label_base->SetLocation(masm->AsAssemblerBase(), pc);
label_base->EmitPoolObject(masm);
int object_size = label_base->GetPoolObjectSizeInBytes();
if (label_base->ShouldDeletePoolObjectOnPlacement()) {
label_base->MarkBound();
iter = RemoveAndDelete(iter);
} else {
VIXL_ASSERT(!current.label_base_->ShouldDeletePoolObjectOnPlacement());
current.label_base_->UpdatePoolObject(&current);
VIXL_ASSERT(current.alignment_ >= label_base->GetPoolObjectAlignment());
++iter;
}
pc += object_size;
}
// Recalculate the checkpoint before emitting the footer. The footer might
// call Bind() which will check if we need to emit.
RecalculateCheckpoint();
// Always emit footer - this might add some padding.
masm->EmitPoolFooter();
pc = AlignUp(pc, alignment_);
return pc;
}
template <typename T>
bool PoolManager<T>::ShouldSkipObject(PoolObject<T>* pool_object,
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
LocationBase<T>* new_object,
PoolObject<T>* existing_object) const {
// We assume that all objects before this have been skipped and all objects
// after this will be emitted, therefore we will emit the whole pool. Add
// the header size and alignment, as well as the number of bytes we are
// planning to emit.
T max_actual_location = pc + num_bytes + max_pool_size_;
if (new_reference != NULL) {
// If we're adding a new object, also assume that it will have to be emitted
// before the object we are considering to skip.
VIXL_ASSERT(new_object != NULL);
T new_object_alignment = std::max(new_reference->object_alignment_,
new_object->GetPoolObjectAlignment());
if ((existing_object != NULL) &&
(existing_object->alignment_ > new_object_alignment)) {
new_object_alignment = existing_object->alignment_;
}
max_actual_location +=
(new_object->GetPoolObjectSizeInBytes() + new_object_alignment - 1);
}
// Hard limit.
if (max_actual_location >= pool_object->max_location_) return false;
// Use heuristic.
return (pc < pool_object->skip_until_location_hint_);
}
template <typename T>
T PoolManager<T>::UpdateCheckpointForObject(T checkpoint,
const PoolObject<T>* object) {
checkpoint -= object->label_base_->GetPoolObjectSizeInBytes();
if (checkpoint > object->max_location_) checkpoint = object->max_location_;
checkpoint = AlignDown(checkpoint, object->alignment_);
return checkpoint;
}
template <typename T>
static T MaxCheckpoint() {
return std::numeric_limits<T>::max();
}
template <typename T>
static inline bool CheckCurrentPC(T pc, T checkpoint) {
VIXL_ASSERT(pc <= checkpoint);
// We must emit the pools if we are at the checkpoint now.
return pc == checkpoint;
}
template <typename T>
static inline bool CheckFuturePC(T pc, T checkpoint) {
// We do not need to emit the pools now if the projected future PC will be
// equal to the checkpoint (we will need to emit the pools then).
return pc > checkpoint;
}
template <typename T>
bool PoolManager<T>::MustEmit(T pc,
int num_bytes,
ForwardReference<T>* reference,
LocationBase<T>* label_base) const {
// Check if we are at or past the checkpoint.
if (CheckCurrentPC(pc, checkpoint_)) return true;
// Check if the future PC will be past the checkpoint.
pc += num_bytes;
if (CheckFuturePC(pc, checkpoint_)) return true;
// No new reference - nothing to do.
if (reference == NULL) {
VIXL_ASSERT(label_base == NULL);
return false;
}
if (objects_.empty()) {
// Basic assertions that restrictions on the new (and only) reference are
// possible to satisfy.
VIXL_ASSERT(AlignUp(pc + header_size_, alignment_) >=
reference->min_object_location_);
VIXL_ASSERT(pc <= reference->max_object_location_);
return false;
}
// Check if the object is already being tracked.
const PoolObject<T>* existing_object = GetObjectIfTracked(label_base);
if (existing_object != NULL) {
// If the existing_object is already in objects_ and its new
// alignment and new location restrictions are not stricter, skip the more
// expensive check.
if ((reference->min_object_location_ <= existing_object->min_location_) &&
(reference->max_object_location_ >= existing_object->max_location_) &&
(reference->object_alignment_ <= existing_object->alignment_)) {
return false;
}
}
// Create a temporary object.
PoolObject<T> temp(label_base);
temp.RestrictRange(reference->min_object_location_,
reference->max_object_location_);
temp.RestrictAlignment(reference->object_alignment_);
if (existing_object != NULL) {
temp.RestrictRange(existing_object->min_location_,
existing_object->max_location_);
temp.RestrictAlignment(existing_object->alignment_);
}
// Check if the new reference can be added after the end of the current pool.
// If yes, we don't need to emit.
T last_reachable = AlignDown(temp.max_location_, temp.alignment_);
const PoolObject<T>& last = objects_.back();
T after_pool = AlignDown(last.max_location_, last.alignment_) +
last.label_base_->GetPoolObjectSizeInBytes();
// The current object can be placed at the end of the pool, even if the last
// object is placed at the last possible location.
if (last_reachable >= after_pool) return false;
// The current object can be placed after the code we are about to emit and
// after the existing pool (with a pessimistic size estimate).
if (last_reachable >= pc + num_bytes + max_pool_size_) return false;
// We're not in a trivial case, so we need to recalculate the checkpoint.
// Check (conservatively) if we can fit it into the objects_ array, without
// breaking our assumptions. Here we want to recalculate the checkpoint as
// if the new reference was added to the PoolManager but without actually
// adding it (as removing it is non-trivial).
T checkpoint = MaxCheckpoint<T>();
// Will temp be the last object in objects_?
if (PoolObjectLessThan(last, temp)) {
checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
if (checkpoint < temp.min_location_) return true;
}
bool tempNotPlacedYet = true;
for (int i = static_cast<int>(objects_.size()) - 1; i >= 0; --i) {
const PoolObject<T>& current = objects_[i];
if (tempNotPlacedYet && PoolObjectLessThan(current, temp)) {
checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
if (checkpoint < temp.min_location_) return true;
if (CheckFuturePC(pc, checkpoint)) return true;
tempNotPlacedYet = false;
}
if (current.label_base_ == label_base) continue;
checkpoint = UpdateCheckpointForObject(checkpoint, &current);
if (checkpoint < current.min_location_) return true;
if (CheckFuturePC(pc, checkpoint)) return true;
}
// temp is the object with the smallest max_location_.
if (tempNotPlacedYet) {
checkpoint = UpdateCheckpointForObject(checkpoint, &temp);
if (checkpoint < temp.min_location_) return true;
}
// Take the header into account.
checkpoint -= header_size_;
checkpoint = AlignDown(checkpoint, alignment_);
return CheckFuturePC(pc, checkpoint);
}
template <typename T>
void PoolManager<T>::RecalculateCheckpoint(SortOption sort_option) {
// TODO: Improve the max_pool_size_ estimate by starting from the
// min_location_ of the first object, calculating the end of the pool as if
// all objects were placed starting from there, and in the end adding the
// maximum object alignment found minus one (which is the maximum extra
// padding we would need if we were to relocate the pool to a different
// address).
max_pool_size_ = 0;
if (objects_.empty()) {
checkpoint_ = MaxCheckpoint<T>();
return;
}
// Sort objects by their max_location_.
if (sort_option == kSortRequired) {
std::sort(objects_.begin(), objects_.end(), PoolObjectLessThan);
}
// Add the header size and header and footer max alignment to the maximum
// pool size.
max_pool_size_ += header_size_ + 2 * (alignment_ - 1);
T checkpoint = MaxCheckpoint<T>();
int last_object_index = static_cast<int>(objects_.size()) - 1;
for (int i = last_object_index; i >= 0; --i) {
// Bring back the checkpoint by the size of the current object, unless
// we need to bring it back more, then align.
PoolObject<T>& current = objects_[i];
checkpoint = UpdateCheckpointForObject(checkpoint, &current);
VIXL_ASSERT(checkpoint >= current.min_location_);
max_pool_size_ += (current.alignment_ - 1 +
current.label_base_->GetPoolObjectSizeInBytes());
}
// Take the header into account.
checkpoint -= header_size_;
checkpoint = AlignDown(checkpoint, alignment_);
// Update the checkpoint of the pool manager.
checkpoint_ = checkpoint;
// NOTE: To handle min_location_ in the generic case, we could make a second
// pass of the objects_ vector, increasing the checkpoint as needed, while
// maintaining the alignment requirements.
// It should not be possible to have any issues with min_location_ with actual
// code, since there should always be some kind of branch over the pool,
// whether introduced by the pool emission or by the user, which will make
// sure the min_location_ requirement is satisfied. It's possible that the
// user could emit code in the literal pool and intentionally load the first
// value and then fall-through into the pool, but that is not a supported use
// of VIXL and we will assert in that case.
}
template <typename T>
bool PoolManager<T>::PoolObjectLessThan(const PoolObject<T>& a,
const PoolObject<T>& b) {
if (a.max_location_ != b.max_location_)
return (a.max_location_ < b.max_location_);
int a_size = a.label_base_->GetPoolObjectSizeInBytes();
int b_size = b.label_base_->GetPoolObjectSizeInBytes();
if (a_size != b_size) return (a_size < b_size);
if (a.alignment_ != b.alignment_) return (a.alignment_ < b.alignment_);
if (a.min_location_ != b.min_location_)
return (a.min_location_ < b.min_location_);
return false;
}
template <typename T>
void PoolManager<T>::AddObjectReference(const ForwardReference<T>* reference,
LocationBase<T>* label_base) {
VIXL_ASSERT(reference->object_alignment_ <= buffer_alignment_);
VIXL_ASSERT(label_base->GetPoolObjectAlignment() <= buffer_alignment_);
PoolObject<T>* object = GetObjectIfTracked(label_base);
if (object == NULL) {
PoolObject<T> new_object(label_base);
new_object.RestrictRange(reference->min_object_location_,
reference->max_object_location_);
new_object.RestrictAlignment(reference->object_alignment_);
Insert(new_object);
} else {
object->RestrictRange(reference->min_object_location_,
reference->max_object_location_);
object->RestrictAlignment(reference->object_alignment_);
// Move the object, if needed.
if (objects_.size() != 1) {
PoolObject<T> new_object(*object);
ptrdiff_t distance = std::distance(objects_.data(), object);
objects_.erase(objects_.begin() + distance);
Insert(new_object);
}
}
// No need to sort, we inserted the object in an already sorted array.
RecalculateCheckpoint(kNoSortRequired);
}
template <typename T>
void PoolManager<T>::Insert(const PoolObject<T>& new_object) {
bool inserted = false;
// Place the object in the right position.
for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
PoolObject<T>& current = *iter;
if (!PoolObjectLessThan(current, new_object)) {
objects_.insert(iter, new_object);
inserted = true;
break;
}
}
if (!inserted) {
objects_.push_back(new_object);
}
}
template <typename T>
void PoolManager<T>::RemoveAndDelete(PoolObject<T>* object) {
for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
PoolObject<T>& current = *iter;
if (current.label_base_ == object->label_base_) {
(void)RemoveAndDelete(iter);
return;
}
}
VIXL_UNREACHABLE();
}
template <typename T>
typename PoolManager<T>::objects_iter PoolManager<T>::RemoveAndDelete(
objects_iter iter) {
PoolObject<T>& object = *iter;
LocationBase<T>* label_base = object.label_base_;
// Check if we also need to delete the LocationBase object.
if (label_base->ShouldBeDeletedOnPoolManagerDestruction()) {
delete_on_destruction_.push_back(label_base);
}
if (label_base->ShouldBeDeletedOnPlacementByPoolManager()) {
VIXL_ASSERT(!label_base->ShouldBeDeletedOnPoolManagerDestruction());
delete label_base;
}
return objects_.erase(iter);
}
template <typename T>
T PoolManager<T>::Bind(MacroAssemblerInterface* masm,
LocationBase<T>* object,
T location) {
PoolObject<T>* existing_object = GetObjectIfTracked(object);
int alignment;
T min_location;
if (existing_object == NULL) {
alignment = object->GetMaxAlignment();
min_location = object->GetMinLocation();
} else {
alignment = existing_object->alignment_;
min_location = existing_object->min_location_;
}
// Align if needed, and add necessary padding to reach the min_location_.
T aligned_location = AlignUp(location, alignment);
masm->EmitNopBytes(aligned_location - location);
location = aligned_location;
while (location < min_location) {
masm->EmitNopBytes(alignment);
location += alignment;
}
object->SetLocation(masm->AsAssemblerBase(), location);
object->MarkBound();
if (existing_object != NULL) {
RemoveAndDelete(existing_object);
// No need to sort, we removed the object from a sorted array.
RecalculateCheckpoint(kNoSortRequired);
}
// We assume that the maximum padding we can possibly add here is less
// than the header alignment - hence that we're not going to go past our
// checkpoint.
VIXL_ASSERT(!CheckFuturePC(location, checkpoint_));
return location;
}
template <typename T>
void PoolManager<T>::Release(T pc) {
USE(pc);
if (--monitor_ == 0) {
// Ensure the pool has not been blocked for too long.
VIXL_ASSERT(pc <= checkpoint_);
}
}
template <typename T>
PoolManager<T>::~PoolManager<T>() {
#ifdef VIXL_DEBUG
// Check for unbound objects.
for (objects_iter iter = objects_.begin(); iter != objects_.end(); ++iter) {
// There should not be any bound objects left in the pool. For unbound
// objects, we will check in the destructor of the object itself.
VIXL_ASSERT(!(*iter).label_base_->IsBound());
}
#endif
// Delete objects the pool manager owns.
for (typename std::vector<LocationBase<T> *>::iterator
iter = delete_on_destruction_.begin(),
end = delete_on_destruction_.end();
iter != end;
++iter) {
delete *iter;
}
}
template <typename T>
int PoolManager<T>::GetPoolSizeForTest() const {
// Iterate over objects and return their cumulative size. This does not take
// any padding into account, just the size of the objects themselves.
int size = 0;
for (const_objects_iter iter = objects_.begin(); iter != objects_.end();
++iter) {
size += (*iter).label_base_->GetPoolObjectSizeInBytes();
}
return size;
}
} // namespace vixl
#endif // VIXL_POOL_MANAGER_IMPL_H_
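The checkpoint arithmetic above (UpdateCheckpointForObject() and RecalculateCheckpoint()) can be summarised with a small standalone sketch that is not part of VIXL: walk the objects from the one with the largest max_location_ downwards, reserving space for each object, clamping to its hard limit, aligning down, and finally accounting for the header. The Obj struct, the alignment helper and all values are made up for illustration.

#include <algorithm>
#include <cstdint>
#include <limits>
#include <vector>

namespace sketch {

struct Obj {
  int32_t max_location;  // last location at which the object may start
  int size;              // object size in bytes
  int alignment;         // power of two
};

inline int32_t AlignDownTo(int32_t value, int align) {
  return value - (value % align);
}

// Latest pc at which the pool *must* be emitted so that every object still fits.
int32_t Checkpoint(std::vector<Obj> objects, int header_size, int alignment) {
  // Process the object that must be emitted last first.
  std::sort(objects.begin(), objects.end(),
            [](const Obj& a, const Obj& b) { return a.max_location < b.max_location; });
  int32_t checkpoint = std::numeric_limits<int32_t>::max();
  for (int i = static_cast<int>(objects.size()) - 1; i >= 0; --i) {
    checkpoint -= objects[i].size;                               // reserve space
    checkpoint = std::min(checkpoint, objects[i].max_location);  // hard limit
    checkpoint = AlignDownTo(checkpoint, objects[i].alignment);  // alignment
  }
  return AlignDownTo(checkpoint - header_size, alignment);       // take the header into account
}

}  // namespace sketch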

View file

@@ -0,0 +1,555 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_POOL_MANAGER_H_
#define VIXL_POOL_MANAGER_H_
#include <stdint.h>
#include <cstddef>
#include <limits>
#include <map>
#include <vector>
#include "globals-vixl.h"
#include "macro-assembler-interface.h"
#include "utils-vixl.h"
namespace vixl {
class TestPoolManager;
// There are four classes declared in this header file:
// PoolManager, PoolObject, ForwardReference and LocationBase.
// The PoolManager manages both literal and veneer pools, and is designed to be
// shared between AArch32 and AArch64. A pool is represented as an abstract
// collection of references to objects. The manager does not need to know
// architecture-specific details about literals and veneers; the actual
// emission of the pool objects is delegated.
//
// Literal and Label will derive from LocationBase. The MacroAssembler will
// create these objects as instructions that reference pool objects are
// encountered, and ask the PoolManager to track them. The PoolManager will
// create an internal PoolObject object for each object derived from
// LocationBase. Some of these PoolObject objects will be deleted when placed
// (e.g. the ones corresponding to Literals), whereas others will be updated
// with a new range when placed (e.g. Veneers) and deleted when Bind() is
// called on the PoolManager with their corresponding object as a parameter.
//
// A ForwardReference represents a reference to a PoolObject that will be
// placed later in the instruction stream. Each ForwardReference may only refer
// to one PoolObject, but many ForwardReferences may refer to the same
// object.
//
// A PoolObject represents an object that has not yet been placed. The final
// location of a PoolObject (and hence the LocationBase object to which it
// corresponds) is constrained mostly by the instructions that refer to it, but
// PoolObjects can also have inherent constraints, such as alignment.
//
// LocationBase objects, unlike PoolObject objects, can be used outside of the
// pool manager (e.g. as manually placed literals, which may still have
// forward references that need to be resolved).
//
// At the moment, each LocationBase will have at most one PoolObject that keeps
// the relevant information for placing this object in the pool. When that
// object is placed, all forward references of the object are resolved. For
// that reason, we do not need to keep track of the ForwardReference objects in
// the PoolObject.
// T is an integral type used for representing locations. For a 32-bit
// architecture it will typically be int32_t, whereas for a 64-bit
// architecture it will be int64_t.
template <typename T>
class ForwardReference;
template <typename T>
class PoolObject;
template <typename T>
class PoolManager;
// Represents an object that has a size and alignment, and either has a known
// location or has not been placed yet. An object of a subclass of LocationBase
// will typically keep track of a number of ForwardReferences when it has not
// yet been placed, but LocationBase does not assume or implement that
// functionality. LocationBase provides virtual methods for emitting the
// object, updating all the forward references, and giving the PoolManager
// information on the lifetime of this object and the corresponding PoolObject.
template <typename T>
class LocationBase {
public:
// The size of a LocationBase object is restricted to 4KB, in order to avoid
// situations where the size of the pool becomes larger than the range of
// an unconditional branch. This cannot happen without having large objects,
// as the range of an unconditional branch is typically the largest range
// an instruction supports.
// TODO: This would ideally be an architecture-specific value, perhaps
// another template parameter.
static const int kMaxObjectSize = 4 * KBytes;
// By default, LocationBase objects are aligned naturally to their size.
LocationBase(uint32_t type, int size)
: pool_object_size_(size),
pool_object_alignment_(size),
pool_object_type_(type),
is_bound_(false),
location_(0) {
VIXL_ASSERT(size > 0);
VIXL_ASSERT(size <= kMaxObjectSize);
VIXL_ASSERT(IsPowerOf2(size));
}
// Allow alignment to be specified, as long as it is smaller than the size.
LocationBase(uint32_t type, int size, int alignment)
: pool_object_size_(size),
pool_object_alignment_(alignment),
pool_object_type_(type),
is_bound_(false),
location_(0) {
VIXL_ASSERT(size > 0);
VIXL_ASSERT(size <= kMaxObjectSize);
VIXL_ASSERT(IsPowerOf2(alignment));
VIXL_ASSERT(alignment <= size);
}
// Constructor for locations that are already bound.
explicit LocationBase(T location)
: pool_object_size_(-1),
pool_object_alignment_(-1),
pool_object_type_(0),
is_bound_(true),
location_(location) {}
virtual ~LocationBase() {}
// The PoolManager should assume ownership of some objects, and delete them
// after they have been placed. This can happen for example for literals that
// are created internally to the MacroAssembler and the user doesn't get a
// handle to. By default, the PoolManager will not do this.
virtual bool ShouldBeDeletedOnPlacementByPoolManager() const { return false; }
// The PoolManager should assume ownership of some objects, and delete them
// when it is destroyed. By default, the PoolManager will not do this.
virtual bool ShouldBeDeletedOnPoolManagerDestruction() const { return false; }
// Emit the PoolObject. Derived classes will implement this method to emit
// the necessary data and/or code (for example, to emit a literal or a
// veneer). This should not add padding, as it is added explicitly by the pool
// manager.
virtual void EmitPoolObject(MacroAssemblerInterface* masm) = 0;
// Resolve the references to this object. Will encode the necessary offset
// in the instruction corresponding to each reference and then delete it.
// TODO: An alternative here would be to provide a ResolveReference()
// method that only asks the LocationBase to resolve a specific reference
// (thus allowing the pool manager to resolve some of the references only).
// This would mean we need to have some kind of API to get all the references
// to a LabelObject.
virtual void ResolveReferences(internal::AssemblerBase* assembler) = 0;
// Returns true when the PoolObject corresponding to this LocationBase object
// needs to be removed from the pool once placed, and false if it needs to
// be updated instead (in which case UpdatePoolObject will be called).
virtual bool ShouldDeletePoolObjectOnPlacement() const { return true; }
// Update the PoolObject after placing it, if necessary. This will happen for
// example in the case of a placed veneer, where we need to use a new updated
// range and a new reference (from the newly added branch instruction).
// By default, this does nothing, to avoid forcing objects that will not need
// this to have an empty implementation.
virtual void UpdatePoolObject(PoolObject<T>*) {}
// Implement heuristics for emitting this object. If a margin is to be used
// as a hint during pool emission, we will try not to emit the object if we
// are further away from the maximum reachable location by more than the
// margin.
virtual bool UsePoolObjectEmissionMargin() const { return false; }
virtual T GetPoolObjectEmissionMargin() const {
VIXL_ASSERT(UsePoolObjectEmissionMargin() == false);
return 0;
}
int GetPoolObjectSizeInBytes() const { return pool_object_size_; }
int GetPoolObjectAlignment() const { return pool_object_alignment_; }
uint32_t GetPoolObjectType() const { return pool_object_type_; }
bool IsBound() const { return is_bound_; }
T GetLocation() const { return location_; }
// This function can be called multiple times before the object is marked as
// bound with MarkBound() below. This is because some objects (e.g. the ones
// used to represent labels) can have veneers; every time we place a veneer
// we need to keep track of the location in order to resolve the references
// to the object. Reusing the location_ field for this is convenient.
void SetLocation(internal::AssemblerBase* assembler, T location) {
VIXL_ASSERT(!is_bound_);
location_ = location;
ResolveReferences(assembler);
}
void MarkBound() {
VIXL_ASSERT(!is_bound_);
is_bound_ = true;
}
// The following two functions are used when an object is bound by a call to
// PoolManager<T>::Bind().
virtual int GetMaxAlignment() const {
VIXL_ASSERT(!ShouldDeletePoolObjectOnPlacement());
return 1;
}
virtual T GetMinLocation() const {
VIXL_ASSERT(!ShouldDeletePoolObjectOnPlacement());
return 0;
}
private:
// The size of the corresponding PoolObject, in bytes.
int pool_object_size_;
// The alignment of the corresponding PoolObject; this must be a power of two.
int pool_object_alignment_;
// Different derived classes should have different type values. This can be
// used internally by the PoolManager for grouping of objects.
uint32_t pool_object_type_;
// Has the object been bound to a location yet?
bool is_bound_;
protected:
// See comment on SetLocation() for the use of this field.
T location_;
};
template <typename T>
class PoolObject {
public:
// By default, PoolObjects have no inherent position constraints.
explicit PoolObject(LocationBase<T>* parent)
: label_base_(parent),
min_location_(0),
max_location_(std::numeric_limits<T>::max()),
alignment_(parent->GetPoolObjectAlignment()),
skip_until_location_hint_(0),
type_(parent->GetPoolObjectType()) {
VIXL_ASSERT(IsPowerOf2(alignment_));
UpdateLocationHint();
}
// Reset the minimum and maximum location and the alignment of the object.
// This function is public in order to allow the LocationBase corresponding to
// this PoolObject to update the PoolObject when placed, e.g. in the case of
// veneers. The size and type of the object cannot be modified.
void Update(T min, T max, int alignment) {
// We don't use RestrictRange here as the new range is independent of the
// old range (and the maximum location is typically larger).
min_location_ = min;
max_location_ = max;
RestrictAlignment(alignment);
UpdateLocationHint();
}
private:
void RestrictRange(T min, T max) {
VIXL_ASSERT(min <= max_location_);
VIXL_ASSERT(max >= min_location_);
min_location_ = std::max(min_location_, min);
max_location_ = std::min(max_location_, max);
UpdateLocationHint();
}
void RestrictAlignment(int alignment) {
VIXL_ASSERT(IsPowerOf2(alignment));
VIXL_ASSERT(IsPowerOf2(alignment_));
alignment_ = std::max(alignment_, alignment);
}
void UpdateLocationHint() {
if (label_base_->UsePoolObjectEmissionMargin()) {
skip_until_location_hint_ =
max_location_ - label_base_->GetPoolObjectEmissionMargin();
}
}
// The LocationBase that this pool object represents.
LocationBase<T>* label_base_;
// Hard, precise location constraints for the start location of the object.
// They are both inclusive, that is the start location of the object can be
// at any location between min_location_ and max_location_, themselves
// included.
T min_location_;
T max_location_;
// The alignment must be a power of two.
int alignment_;
// Avoid generating this object until skip_until_location_hint_. This
// supports cases where placing the object in the pool has an inherent cost
// that could be avoided in some other way. Veneers are a typical example; we
// would prefer to branch directly (over a pool) rather than use veneers, so
// this value can be set using some heuristic to leave them in the pool.
// This value is only a hint, which will be ignored if it has to in order to
// meet the hard constraints we have.
T skip_until_location_hint_;
// Used only to group objects of similar type together. The PoolManager does
// not know what the types represent.
uint32_t type_;
friend class PoolManager<T>;
};
// Class that represents a forward reference. It is the responsibility of
// LocationBase objects to keep track of forward references and patch them when
// an object is placed - this class is only used by the PoolManager in order to
// restrict the requirements on PoolObjects it is tracking.
template <typename T>
class ForwardReference {
public:
ForwardReference(T location,
int size,
T min_object_location,
T max_object_location,
int object_alignment = 1)
: location_(location),
size_(size),
object_alignment_(object_alignment),
min_object_location_(min_object_location),
max_object_location_(max_object_location) {
VIXL_ASSERT(AlignDown(max_object_location, object_alignment) >=
min_object_location);
}
bool LocationIsEncodable(T location) const {
return location >= min_object_location_ &&
location <= max_object_location_ &&
IsAligned(location, object_alignment_);
}
T GetLocation() const { return location_; }
T GetMinLocation() const { return min_object_location_; }
T GetMaxLocation() const { return max_object_location_; }
int GetAlignment() const { return object_alignment_; }
// Needed for InvalSet.
void SetLocationToInvalidateOnly(T location) { location_ = location; }
private:
// The location of the thing that contains the reference. For example, this
// can be the location of the branch or load instruction.
T location_;
// The size of the instruction that makes the reference, in bytes.
int size_;
// The alignment that the object must satisfy for this reference - must be a
// power of two.
int object_alignment_;
// Specify the possible locations where the object could be stored. AArch32's
// PC offset, and T32's PC alignment calculations should be applied by the
// Assembler, not here. The PoolManager deals only with simple locations.
// Including min_object_address_ is necessary to handle some AArch32
// instructions which have a minimum offset of 0, but also have the implicit
// PC offset.
// Note that this structure cannot handle sparse ranges, such as A32's ADR,
// but doing so is costly and probably not useful in practice. The min and
// max object location both refer to the beginning of the object, are
// inclusive and are not affected by the object size. E.g. if
// max_object_location_ is equal to X, we can place the object at location X
// regardless of its size.
T min_object_location_;
T max_object_location_;
friend class PoolManager<T>;
};
template <typename T>
class PoolManager {
public:
PoolManager(int header_size, int alignment, int buffer_alignment)
: header_size_(header_size),
alignment_(alignment),
buffer_alignment_(buffer_alignment),
checkpoint_(std::numeric_limits<T>::max()),
max_pool_size_(0),
monitor_(0) {}
~PoolManager();
// Check if we will need to emit the pool at location 'pc', when planning to
// generate a certain number of bytes. This optionally takes a
// ForwardReference we are about to generate, in which case the size of the
// reference must be included in 'num_bytes'.
bool MustEmit(T pc,
int num_bytes = 0,
ForwardReference<T>* reference = NULL,
LocationBase<T>* object = NULL) const;
enum EmitOption { kBranchRequired, kNoBranchRequired };
// Emit the pool at location 'pc', using 'masm' as the macroassembler.
// The branch over the header can be optionally omitted using 'option'.
// Returns the new PC after pool emission.
// This expects a number of bytes that are about to be emitted, to be taken
// into account in heuristics for pool object emission.
// This also optionally takes a forward reference and an object as
// parameters, to be used in the case where emission of the pool is triggered
// by adding a new reference to the pool that does not fit. The pool manager
// will need this information in order to apply its heuristics correctly.
T Emit(MacroAssemblerInterface* masm,
T pc,
int num_bytes = 0,
ForwardReference<T>* new_reference = NULL,
LocationBase<T>* new_object = NULL,
EmitOption option = kBranchRequired);
// Add 'reference' to 'object'. Should not be preceded by a call to MustEmit()
// that returned true, unless Emit() has been called successfully afterwards.
void AddObjectReference(const ForwardReference<T>* reference,
LocationBase<T>* object);
// This is to notify the pool that a LocationBase has been bound to a location
// and does not need to be tracked anymore.
// This will happen, for example, for Labels, which are manually bound by the
// user.
// This can potentially add some padding bytes in order to meet the object
// requirements, and will return the new location.
T Bind(MacroAssemblerInterface* masm, LocationBase<T>* object, T location);
// Functions for blocking and releasing the pools.
void Block() { monitor_++; }
void Release(T pc);
bool IsBlocked() const { return monitor_ != 0; }
private:
typedef typename std::vector<PoolObject<T> >::iterator objects_iter;
typedef
typename std::vector<PoolObject<T> >::const_iterator const_objects_iter;
PoolObject<T>* GetObjectIfTracked(LocationBase<T>* label) {
return const_cast<PoolObject<T>*>(
static_cast<const PoolManager<T>*>(this)->GetObjectIfTracked(label));
}
const PoolObject<T>* GetObjectIfTracked(LocationBase<T>* label) const {
for (const_objects_iter iter = objects_.begin(); iter != objects_.end();
++iter) {
const PoolObject<T>& current = *iter;
if (current.label_base_ == label) return &current;
}
return NULL;
}
// Helper function for calculating the checkpoint.
enum SortOption { kSortRequired, kNoSortRequired };
void RecalculateCheckpoint(SortOption sort_option = kSortRequired);
// Comparison function for using std::sort() on objects_. PoolObject A is
// ordered before PoolObject B when A should be emitted before B. The
// comparison depends on the max_location_, size_, alignment_ and
// min_location_.
static bool PoolObjectLessThan(const PoolObject<T>& a,
const PoolObject<T>& b);
// Helper function used in the checkpoint calculation. 'checkpoint' is the
// current checkpoint, which is modified to take 'object' into account. The
// new checkpoint is returned.
static T UpdateCheckpointForObject(T checkpoint, const PoolObject<T>* object);
// Helper function to add a new object into a sorted objects_ array.
void Insert(const PoolObject<T>& new_object);
// Helper functions to remove an object from objects_ and delete the
// corresponding LocationBase object, if necessary. This will be called
// either after placing the object, or when Bind() is called.
void RemoveAndDelete(PoolObject<T>* object);
objects_iter RemoveAndDelete(objects_iter iter);
// Helper function to check if we should skip emitting an object.
bool ShouldSkipObject(PoolObject<T>* pool_object,
T pc,
int num_bytes,
ForwardReference<T>* new_reference,
LocationBase<T>* new_object,
PoolObject<T>* existing_object) const;
// Used only for debugging.
void DumpCurrentState(T pc) const;
// Methods used for testing only, via the test friend classes.
bool PoolIsEmptyForTest() const { return objects_.empty(); }
T GetCheckpointForTest() const { return checkpoint_; }
int GetPoolSizeForTest() const;
// The objects we are tracking references to. The objects_ vector is sorted
// at all times between calls to the public members of the PoolManager. It
// is sorted every time we add, delete or update a PoolObject.
// TODO: Consider a more efficient data structure here, to allow us to delete
// elements as we emit them.
std::vector<PoolObject<T> > objects_;
// Objects to be deleted on pool destruction.
std::vector<LocationBase<T>*> delete_on_destruction_;
// The header_size_ and alignment_ values are hardcoded for each instance of
// PoolManager. The PoolManager does not know how to emit the header, and
// relies on the EmitPoolHeader and EmitPoolFooter methods of the
// MacroAssemblerInterface for that. It will also emit padding if necessary,
// both for the header and at the end of the pool, according to alignment_,
// and using the EmitNopBytes and EmitPaddingBytes method of the
// MacroAssemblerInterface.
// The size of the header, in bytes.
int header_size_;
// The alignment of the header - must be a power of two.
int alignment_;
// The alignment of the buffer - we cannot guarantee any object alignment
// larger than this alignment. When a buffer is grown, this alignment has
// to be guaranteed.
// TODO: Consider extending this to describe the guaranteed alignment as the
// modulo of a known number.
int buffer_alignment_;
// The current checkpoint. This is the latest location at which the pool
// *must* be emitted. This should not be visible outside the pool manager
// and should only be updated in RecalculateCheckpoint.
T checkpoint_;
// Maximum size of the pool, assuming we need the maximum possible padding
// for each object and for the header. It is only updated in
// RecalculateCheckpoint.
T max_pool_size_;
// Indicates whether the emission of this pool is blocked.
int monitor_;
friend class vixl::TestPoolManager;
};
} // namespace vixl
#endif // VIXL_POOL_MANAGER_H_
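To make the protocol described above concrete, here is a hedged usage sketch that is not part of the VIXL sources. The Literal class, the reachable range and the instruction size are assumptions chosen for illustration; real literals and veneers live in the AArch32/AArch64 macro-assemblers.

#include <cstdint>

#include "vixl/pool-manager.h"

namespace sketch {

// Hypothetical 4-byte literal tracked by the pool manager.
class Literal : public vixl::LocationBase<int32_t> {
 public:
  explicit Literal(uint32_t value)
      : vixl::LocationBase<int32_t>(0 /* type */, 4 /* size */), value_(value) {}
  void EmitPoolObject(vixl::MacroAssemblerInterface* masm) override {
    (void)masm;  // a real implementation writes value_ into the code buffer
  }
  void ResolveReferences(vixl::internal::AssemblerBase* assembler) override {
    (void)assembler;  // a real implementation patches every referring load
  }

 private:
  uint32_t value_;
};

// Typical flow when emitting a pc-relative load of 'lit' at location 'pc'.
int32_t EmitLoadOfLiteral(vixl::MacroAssemblerInterface* masm,
                          vixl::PoolManager<int32_t>* pools,
                          Literal* lit,
                          int32_t pc) {
  const int kInstrSize = 4;
  {
    // Provisional reference, used only to decide whether the pool must be
    // emitted before this instruction. Assume the load can reach a literal
    // anywhere in [pc, pc + 4095], 4-byte aligned.
    vixl::ForwardReference<int32_t> probe(pc, kInstrSize, pc, pc + 4095, 4);
    if (pools->MustEmit(pc, kInstrSize, &probe, lit)) {
      pc = pools->Emit(masm, pc, kInstrSize, &probe, lit);
    }
  }
  // ... emit the load instruction itself at the (possibly updated) pc ...
  vixl::ForwardReference<int32_t> ref(pc, kInstrSize, pc, pc + 4095, 4);
  pools->AddObjectReference(&ref, lit);
  return pc + kInstrSize;
}

}  // namespace sketch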

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,855 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "aarch32/constants-aarch32.h"
#include "utils-vixl.h"
namespace vixl {
namespace aarch32 {
// Start of generated code.
const char* ToCString(InstructionType type) {
switch (type) {
case kAdc:
return "adc";
case kAdcs:
return "adcs";
case kAdd:
return "add";
case kAdds:
return "adds";
case kAddw:
return "addw";
case kAdr:
return "adr";
case kAnd:
return "and";
case kAnds:
return "ands";
case kAsr:
return "asr";
case kAsrs:
return "asrs";
case kB:
return "b";
case kBfc:
return "bfc";
case kBfi:
return "bfi";
case kBic:
return "bic";
case kBics:
return "bics";
case kBkpt:
return "bkpt";
case kBl:
return "bl";
case kBlx:
return "blx";
case kBx:
return "bx";
case kBxj:
return "bxj";
case kCbnz:
return "cbnz";
case kCbz:
return "cbz";
case kClrex:
return "clrex";
case kClz:
return "clz";
case kCmn:
return "cmn";
case kCmp:
return "cmp";
case kCrc32b:
return "crc32b";
case kCrc32cb:
return "crc32cb";
case kCrc32ch:
return "crc32ch";
case kCrc32cw:
return "crc32cw";
case kCrc32h:
return "crc32h";
case kCrc32w:
return "crc32w";
case kDmb:
return "dmb";
case kDsb:
return "dsb";
case kEor:
return "eor";
case kEors:
return "eors";
case kFldmdbx:
return "fldmdbx";
case kFldmiax:
return "fldmiax";
case kFstmdbx:
return "fstmdbx";
case kFstmiax:
return "fstmiax";
case kHlt:
return "hlt";
case kHvc:
return "hvc";
case kIsb:
return "isb";
case kIt:
return "it";
case kLda:
return "lda";
case kLdab:
return "ldab";
case kLdaex:
return "ldaex";
case kLdaexb:
return "ldaexb";
case kLdaexd:
return "ldaexd";
case kLdaexh:
return "ldaexh";
case kLdah:
return "ldah";
case kLdm:
return "ldm";
case kLdmda:
return "ldmda";
case kLdmdb:
return "ldmdb";
case kLdmea:
return "ldmea";
case kLdmed:
return "ldmed";
case kLdmfa:
return "ldmfa";
case kLdmfd:
return "ldmfd";
case kLdmib:
return "ldmib";
case kLdr:
return "ldr";
case kLdrb:
return "ldrb";
case kLdrd:
return "ldrd";
case kLdrex:
return "ldrex";
case kLdrexb:
return "ldrexb";
case kLdrexd:
return "ldrexd";
case kLdrexh:
return "ldrexh";
case kLdrh:
return "ldrh";
case kLdrsb:
return "ldrsb";
case kLdrsh:
return "ldrsh";
case kLsl:
return "lsl";
case kLsls:
return "lsls";
case kLsr:
return "lsr";
case kLsrs:
return "lsrs";
case kMla:
return "mla";
case kMlas:
return "mlas";
case kMls:
return "mls";
case kMov:
return "mov";
case kMovs:
return "movs";
case kMovt:
return "movt";
case kMovw:
return "movw";
case kMrs:
return "mrs";
case kMsr:
return "msr";
case kMul:
return "mul";
case kMuls:
return "muls";
case kMvn:
return "mvn";
case kMvns:
return "mvns";
case kNop:
return "nop";
case kOrn:
return "orn";
case kOrns:
return "orns";
case kOrr:
return "orr";
case kOrrs:
return "orrs";
case kPkhbt:
return "pkhbt";
case kPkhtb:
return "pkhtb";
case kPld:
return "pld";
case kPldw:
return "pldw";
case kPli:
return "pli";
case kPop:
return "pop";
case kPush:
return "push";
case kQadd:
return "qadd";
case kQadd16:
return "qadd16";
case kQadd8:
return "qadd8";
case kQasx:
return "qasx";
case kQdadd:
return "qdadd";
case kQdsub:
return "qdsub";
case kQsax:
return "qsax";
case kQsub:
return "qsub";
case kQsub16:
return "qsub16";
case kQsub8:
return "qsub8";
case kRbit:
return "rbit";
case kRev:
return "rev";
case kRev16:
return "rev16";
case kRevsh:
return "revsh";
case kRor:
return "ror";
case kRors:
return "rors";
case kRrx:
return "rrx";
case kRrxs:
return "rrxs";
case kRsb:
return "rsb";
case kRsbs:
return "rsbs";
case kRsc:
return "rsc";
case kRscs:
return "rscs";
case kSadd16:
return "sadd16";
case kSadd8:
return "sadd8";
case kSasx:
return "sasx";
case kSbc:
return "sbc";
case kSbcs:
return "sbcs";
case kSbfx:
return "sbfx";
case kSdiv:
return "sdiv";
case kSel:
return "sel";
case kShadd16:
return "shadd16";
case kShadd8:
return "shadd8";
case kShasx:
return "shasx";
case kShsax:
return "shsax";
case kShsub16:
return "shsub16";
case kShsub8:
return "shsub8";
case kSmlabb:
return "smlabb";
case kSmlabt:
return "smlabt";
case kSmlad:
return "smlad";
case kSmladx:
return "smladx";
case kSmlal:
return "smlal";
case kSmlalbb:
return "smlalbb";
case kSmlalbt:
return "smlalbt";
case kSmlald:
return "smlald";
case kSmlaldx:
return "smlaldx";
case kSmlals:
return "smlals";
case kSmlaltb:
return "smlaltb";
case kSmlaltt:
return "smlaltt";
case kSmlatb:
return "smlatb";
case kSmlatt:
return "smlatt";
case kSmlawb:
return "smlawb";
case kSmlawt:
return "smlawt";
case kSmlsd:
return "smlsd";
case kSmlsdx:
return "smlsdx";
case kSmlsld:
return "smlsld";
case kSmlsldx:
return "smlsldx";
case kSmmla:
return "smmla";
case kSmmlar:
return "smmlar";
case kSmmls:
return "smmls";
case kSmmlsr:
return "smmlsr";
case kSmmul:
return "smmul";
case kSmmulr:
return "smmulr";
case kSmuad:
return "smuad";
case kSmuadx:
return "smuadx";
case kSmulbb:
return "smulbb";
case kSmulbt:
return "smulbt";
case kSmull:
return "smull";
case kSmulls:
return "smulls";
case kSmultb:
return "smultb";
case kSmultt:
return "smultt";
case kSmulwb:
return "smulwb";
case kSmulwt:
return "smulwt";
case kSmusd:
return "smusd";
case kSmusdx:
return "smusdx";
case kSsat:
return "ssat";
case kSsat16:
return "ssat16";
case kSsax:
return "ssax";
case kSsub16:
return "ssub16";
case kSsub8:
return "ssub8";
case kStl:
return "stl";
case kStlb:
return "stlb";
case kStlex:
return "stlex";
case kStlexb:
return "stlexb";
case kStlexd:
return "stlexd";
case kStlexh:
return "stlexh";
case kStlh:
return "stlh";
case kStm:
return "stm";
case kStmda:
return "stmda";
case kStmdb:
return "stmdb";
case kStmea:
return "stmea";
case kStmed:
return "stmed";
case kStmfa:
return "stmfa";
case kStmfd:
return "stmfd";
case kStmib:
return "stmib";
case kStr:
return "str";
case kStrb:
return "strb";
case kStrd:
return "strd";
case kStrex:
return "strex";
case kStrexb:
return "strexb";
case kStrexd:
return "strexd";
case kStrexh:
return "strexh";
case kStrh:
return "strh";
case kSub:
return "sub";
case kSubs:
return "subs";
case kSubw:
return "subw";
case kSvc:
return "svc";
case kSxtab:
return "sxtab";
case kSxtab16:
return "sxtab16";
case kSxtah:
return "sxtah";
case kSxtb:
return "sxtb";
case kSxtb16:
return "sxtb16";
case kSxth:
return "sxth";
case kTbb:
return "tbb";
case kTbh:
return "tbh";
case kTeq:
return "teq";
case kTst:
return "tst";
case kUadd16:
return "uadd16";
case kUadd8:
return "uadd8";
case kUasx:
return "uasx";
case kUbfx:
return "ubfx";
case kUdf:
return "udf";
case kUdiv:
return "udiv";
case kUhadd16:
return "uhadd16";
case kUhadd8:
return "uhadd8";
case kUhasx:
return "uhasx";
case kUhsax:
return "uhsax";
case kUhsub16:
return "uhsub16";
case kUhsub8:
return "uhsub8";
case kUmaal:
return "umaal";
case kUmlal:
return "umlal";
case kUmlals:
return "umlals";
case kUmull:
return "umull";
case kUmulls:
return "umulls";
case kUqadd16:
return "uqadd16";
case kUqadd8:
return "uqadd8";
case kUqasx:
return "uqasx";
case kUqsax:
return "uqsax";
case kUqsub16:
return "uqsub16";
case kUqsub8:
return "uqsub8";
case kUsad8:
return "usad8";
case kUsada8:
return "usada8";
case kUsat:
return "usat";
case kUsat16:
return "usat16";
case kUsax:
return "usax";
case kUsub16:
return "usub16";
case kUsub8:
return "usub8";
case kUxtab:
return "uxtab";
case kUxtab16:
return "uxtab16";
case kUxtah:
return "uxtah";
case kUxtb:
return "uxtb";
case kUxtb16:
return "uxtb16";
case kUxth:
return "uxth";
case kVaba:
return "vaba";
case kVabal:
return "vabal";
case kVabd:
return "vabd";
case kVabdl:
return "vabdl";
case kVabs:
return "vabs";
case kVacge:
return "vacge";
case kVacgt:
return "vacgt";
case kVacle:
return "vacle";
case kVaclt:
return "vaclt";
case kVadd:
return "vadd";
case kVaddhn:
return "vaddhn";
case kVaddl:
return "vaddl";
case kVaddw:
return "vaddw";
case kVand:
return "vand";
case kVbic:
return "vbic";
case kVbif:
return "vbif";
case kVbit:
return "vbit";
case kVbsl:
return "vbsl";
case kVceq:
return "vceq";
case kVcge:
return "vcge";
case kVcgt:
return "vcgt";
case kVcle:
return "vcle";
case kVcls:
return "vcls";
case kVclt:
return "vclt";
case kVclz:
return "vclz";
case kVcmp:
return "vcmp";
case kVcmpe:
return "vcmpe";
case kVcnt:
return "vcnt";
case kVcvt:
return "vcvt";
case kVcvta:
return "vcvta";
case kVcvtb:
return "vcvtb";
case kVcvtm:
return "vcvtm";
case kVcvtn:
return "vcvtn";
case kVcvtp:
return "vcvtp";
case kVcvtr:
return "vcvtr";
case kVcvtt:
return "vcvtt";
case kVdiv:
return "vdiv";
case kVdup:
return "vdup";
case kVeor:
return "veor";
case kVext:
return "vext";
case kVfma:
return "vfma";
case kVfms:
return "vfms";
case kVfnma:
return "vfnma";
case kVfnms:
return "vfnms";
case kVhadd:
return "vhadd";
case kVhsub:
return "vhsub";
case kVld1:
return "vld1";
case kVld2:
return "vld2";
case kVld3:
return "vld3";
case kVld4:
return "vld4";
case kVldm:
return "vldm";
case kVldmdb:
return "vldmdb";
case kVldmia:
return "vldmia";
case kVldr:
return "vldr";
case kVmax:
return "vmax";
case kVmaxnm:
return "vmaxnm";
case kVmin:
return "vmin";
case kVminnm:
return "vminnm";
case kVmla:
return "vmla";
case kVmlal:
return "vmlal";
case kVmls:
return "vmls";
case kVmlsl:
return "vmlsl";
case kVmov:
return "vmov";
case kVmovl:
return "vmovl";
case kVmovn:
return "vmovn";
case kVmrs:
return "vmrs";
case kVmsr:
return "vmsr";
case kVmul:
return "vmul";
case kVmull:
return "vmull";
case kVmvn:
return "vmvn";
case kVneg:
return "vneg";
case kVnmla:
return "vnmla";
case kVnmls:
return "vnmls";
case kVnmul:
return "vnmul";
case kVorn:
return "vorn";
case kVorr:
return "vorr";
case kVpadal:
return "vpadal";
case kVpadd:
return "vpadd";
case kVpaddl:
return "vpaddl";
case kVpmax:
return "vpmax";
case kVpmin:
return "vpmin";
case kVpop:
return "vpop";
case kVpush:
return "vpush";
case kVqabs:
return "vqabs";
case kVqadd:
return "vqadd";
case kVqdmlal:
return "vqdmlal";
case kVqdmlsl:
return "vqdmlsl";
case kVqdmulh:
return "vqdmulh";
case kVqdmull:
return "vqdmull";
case kVqmovn:
return "vqmovn";
case kVqmovun:
return "vqmovun";
case kVqneg:
return "vqneg";
case kVqrdmulh:
return "vqrdmulh";
case kVqrshl:
return "vqrshl";
case kVqrshrn:
return "vqrshrn";
case kVqrshrun:
return "vqrshrun";
case kVqshl:
return "vqshl";
case kVqshlu:
return "vqshlu";
case kVqshrn:
return "vqshrn";
case kVqshrun:
return "vqshrun";
case kVqsub:
return "vqsub";
case kVraddhn:
return "vraddhn";
case kVrecpe:
return "vrecpe";
case kVrecps:
return "vrecps";
case kVrev16:
return "vrev16";
case kVrev32:
return "vrev32";
case kVrev64:
return "vrev64";
case kVrhadd:
return "vrhadd";
case kVrinta:
return "vrinta";
case kVrintm:
return "vrintm";
case kVrintn:
return "vrintn";
case kVrintp:
return "vrintp";
case kVrintr:
return "vrintr";
case kVrintx:
return "vrintx";
case kVrintz:
return "vrintz";
case kVrshl:
return "vrshl";
case kVrshr:
return "vrshr";
case kVrshrn:
return "vrshrn";
case kVrsqrte:
return "vrsqrte";
case kVrsqrts:
return "vrsqrts";
case kVrsra:
return "vrsra";
case kVrsubhn:
return "vrsubhn";
case kVseleq:
return "vseleq";
case kVselge:
return "vselge";
case kVselgt:
return "vselgt";
case kVselvs:
return "vselvs";
case kVshl:
return "vshl";
case kVshll:
return "vshll";
case kVshr:
return "vshr";
case kVshrn:
return "vshrn";
case kVsli:
return "vsli";
case kVsqrt:
return "vsqrt";
case kVsra:
return "vsra";
case kVsri:
return "vsri";
case kVst1:
return "vst1";
case kVst2:
return "vst2";
case kVst3:
return "vst3";
case kVst4:
return "vst4";
case kVstm:
return "vstm";
case kVstmdb:
return "vstmdb";
case kVstmia:
return "vstmia";
case kVstr:
return "vstr";
case kVsub:
return "vsub";
case kVsubhn:
return "vsubhn";
case kVsubl:
return "vsubl";
case kVsubw:
return "vsubw";
case kVswp:
return "vswp";
case kVtbl:
return "vtbl";
case kVtbx:
return "vtbx";
case kVtrn:
return "vtrn";
case kVtst:
return "vtst";
case kVuzp:
return "vuzp";
case kVzip:
return "vzip";
case kYield:
return "yield";
case kUndefInstructionType:
VIXL_UNREACHABLE();
return "";
}
VIXL_UNREACHABLE();
return "";
} // NOLINT(readability/fn_size)
// End of generated code.
} // namespace aarch32
} // namespace vixl

File diff suppressed because it is too large

View file

@ -0,0 +1,742 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
extern "C" {
#include <stdint.h>
}
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include "utils-vixl.h"
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
namespace vixl {
namespace aarch32 {
bool Shift::IsValidAmount(uint32_t amount) const {
switch (GetType()) {
case LSL:
return amount <= 31;
case ROR:
return (amount > 0) && (amount <= 31);
case LSR:
case ASR:
return (amount > 0) && (amount <= 32);
case RRX:
return amount == 0;
default:
VIXL_UNREACHABLE();
return false;
}
}
std::ostream& operator<<(std::ostream& os, const Register reg) {
switch (reg.GetCode()) {
case 12:
return os << "ip";
case 13:
return os << "sp";
case 14:
return os << "lr";
case 15:
return os << "pc";
default:
return os << "r" << reg.GetCode();
}
}
SRegister VRegister::S() const {
VIXL_ASSERT(GetType() == kSRegister);
return SRegister(GetCode());
}
DRegister VRegister::D() const {
VIXL_ASSERT(GetType() == kDRegister);
return DRegister(GetCode());
}
QRegister VRegister::Q() const {
VIXL_ASSERT(GetType() == kQRegister);
return QRegister(GetCode());
}
Register RegisterList::GetFirstAvailableRegister() const {
for (uint32_t i = 0; i < kNumberOfRegisters; i++) {
if (((list_ >> i) & 1) != 0) return Register(i);
}
return Register();
}
std::ostream& PrintRegisterList(std::ostream& os, // NOLINT(runtime/references)
uint32_t list) {
os << "{";
bool first = true;
int code = 0;
while (list != 0) {
if ((list & 1) != 0) {
if (first) {
first = false;
} else {
os << ",";
}
os << Register(code);
}
list >>= 1;
code++;
}
os << "}";
return os;
}
std::ostream& operator<<(std::ostream& os, RegisterList registers) {
return PrintRegisterList(os, registers.GetList());
}
QRegister VRegisterList::GetFirstAvailableQRegister() const {
for (uint32_t i = 0; i < kNumberOfQRegisters; i++) {
if (((list_ >> (i * 4)) & 0xf) == 0xf) return QRegister(i);
}
return QRegister();
}
DRegister VRegisterList::GetFirstAvailableDRegister() const {
for (uint32_t i = 0; i < kMaxNumberOfDRegisters; i++) {
if (((list_ >> (i * 2)) & 0x3) == 0x3) return DRegister(i);
}
return DRegister();
}
SRegister VRegisterList::GetFirstAvailableSRegister() const {
for (uint32_t i = 0; i < kNumberOfSRegisters; i++) {
if (((list_ >> i) & 0x1) != 0) return SRegister(i);
}
return SRegister();
}
std::ostream& operator<<(std::ostream& os, SRegisterList reglist) {
SRegister first = reglist.GetFirstSRegister();
SRegister last = reglist.GetLastSRegister();
if (first.Is(last))
os << "{" << first << "}";
else
os << "{" << first << "-" << last << "}";
return os;
}
std::ostream& operator<<(std::ostream& os, DRegisterList reglist) {
DRegister first = reglist.GetFirstDRegister();
DRegister last = reglist.GetLastDRegister();
if (first.Is(last))
os << "{" << first << "}";
else
os << "{" << first << "-" << last << "}";
return os;
}
std::ostream& operator<<(std::ostream& os, NeonRegisterList nreglist) {
DRegister first = nreglist.GetFirstDRegister();
int increment = nreglist.IsSingleSpaced() ? 1 : 2;
int count =
nreglist.GetLastDRegister().GetCode() - first.GetCode() + increment;
if (count < 0) count += kMaxNumberOfDRegisters;
os << "{";
bool first_displayed = false;
for (;;) {
if (first_displayed) {
os << ",";
} else {
first_displayed = true;
}
os << first;
if (nreglist.IsTransferOneLane()) {
os << "[" << nreglist.GetTransferLane() << "]";
} else if (nreglist.IsTransferAllLanes()) {
os << "[]";
}
count -= increment;
if (count <= 0) break;
unsigned next = first.GetCode() + increment;
if (next >= kMaxNumberOfDRegisters) next -= kMaxNumberOfDRegisters;
first = DRegister(next);
}
os << "}";
return os;
}
const char* SpecialRegister::GetName() const {
switch (reg_) {
case APSR:
return "APSR";
case SPSR:
return "SPSR";
}
VIXL_UNREACHABLE();
return "??";
}
const char* MaskedSpecialRegister::GetName() const {
switch (reg_) {
case APSR_nzcvq:
return "APSR_nzcvq";
case APSR_g:
return "APSR_g";
case APSR_nzcvqg:
return "APSR_nzcvqg";
case CPSR_c:
return "CPSR_c";
case CPSR_x:
return "CPSR_x";
case CPSR_xc:
return "CPSR_xc";
case CPSR_sc:
return "CPSR_sc";
case CPSR_sx:
return "CPSR_sx";
case CPSR_sxc:
return "CPSR_sxc";
case CPSR_fc:
return "CPSR_fc";
case CPSR_fx:
return "CPSR_fx";
case CPSR_fxc:
return "CPSR_fxc";
case CPSR_fsc:
return "CPSR_fsc";
case CPSR_fsx:
return "CPSR_fsx";
case CPSR_fsxc:
return "CPSR_fsxc";
case SPSR_c:
return "SPSR_c";
case SPSR_x:
return "SPSR_x";
case SPSR_xc:
return "SPSR_xc";
case SPSR_s:
return "SPSR_s";
case SPSR_sc:
return "SPSR_sc";
case SPSR_sx:
return "SPSR_sx";
case SPSR_sxc:
return "SPSR_sxc";
case SPSR_f:
return "SPSR_f";
case SPSR_fc:
return "SPSR_fc";
case SPSR_fx:
return "SPSR_fx";
case SPSR_fxc:
return "SPSR_fxc";
case SPSR_fs:
return "SPSR_fs";
case SPSR_fsc:
return "SPSR_fsc";
case SPSR_fsx:
return "SPSR_fsx";
case SPSR_fsxc:
return "SPSR_fsxc";
}
VIXL_UNREACHABLE();
return "??";
}
const char* BankedRegister::GetName() const {
switch (reg_) {
case R8_usr:
return "R8_usr";
case R9_usr:
return "R9_usr";
case R10_usr:
return "R10_usr";
case R11_usr:
return "R11_usr";
case R12_usr:
return "R12_usr";
case SP_usr:
return "SP_usr";
case LR_usr:
return "LR_usr";
case R8_fiq:
return "R8_fiq";
case R9_fiq:
return "R9_fiq";
case R10_fiq:
return "R10_fiq";
case R11_fiq:
return "R11_fiq";
case R12_fiq:
return "R12_fiq";
case SP_fiq:
return "SP_fiq";
case LR_fiq:
return "LR_fiq";
case LR_irq:
return "LR_irq";
case SP_irq:
return "SP_irq";
case LR_svc:
return "LR_svc";
case SP_svc:
return "SP_svc";
case LR_abt:
return "LR_abt";
case SP_abt:
return "SP_abt";
case LR_und:
return "LR_und";
case SP_und:
return "SP_und";
case LR_mon:
return "LR_mon";
case SP_mon:
return "SP_mon";
case ELR_hyp:
return "ELR_hyp";
case SP_hyp:
return "SP_hyp";
case SPSR_fiq:
return "SPSR_fiq";
case SPSR_irq:
return "SPSR_irq";
case SPSR_svc:
return "SPSR_svc";
case SPSR_abt:
return "SPSR_abt";
case SPSR_und:
return "SPSR_und";
case SPSR_mon:
return "SPSR_mon";
case SPSR_hyp:
return "SPSR_hyp";
}
VIXL_UNREACHABLE();
return "??";
}
const char* SpecialFPRegister::GetName() const {
switch (reg_) {
case FPSID:
return "FPSID";
case FPSCR:
return "FPSCR";
case MVFR2:
return "MVFR2";
case MVFR1:
return "MVFR1";
case MVFR0:
return "MVFR0";
case FPEXC:
return "FPEXC";
}
VIXL_UNREACHABLE();
return "??";
}
const char* Condition::GetName() const {
switch (condition_) {
case eq:
return "eq";
case ne:
return "ne";
case cs:
return "cs";
case cc:
return "cc";
case mi:
return "mi";
case pl:
return "pl";
case vs:
return "vs";
case vc:
return "vc";
case hi:
return "hi";
case ls:
return "ls";
case ge:
return "ge";
case lt:
return "lt";
case gt:
return "gt";
case le:
return "le";
case al:
return "";
case Condition::kNone:
return "";
}
return "<und>";
}
const char* Shift::GetName() const {
switch (shift_) {
case LSL:
return "lsl";
case LSR:
return "lsr";
case ASR:
return "asr";
case ROR:
return "ror";
case RRX:
return "rrx";
}
VIXL_UNREACHABLE();
return "??";
}
const char* EncodingSize::GetName() const {
switch (size_) {
case Best:
case Narrow:
return "";
case Wide:
return ".w";
}
VIXL_UNREACHABLE();
return "??";
}
const char* DataType::GetName() const {
switch (value_) {
case kDataTypeValueInvalid:
return ".??";
case kDataTypeValueNone:
return "";
case S8:
return ".s8";
case S16:
return ".s16";
case S32:
return ".s32";
case S64:
return ".s64";
case U8:
return ".u8";
case U16:
return ".u16";
case U32:
return ".u32";
case U64:
return ".u64";
case F16:
return ".f16";
case F32:
return ".f32";
case F64:
return ".f64";
case I8:
return ".i8";
case I16:
return ".i16";
case I32:
return ".i32";
case I64:
return ".i64";
case P8:
return ".p8";
case P64:
return ".p64";
case Untyped8:
return ".8";
case Untyped16:
return ".16";
case Untyped32:
return ".32";
case Untyped64:
return ".64";
}
VIXL_UNREACHABLE();
return ".??";
}
const char* MemoryBarrier::GetName() const {
switch (type_) {
case OSHLD:
return "oshld";
case OSHST:
return "oshst";
case OSH:
return "osh";
case NSHLD:
return "nshld";
case NSHST:
return "nshst";
case NSH:
return "nsh";
case ISHLD:
return "ishld";
case ISHST:
return "ishst";
case ISH:
return "ish";
case LD:
return "ld";
case ST:
return "st";
case SY:
return "sy";
}
switch (static_cast<int>(type_)) {
case 0:
return "#0x0";
case 4:
return "#0x4";
case 8:
return "#0x8";
case 0xc:
return "#0xc";
}
VIXL_UNREACHABLE();
return "??";
}
const char* InterruptFlags::GetName() const {
switch (type_) {
case F:
return "f";
case I:
return "i";
case IF:
return "if";
case A:
return "a";
case AF:
return "af";
case AI:
return "ai";
case AIF:
return "aif";
}
VIXL_ASSERT(type_ == 0);
return "";
}
const char* Endianness::GetName() const {
switch (type_) {
case LE:
return "le";
case BE:
return "be";
}
VIXL_UNREACHABLE();
return "??";
}
// Constructor used for disassembly.
ImmediateShiftOperand::ImmediateShiftOperand(int shift_value, int amount_value)
: Shift(shift_value) {
switch (shift_value) {
case LSL:
amount_ = amount_value;
break;
case LSR:
case ASR:
amount_ = (amount_value == 0) ? 32 : amount_value;
break;
case ROR:
amount_ = amount_value;
if (amount_value == 0) SetType(RRX);
break;
default:
VIXL_UNREACHABLE();
SetType(LSL);
amount_ = 0;
break;
}
}
ImmediateT32::ImmediateT32(uint32_t imm) {
// 00000000 00000000 00000000 abcdefgh
if ((imm & ~0xff) == 0) {
SetEncodingValue(imm);
return;
}
if ((imm >> 16) == (imm & 0xffff)) {
if ((imm & 0xff00) == 0) {
// 00000000 abcdefgh 00000000 abcdefgh
SetEncodingValue((imm & 0xff) | (0x1 << 8));
return;
}
if ((imm & 0xff) == 0) {
// abcdefgh 00000000 abcdefgh 00000000
SetEncodingValue(((imm >> 8) & 0xff) | (0x2 << 8));
return;
}
if (((imm >> 8) & 0xff) == (imm & 0xff)) {
// abcdefgh abcdefgh abcdefgh abcdefgh
SetEncodingValue((imm & 0xff) | (0x3 << 8));
return;
}
}
for (int shift = 0; shift < 24; shift++) {
uint32_t imm8 = imm >> (24 - shift);
uint32_t overflow = imm << (8 + shift);
if ((imm8 <= 0xff) && ((imm8 & 0x80) != 0) && (overflow == 0)) {
SetEncodingValue(((shift + 8) << 7) | (imm8 & 0x7F));
return;
}
}
}
static inline uint32_t ror(uint32_t x, int i) {
VIXL_ASSERT((0 < i) && (i < 32));
return (x >> i) | (x << (32 - i));
}
bool ImmediateT32::IsImmediateT32(uint32_t imm) {
/* abcdefgh abcdefgh abcdefgh abcdefgh */
if ((imm ^ ror(imm, 8)) == 0) return true;
/* 00000000 abcdefgh 00000000 abcdefgh */
/* abcdefgh 00000000 abcdefgh 00000000 */
if ((imm ^ ror(imm, 16)) == 0 &&
(((imm & 0xff00) == 0) || ((imm & 0xff) == 0)))
return true;
/* isolate least-significant set bit */
uint32_t lsb = imm & -imm;
/* if imm is less than lsb*256 then it fits, but instead we test imm/256 to
* avoid overflow (underflow is always a successful case) */
return ((imm >> 8) < lsb);
}
uint32_t ImmediateT32::Decode(uint32_t value) {
uint32_t base = value & 0xff;
switch (value >> 8) {
case 0:
return base;
case 1:
return base | (base << 16);
case 2:
return (base << 8) | (base << 24);
case 3:
return base | (base << 8) | (base << 16) | (base << 24);
default:
base |= 0x80;
return base << (32 - (value >> 7));
}
}
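// Worked example of the encodings above (illustrative only): 0xab000000
// matches the shifted 8-bit pattern with imm8 = 0xab placed in the top byte,
// so the constructor stores the 12-bit value (8 << 7) | 0x2b == 0x42b, and
// Decode(0x42b) rebuilds (0x80 | 0x2b) << 24 == 0xab000000. Assuming the
// EncodingValue accessors declared in instructions-aarch32.h:
//
//   ImmediateT32 imm(0xab000000);
//   VIXL_ASSERT(imm.IsValid() && (imm.GetEncodingValue() == 0x42b));
//   VIXL_ASSERT(ImmediateT32::Decode(imm.GetEncodingValue()) == 0xab000000);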
ImmediateA32::ImmediateA32(uint32_t imm) {
// Deal with rot = 0 first to avoid undefined shift by 32.
if (imm <= 0xff) {
SetEncodingValue(imm);
return;
}
for (int rot = 2; rot < 32; rot += 2) {
uint32_t imm8 = (imm << rot) | (imm >> (32 - rot));
if (imm8 <= 0xff) {
SetEncodingValue((rot << 7) | imm8);
return;
}
}
}
bool ImmediateA32::IsImmediateA32(uint32_t imm) {
/* fast-out */
if (imm < 256) return true;
/* avoid getting confused by wrapped-around bytes (this transform has no
* effect on pass/fail results) */
if (imm & 0xff000000) imm = ror(imm, 16);
/* copy odd-numbered set bits into even-numbered bits immediately below, so
* that the least-significant set bit is always an even bit */
imm = imm | ((imm >> 1) & 0x55555555);
/* isolate least-significant set bit (always even) */
uint32_t lsb = imm & -imm;
/* if imm is less than lsb*256 then it fits, but instead we test imm/256 to
* avoid overflow (underflow is always a successful case) */
return ((imm >> 8) < lsb);
}
uint32_t ImmediateA32::Decode(uint32_t value) {
int rotation = (value >> 8) * 2;
VIXL_ASSERT(rotation >= 0);
VIXL_ASSERT(rotation <= 30);
value &= 0xff;
if (rotation == 0) return value;
return (value >> rotation) | (value << (32 - rotation));
}
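// Worked example (illustrative only): 0xff000000 is 0xff rotated right by 8,
// so the constructor stores (8 << 7) | 0xff == 0x4ff, and Decode(0x4ff)
// rebuilds (0xff >> 8) | (0xff << 24) == 0xff000000.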
uint32_t TypeEncodingValue(Shift shift) {
return shift.IsRRX() ? kRRXEncodedValue : shift.GetValue();
}
uint32_t AmountEncodingValue(Shift shift, uint32_t amount) {
switch (shift.GetType()) {
case LSL:
case ROR:
return amount;
case LSR:
case ASR:
return amount % 32;
case RRX:
return 0;
}
return 0;
}
} // namespace aarch32
} // namespace vixl

View file

@ -0,0 +1,152 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "location-aarch32.h"
#include "assembler-aarch32.h"
#include "macro-assembler-aarch32.h"
namespace vixl {
namespace aarch32 {
bool Location::Needs16BitPadding(int32_t location) const {
if (!HasForwardReferences()) return false;
const ForwardRef& last_ref = GetLastForwardReference();
int32_t min_location_last_ref = last_ref.GetMinLocation();
VIXL_ASSERT(min_location_last_ref - location <= 2);
return (min_location_last_ref > location);
}
void Location::ResolveReferences(internal::AssemblerBase* assembler) {
// Iterate over references and call EncodeLocationFor on each of them.
for (ForwardRefListIterator it(this); !it.Done(); it.Advance()) {
const ForwardRef& reference = *it.Current();
VIXL_ASSERT(reference.LocationIsEncodable(location_));
int32_t from = reference.GetLocation();
EncodeLocationFor(assembler, from, reference.op());
}
forward_.clear();
}
static bool Is16BitEncoding(uint16_t instr) {
return instr < (kLowestT32_32Opcode >> 16);
}
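// For reference: 32-bit T32 encodings all begin with a halfword of the form
// 0b111xx... (0xe800 and above), so a leading halfword below
// kLowestT32_32Opcode >> 16 identifies a 16-bit instruction. The exact value
// of kLowestT32_32Opcode comes from constants-aarch32.h (assumed here to be
// 0xe8000000).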
void Location::EncodeLocationFor(internal::AssemblerBase* assembler,
int32_t from,
const Location::EmitOperator* encoder) {
if (encoder->IsUsingT32()) {
uint16_t* instr_ptr =
assembler->GetBuffer()->GetOffsetAddress<uint16_t*>(from);
if (Is16BitEncoding(instr_ptr[0])) {
// The Encode methods always deal with uint32_t types, so we need
// to cast it explicitly.
uint32_t instr = static_cast<uint32_t>(instr_ptr[0]);
instr = encoder->Encode(instr, from, this);
// The Encode method should not ever set the top 16 bits.
VIXL_ASSERT((instr & ~0xffff) == 0);
instr_ptr[0] = static_cast<uint16_t>(instr);
} else {
uint32_t instr =
instr_ptr[1] | (static_cast<uint32_t>(instr_ptr[0]) << 16);
instr = encoder->Encode(instr, from, this);
instr_ptr[0] = static_cast<uint16_t>(instr >> 16);
instr_ptr[1] = static_cast<uint16_t>(instr);
}
} else {
uint32_t* instr_ptr =
assembler->GetBuffer()->GetOffsetAddress<uint32_t*>(from);
instr_ptr[0] = encoder->Encode(instr_ptr[0], from, this);
}
}
void Location::AddForwardRef(int32_t instr_location,
const EmitOperator& op,
const ReferenceInfo* info) {
VIXL_ASSERT(referenced_);
int32_t from = instr_location + (op.IsUsingT32() ? kT32PcDelta : kA32PcDelta);
if (info->pc_needs_aligning == ReferenceInfo::kAlignPc)
from = AlignDown(from, 4);
int32_t min_object_location = from + info->min_offset;
int32_t max_object_location = from + info->max_offset;
forward_.insert(ForwardRef(&op,
instr_location,
info->size,
min_object_location,
max_object_location,
info->alignment));
}
int Location::GetMaxAlignment() const {
int max_alignment = GetPoolObjectAlignment();
for (ForwardRefListIterator it(const_cast<Location*>(this)); !it.Done();
it.Advance()) {
const ForwardRef& reference = *it.Current();
if (reference.GetAlignment() > max_alignment)
max_alignment = reference.GetAlignment();
}
return max_alignment;
}
int Location::GetMinLocation() const {
int32_t min_location = 0;
for (ForwardRefListIterator it(const_cast<Location*>(this)); !it.Done();
it.Advance()) {
const ForwardRef& reference = *it.Current();
if (reference.GetMinLocation() > min_location)
min_location = reference.GetMinLocation();
}
return min_location;
}
void Label::UpdatePoolObject(PoolObject<int32_t>* object) {
VIXL_ASSERT(forward_.size() == 1);
const ForwardRef& reference = forward_.Front();
object->Update(reference.GetMinLocation(),
reference.GetMaxLocation(),
reference.GetAlignment());
}
void Label::EmitPoolObject(MacroAssemblerInterface* masm) {
MacroAssembler* macro_assembler = static_cast<MacroAssembler*>(masm);
// Add a new branch to this label.
macro_assembler->GetBuffer()->EnsureSpaceFor(kMaxInstructionSizeInBytes);
ExactAssemblyScopeWithoutPoolsCheck guard(macro_assembler,
kMaxInstructionSizeInBytes,
ExactAssemblyScope::kMaximumSize);
macro_assembler->b(this);
}
void RawLiteral::EmitPoolObject(MacroAssemblerInterface* masm) {
Assembler* assembler = static_cast<Assembler*>(masm->AsAssemblerBase());
assembler->GetBuffer()->EnsureSpaceFor(GetSize());
assembler->GetBuffer()->EmitData(GetDataAddress(), GetSize());
}
} // namespace aarch32
} // namespace vixl

File diff suppressed because it is too large

View file

@ -0,0 +1,563 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may
// be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
extern "C" {
#include <inttypes.h>
#include <stdint.h>
}
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iomanip>
#include <iostream>
#include "utils-vixl.h"
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/operands-aarch32.h"
namespace vixl {
namespace aarch32 {
// Operand
std::ostream& operator<<(std::ostream& os, const Operand& operand) {
if (operand.IsImmediate()) {
return os << "#" << operand.GetImmediate();
}
if (operand.IsImmediateShiftedRegister()) {
if ((operand.GetShift().IsLSL() || operand.GetShift().IsROR()) &&
(operand.GetShiftAmount() == 0)) {
return os << operand.GetBaseRegister();
}
if (operand.GetShift().IsRRX()) {
return os << operand.GetBaseRegister() << ", rrx";
}
return os << operand.GetBaseRegister() << ", " << operand.GetShift() << " #"
<< operand.GetShiftAmount();
}
if (operand.IsRegisterShiftedRegister()) {
return os << operand.GetBaseRegister() << ", " << operand.GetShift() << " "
<< operand.GetShiftRegister();
}
VIXL_UNREACHABLE();
return os;
}
std::ostream& operator<<(std::ostream& os, const NeonImmediate& neon_imm) {
if (neon_imm.IsDouble()) {
if (neon_imm.imm_.d_ == 0) {
if (copysign(1.0, neon_imm.imm_.d_) < 0.0) {
return os << "#-0.0";
}
return os << "#0.0";
}
return os << "#" << std::setprecision(9) << neon_imm.imm_.d_;
}
if (neon_imm.IsFloat()) {
if (neon_imm.imm_.f_ == 0) {
if (copysign(1.0, neon_imm.imm_.f_) < 0.0) return os << "#-0.0";
return os << "#0.0";
}
return os << "#" << std::setprecision(9) << neon_imm.imm_.f_;
}
if (neon_imm.IsInteger64()) {
return os << "#0x" << std::hex << std::setw(16) << std::setfill('0')
<< neon_imm.imm_.u64_ << std::dec;
}
return os << "#" << neon_imm.imm_.u32_;
}
// SOperand
std::ostream& operator<<(std::ostream& os, const SOperand& operand) {
if (operand.IsImmediate()) {
return os << operand.GetNeonImmediate();
}
return os << operand.GetRegister();
}
// DOperand
std::ostream& operator<<(std::ostream& os, const DOperand& operand) {
if (operand.IsImmediate()) {
return os << operand.GetNeonImmediate();
}
return os << operand.GetRegister();
}
// QOperand
std::ostream& operator<<(std::ostream& os, const QOperand& operand) {
if (operand.IsImmediate()) {
return os << operand.GetNeonImmediate();
}
return os << operand.GetRegister();
}
ImmediateVbic::ImmediateVbic(DataType dt, const NeonImmediate& neon_imm) {
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
if (dt.GetValue() == I16) {
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x9);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0xb);
SetEncodedImmediate(immediate >> 8);
}
} else if (dt.GetValue() == I32) {
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x1);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0x3);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0) {
SetEncodingValue(0x5);
SetEncodedImmediate(immediate >> 16);
} else if ((immediate & ~0xff000000) == 0) {
SetEncodingValue(0x7);
SetEncodedImmediate(immediate >> 24);
}
}
}
}
DataType ImmediateVbic::DecodeDt(uint32_t cmode) {
switch (cmode) {
case 0x1:
case 0x3:
case 0x5:
case 0x7:
return I32;
case 0x9:
case 0xb:
return I16;
default:
break;
}
VIXL_UNREACHABLE();
return kDataTypeValueInvalid;
}
NeonImmediate ImmediateVbic::DecodeImmediate(uint32_t cmode,
uint32_t immediate) {
switch (cmode) {
case 0x1:
case 0x9:
return immediate;
case 0x3:
case 0xb:
return immediate << 8;
case 0x5:
return immediate << 16;
case 0x7:
return immediate << 24;
default:
break;
}
VIXL_UNREACHABLE();
return 0;
}
ImmediateVmov::ImmediateVmov(DataType dt, const NeonImmediate& neon_imm) {
if (neon_imm.IsInteger()) {
switch (dt.GetValue()) {
case I8:
if (neon_imm.CanConvert<uint8_t>()) {
SetEncodingValue(0xe);
SetEncodedImmediate(neon_imm.GetImmediate<uint8_t>());
}
break;
case I16:
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x8);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0xa);
SetEncodedImmediate(immediate >> 8);
}
}
break;
case I32:
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x0);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0x2);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0) {
SetEncodingValue(0x4);
SetEncodedImmediate(immediate >> 16);
} else if ((immediate & ~0xff000000) == 0) {
SetEncodingValue(0x6);
SetEncodedImmediate(immediate >> 24);
} else if ((immediate & ~0xff00) == 0xff) {
SetEncodingValue(0xc);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0xffff) {
SetEncodingValue(0xd);
SetEncodedImmediate(immediate >> 16);
}
}
break;
case I64: {
bool is_valid = true;
uint32_t encoding = 0;
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
uint32_t mask = 0xff000000;
for (uint32_t set_bit = 1 << 3; set_bit != 0; set_bit >>= 1) {
if ((immediate & mask) == mask) {
encoding |= set_bit;
} else if ((immediate & mask) != 0) {
is_valid = false;
break;
}
mask >>= 8;
}
} else {
uint64_t immediate = neon_imm.GetImmediate<uint64_t>();
uint64_t mask = UINT64_C(0xff) << 56;
for (uint32_t set_bit = 1 << 7; set_bit != 0; set_bit >>= 1) {
if ((immediate & mask) == mask) {
encoding |= set_bit;
} else if ((immediate & mask) != 0) {
is_valid = false;
break;
}
mask >>= 8;
}
}
if (is_valid) {
SetEncodingValue(0x1e);
SetEncodedImmediate(encoding);
}
break;
}
default:
break;
}
} else {
switch (dt.GetValue()) {
case F32:
if (neon_imm.IsFloat() || neon_imm.IsDouble()) {
ImmediateVFP vfp(neon_imm.GetImmediate<float>());
if (vfp.IsValid()) {
SetEncodingValue(0xf);
SetEncodedImmediate(vfp.GetEncodingValue());
}
}
break;
default:
break;
}
}
}
DataType ImmediateVmov::DecodeDt(uint32_t cmode) {
switch (cmode & 0xf) {
case 0x0:
case 0x2:
case 0x4:
case 0x6:
case 0xc:
case 0xd:
return I32;
case 0x8:
case 0xa:
return I16;
case 0xe:
return ((cmode & 0x10) == 0) ? I8 : I64;
case 0xf:
if ((cmode & 0x10) == 0) return F32;
break;
default:
break;
}
VIXL_UNREACHABLE();
return kDataTypeValueInvalid;
}
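// For example, a RegisterList whose encoded list is 0x5003 (bits 0, 1, 12 and
// 14 set) prints as "{r0,r1,ip,lr}", using the register aliases defined by the
// Register stream operator above.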
NeonImmediate ImmediateVmov::DecodeImmediate(uint32_t cmode,
uint32_t immediate) {
switch (cmode & 0xf) {
case 0x8:
case 0x0:
return immediate;
case 0x2:
case 0xa:
return immediate << 8;
case 0x4:
return immediate << 16;
case 0x6:
return immediate << 24;
case 0xc:
return (immediate << 8) | 0xff;
case 0xd:
return (immediate << 16) | 0xffff;
case 0xe: {
if (cmode == 0x1e) {
uint64_t encoding = 0;
for (uint32_t set_bit = 1 << 7; set_bit != 0; set_bit >>= 1) {
encoding <<= 8;
if ((immediate & set_bit) != 0) {
encoding |= 0xff;
}
}
return encoding;
} else {
return immediate;
}
}
case 0xf: {
return ImmediateVFP::Decode<float>(immediate);
}
default:
break;
}
VIXL_UNREACHABLE();
return 0;
}
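// For example, with dt == I32 an immediate of 0x0000ab00 selects the
// cmode == 0x2 ("imm8 shifted left by 8") form above: the stored imm8 is 0xab
// and DecodeImmediate(0x2, 0xab) reconstructs 0xab << 8 == 0x0000ab00.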
ImmediateVmvn::ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm) {
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
switch (dt.GetValue()) {
case I16:
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x8);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0xa);
SetEncodedImmediate(immediate >> 8);
}
break;
case I32:
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x0);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0x2);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0) {
SetEncodingValue(0x4);
SetEncodedImmediate(immediate >> 16);
} else if ((immediate & ~0xff000000) == 0) {
SetEncodingValue(0x6);
SetEncodedImmediate(immediate >> 24);
} else if ((immediate & ~0xff00) == 0xff) {
SetEncodingValue(0xc);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0xffff) {
SetEncodingValue(0xd);
SetEncodedImmediate(immediate >> 16);
}
break;
default:
break;
}
}
}
DataType ImmediateVmvn::DecodeDt(uint32_t cmode) {
switch (cmode) {
case 0x0:
case 0x2:
case 0x4:
case 0x6:
case 0xc:
case 0xd:
return I32;
case 0x8:
case 0xa:
return I16;
default:
break;
}
VIXL_UNREACHABLE();
return kDataTypeValueInvalid;
}
NeonImmediate ImmediateVmvn::DecodeImmediate(uint32_t cmode,
uint32_t immediate) {
switch (cmode) {
case 0x0:
case 0x8:
return immediate;
case 0x2:
case 0xa:
return immediate << 8;
case 0x4:
return immediate << 16;
case 0x6:
return immediate << 24;
case 0xc:
return (immediate << 8) | 0xff;
case 0xd:
return (immediate << 16) | 0xffff;
default:
break;
}
VIXL_UNREACHABLE();
return 0;
}
ImmediateVorr::ImmediateVorr(DataType dt, const NeonImmediate& neon_imm) {
if (neon_imm.IsInteger32()) {
uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
if (dt.GetValue() == I16) {
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x9);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0xb);
SetEncodedImmediate(immediate >> 8);
}
} else if (dt.GetValue() == I32) {
if ((immediate & ~0xff) == 0) {
SetEncodingValue(0x1);
SetEncodedImmediate(immediate);
} else if ((immediate & ~0xff00) == 0) {
SetEncodingValue(0x3);
SetEncodedImmediate(immediate >> 8);
} else if ((immediate & ~0xff0000) == 0) {
SetEncodingValue(0x5);
SetEncodedImmediate(immediate >> 16);
} else if ((immediate & ~0xff000000) == 0) {
SetEncodingValue(0x7);
SetEncodedImmediate(immediate >> 24);
}
}
}
}
DataType ImmediateVorr::DecodeDt(uint32_t cmode) {
switch (cmode) {
case 0x1:
case 0x3:
case 0x5:
case 0x7:
return I32;
case 0x9:
case 0xb:
return I16;
default:
break;
}
VIXL_UNREACHABLE();
return kDataTypeValueInvalid;
}
NeonImmediate ImmediateVorr::DecodeImmediate(uint32_t cmode,
uint32_t immediate) {
switch (cmode) {
case 0x1:
case 0x9:
return immediate;
case 0x3:
case 0xb:
return immediate << 8;
case 0x5:
return immediate << 16;
case 0x7:
return immediate << 24;
default:
break;
}
VIXL_UNREACHABLE();
return 0;
}
// MemOperand
std::ostream& operator<<(std::ostream& os, const MemOperand& operand) {
os << "[" << operand.GetBaseRegister();
if (operand.GetAddrMode() == PostIndex) {
os << "]";
if (operand.IsRegisterOnly()) return os << "!";
}
if (operand.IsImmediate()) {
if ((operand.GetOffsetImmediate() != 0) || operand.GetSign().IsMinus() ||
((operand.GetAddrMode() != Offset) && !operand.IsRegisterOnly())) {
if (operand.GetOffsetImmediate() == 0) {
os << ", #" << operand.GetSign() << operand.GetOffsetImmediate();
} else {
os << ", #" << operand.GetOffsetImmediate();
}
}
} else if (operand.IsPlainRegister()) {
os << ", " << operand.GetSign() << operand.GetOffsetRegister();
} else if (operand.IsShiftedRegister()) {
os << ", " << operand.GetSign() << operand.GetOffsetRegister()
<< ImmediateShiftOperand(operand.GetShift(), operand.GetShiftAmount());
} else {
VIXL_UNREACHABLE();
return os;
}
if (operand.GetAddrMode() == Offset) {
os << "]";
} else if (operand.GetAddrMode() == PreIndex) {
os << "]!";
}
return os;
}
std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand) {
os << "[" << operand.GetBaseRegister() << operand.GetAlignment() << "]";
if (operand.GetAddrMode() == PostIndex) {
if (operand.IsPlainRegister()) {
os << ", " << operand.GetOffsetRegister();
} else {
os << "!";
}
}
return os;
}
} // namespace aarch32
} // namespace vixl

File diff suppressed because it is too large

View file

@ -0,0 +1,178 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../utils-vixl.h"
#include "cpu-aarch64.h"
namespace vixl {
namespace aarch64 {
// Initialise to smallest possible cache size.
unsigned CPU::dcache_line_size_ = 1;
unsigned CPU::icache_line_size_ = 1;
// Currently computes I and D cache line size.
void CPU::SetUp() {
uint32_t cache_type_register = GetCacheType();
// The cache type register holds information about the caches, including I
// and D caches line size.
static const int kDCacheLineSizeShift = 16;
static const int kICacheLineSizeShift = 0;
static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
// The cache type register holds the size of the I and D caches in words as
// a power of two.
uint32_t dcache_line_size_power_of_two =
(cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
uint32_t icache_line_size_power_of_two =
(cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
dcache_line_size_ = 4 << dcache_line_size_power_of_two;
icache_line_size_ = 4 << icache_line_size_power_of_two;
}
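// For example, a line-size field of 4 in the cache type register means
// 2^4 = 16 words per line, so the computed line size is 4 << 4 = 64 bytes.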
uint32_t CPU::GetCacheType() {
#ifdef __aarch64__
uint64_t cache_type_register;
// Copy the content of the cache type register to a core register.
__asm__ __volatile__("mrs %[ctr], ctr_el0" // NOLINT(runtime/references)
: [ctr] "=r"(cache_type_register));
VIXL_ASSERT(IsUint32(cache_type_register));
return static_cast<uint32_t>(cache_type_register);
#else
// This will lead to a cache with 1 byte long lines, which is fine since
// neither EnsureIAndDCacheCoherency nor the simulator will need this
// information.
return 0;
#endif
}
void CPU::EnsureIAndDCacheCoherency(void *address, size_t length) {
#ifdef __aarch64__
// Implement the cache synchronisation for all targets where AArch64 is the
// host, even if we're building the simulator for an AArch64 host. This
// allows for cases where the user wants to simulate code as well as run it
// natively.
if (length == 0) {
return;
}
// The code below assumes user space cache operations are allowed.
// Work out the line sizes for each cache, and use them to determine the
// start addresses.
uintptr_t start = reinterpret_cast<uintptr_t>(address);
uintptr_t dsize = static_cast<uintptr_t>(dcache_line_size_);
uintptr_t isize = static_cast<uintptr_t>(icache_line_size_);
uintptr_t dline = start & ~(dsize - 1);
uintptr_t iline = start & ~(isize - 1);
// Cache line sizes are always a power of 2.
VIXL_ASSERT(IsPowerOf2(dsize));
VIXL_ASSERT(IsPowerOf2(isize));
uintptr_t end = start + length;
do {
__asm__ __volatile__(
// Clean each line of the D cache containing the target data.
//
// dc : Data Cache maintenance
// c : Clean
// va : by (Virtual) Address
// u : to the point of Unification
// The point of unification for a processor is the point by which the
// instruction and data caches are guaranteed to see the same copy of a
// memory location. See ARM DDI 0406B page B2-12 for more information.
" dc cvau, %[dline]\n"
:
: [dline] "r"(dline)
// This code does not write to memory, but the "memory" dependency
// prevents GCC from reordering the code.
: "memory");
dline += dsize;
} while (dline < end);
__asm__ __volatile__(
// Make sure that the data cache operations (above) complete before the
// instruction cache operations (below).
//
// dsb : Data Synchronisation Barrier
// ish : Inner SHareable domain
//
// The point of unification for an Inner Shareable shareability domain is
// the point by which the instruction and data caches of all the processors
// in that Inner Shareable shareability domain are guaranteed to see the
// same copy of a memory location. See ARM DDI 0406B page B2-12 for more
// information.
" dsb ish\n"
:
:
: "memory");
do {
__asm__ __volatile__(
// Invalidate each line of the I cache containing the target data.
//
// ic : Instruction Cache maintenance
// i : Invalidate
// va : by Address
// u : to the point of Unification
" ic ivau, %[iline]\n"
:
: [iline] "r"(iline)
: "memory");
iline += isize;
} while (iline < end);
__asm__ __volatile__(
// Make sure that the instruction cache operations (above) take effect
// before the isb (below).
" dsb ish\n"
// Ensure that any instructions already in the pipeline are discarded and
// reloaded from the new data.
// isb : Instruction Synchronisation Barrier
" isb\n"
:
:
: "memory");
#else
// If the host isn't AArch64, we must be using the simulator, so this function
// doesn't have to do anything.
USE(address, length);
#endif
}
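// A minimal usage sketch (illustrative only; "code" and "code_size" are
// hypothetical names for a freshly written code buffer):
//
//   CPU::SetUp();                                    // query cache line sizes
//   CPU::EnsureIAndDCacheCoherency(code, code_size); // clean D, invalidate I
//   // It is now safe to branch to the generated instructions.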
} // namespace aarch64
} // namespace vixl

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,713 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "instructions-aarch64.h"
#include "assembler-aarch64.h"
namespace vixl {
namespace aarch64 {
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
uint64_t value,
unsigned width) {
VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
uint64_t result = value & ((UINT64_C(1) << width) - 1);
for (unsigned i = width; i < reg_size; i *= 2) {
result |= (result << i);
}
return result;
}
bool Instruction::IsLoad() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) != 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
switch (op) {
case LDRB_w:
case LDRH_w:
case LDR_w:
case LDR_x:
case LDRSB_w:
case LDRSB_x:
case LDRSH_w:
case LDRSH_x:
case LDRSW_x:
case LDR_b:
case LDR_h:
case LDR_s:
case LDR_d:
case LDR_q:
return true;
default:
return false;
}
}
}
bool Instruction::IsStore() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) == 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
switch (op) {
case STRB_w:
case STRH_w:
case STR_w:
case STR_x:
case STR_b:
case STR_h:
case STR_s:
case STR_d:
case STR_q:
return true;
default:
return false;
}
}
}
// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::GetImmLogical() const {
unsigned reg_size = GetSixtyFourBits() ? kXRegSize : kWRegSize;
int32_t n = GetBitN();
int32_t imm_s = GetImmSetBits();
int32_t imm_r = GetImmRotate();
// An integer is constructed from the n, imm_s and imm_r bits according to
// the following table:
//
// N imms immr size S R
// 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
// 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
// 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
// 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
// 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
// 0 11110s xxxxxr 2 UInt(s) UInt(r)
// (s bits must not be all set)
//
// A pattern is constructed of size bits, where the least significant S+1
// bits are set. The pattern is rotated right by R, and repeated across a
// 32 or 64-bit value, depending on destination register width.
//
if (n == 1) {
if (imm_s == 0x3f) {
return 0;
}
uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
return RotateRight(bits, imm_r, 64);
} else {
if ((imm_s >> 1) == 0x1f) {
return 0;
}
for (int width = 0x20; width >= 0x2; width >>= 1) {
if ((imm_s & width) == 0) {
int mask = width - 1;
if ((imm_s & mask) == mask) {
return 0;
}
uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
return RepeatBitsAcrossReg(reg_size,
RotateRight(bits, imm_r & mask, width),
width);
}
}
}
VIXL_UNREACHABLE();
return 0;
}
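// Worked example of the table above: N = 0, imms = 0b001111, immr = 0 selects
// a 32-bit element with S = 15 and R = 0, so the base pattern is
// (1 << 16) - 1 = 0xffff with no rotation, giving 0x0000ffff when replicated
// across a W register (and 0x0000ffff0000ffff across an X register).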
uint32_t Instruction::GetImmNEONabcdefgh() const {
return GetImmNEONabc() << 5 | GetImmNEONdefgh();
}
Float16 Instruction::Imm8ToFloat16(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Half: aBbb.cdef.gh00.0000 (16 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint16_t bit7 = (bits >> 7) & 0x1;
uint16_t bit6 = (bits >> 6) & 0x1;
uint16_t bit5_to_0 = bits & 0x3f;
uint16_t result = (bit7 << 15) | ((4 - bit6) << 12) | (bit5_to_0 << 6);
return RawbitsToFloat16(result);
}
float Instruction::Imm8ToFP32(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint32_t bit7 = (bits >> 7) & 0x1;
uint32_t bit6 = (bits >> 6) & 0x1;
uint32_t bit5_to_0 = bits & 0x3f;
uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
return RawbitsToFloat(result);
}
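// Worked example: imm8 = 0x70 (a = 0, b = c = d = 1, e..h = 0) expands to
// 0x3f800000, which is the IEEE-754 single-precision encoding of 1.0f.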
Float16 Instruction::GetImmFP16() const { return Imm8ToFloat16(GetImmFP()); }
float Instruction::GetImmFP32() const { return Imm8ToFP32(GetImmFP()); }
double Instruction::Imm8ToFP64(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint64_t bit7 = (bits >> 7) & 0x1;
uint64_t bit6 = (bits >> 6) & 0x1;
uint64_t bit5_to_0 = bits & 0x3f;
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
return RawbitsToDouble(result);
}
double Instruction::GetImmFP64() const { return Imm8ToFP64(GetImmFP()); }
Float16 Instruction::GetImmNEONFP16() const {
return Imm8ToFloat16(GetImmNEONabcdefgh());
}
float Instruction::GetImmNEONFP32() const {
return Imm8ToFP32(GetImmNEONabcdefgh());
}
double Instruction::GetImmNEONFP64() const {
return Imm8ToFP64(GetImmNEONabcdefgh());
}
unsigned CalcLSDataSize(LoadStoreOp op) {
VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
unsigned size = static_cast<Instr>(op) >> LSSize_offset;
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
// and "opc" fields.
if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
size = kQRegSizeInBytesLog2;
}
}
return size;
}
unsigned CalcLSPairDataSize(LoadStorePairOp op) {
VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
switch (op) {
case STP_q:
case LDP_q:
return kQRegSizeInBytesLog2;
case STP_x:
case LDP_x:
case STP_d:
case LDP_d:
return kXRegSizeInBytesLog2;
default:
return kWRegSizeInBytesLog2;
}
}
int Instruction::GetImmBranchRangeBitwidth(ImmBranchType branch_type) {
switch (branch_type) {
case UncondBranchType:
return ImmUncondBranch_width;
case CondBranchType:
return ImmCondBranch_width;
case CompareBranchType:
return ImmCmpBranch_width;
case TestBranchType:
return ImmTestBranch_width;
default:
VIXL_UNREACHABLE();
return 0;
}
}
int32_t Instruction::GetImmBranchForwardRange(ImmBranchType branch_type) {
int32_t encoded_max = 1 << (GetImmBranchRangeBitwidth(branch_type) - 1);
return encoded_max * kInstructionSize;
}
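// For example, a conditional branch encodes a 19-bit signed offset in units
// of instructions (the B.cond imm19 field), so the forward range returned
// here is (1 << 18) * 4 bytes, i.e. 1 MiB.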
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
int64_t offset) {
return IsIntN(GetImmBranchRangeBitwidth(branch_type), offset);
}
const Instruction* Instruction::GetImmPCOffsetTarget() const {
const Instruction* base = this;
ptrdiff_t offset;
if (IsPCRelAddressing()) {
// ADR and ADRP.
offset = GetImmPCRel();
if (Mask(PCRelAddressingMask) == ADRP) {
base = AlignDown(base, kPageSize);
offset *= kPageSize;
} else {
VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
}
} else {
// All PC-relative branches.
VIXL_ASSERT(GetBranchType() != UnknownBranchType);
// Relative branch offsets are instruction-size-aligned.
offset = GetImmBranch() * static_cast<int>(kInstructionSize);
}
return base + offset;
}
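// Illustrative example (assuming 4 KiB pages, i.e. kPageSize == 4096): an
// ADRP at address 0x40001234 with an encoded immediate of 3 uses the base
// AlignDown(0x40001234, 4096) == 0x40001000 and yields the target
// 0x40001000 + 3 * 4096 == 0x40004000.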
int Instruction::GetImmBranch() const {
switch (GetBranchType()) {
case CondBranchType:
return GetImmCondBranch();
case UncondBranchType:
return GetImmUncondBranch();
case CompareBranchType:
return GetImmCmpBranch();
case TestBranchType:
return GetImmTestBranch();
default:
VIXL_UNREACHABLE();
}
return 0;
}
void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
if (IsPCRelAddressing()) {
SetPCRelImmTarget(target);
} else {
SetBranchImmTarget(target);
}
}
void Instruction::SetPCRelImmTarget(const Instruction* target) {
ptrdiff_t imm21;
if ((Mask(PCRelAddressingMask) == ADR)) {
imm21 = target - this;
} else {
VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
imm21 = target_page - this_page;
}
Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}
void Instruction::SetBranchImmTarget(const Instruction* target) {
VIXL_ASSERT(((target - this) & 3) == 0);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
switch (GetBranchType()) {
case CondBranchType: {
branch_imm = Assembler::ImmCondBranch(offset);
imm_mask = ImmCondBranch_mask;
break;
}
case UncondBranchType: {
branch_imm = Assembler::ImmUncondBranch(offset);
imm_mask = ImmUncondBranch_mask;
break;
}
case CompareBranchType: {
branch_imm = Assembler::ImmCmpBranch(offset);
imm_mask = ImmCmpBranch_mask;
break;
}
case TestBranchType: {
branch_imm = Assembler::ImmTestBranch(offset);
imm_mask = ImmTestBranch_mask;
break;
}
default:
VIXL_UNREACHABLE();
}
SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
void Instruction::SetImmLLiteral(const Instruction* source) {
VIXL_ASSERT(IsWordAligned(source));
ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
Instr mask = ImmLLiteral_mask;
SetInstructionBits(Mask(~mask) | imm);
}
VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
vform == kFormatH || vform == kFormatS || vform == kFormatD);
switch (vform) {
case kFormat8H:
return kFormat8B;
case kFormat4S:
return kFormat4H;
case kFormat2D:
return kFormat2S;
case kFormatH:
return kFormatB;
case kFormatS:
return kFormatH;
case kFormatD:
return kFormatS;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
vform == kFormatB || vform == kFormatH || vform == kFormatS);
switch (vform) {
case kFormat8B:
return kFormat8H;
case kFormat4H:
return kFormat4S;
case kFormat2S:
return kFormat2D;
case kFormatB:
return kFormatH;
case kFormatH:
return kFormatS;
case kFormatS:
return kFormatD;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatFillQ(VectorFormat vform) {
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return kFormat16B;
case kFormatH:
case kFormat4H:
case kFormat8H:
return kFormat8H;
case kFormatS:
case kFormat2S:
case kFormat4S:
return kFormat4S;
case kFormatD:
case kFormat1D:
case kFormat2D:
return kFormat2D;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
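// Illustrative examples (not part of the upstream VIXL sources), read straight
// off the switch tables above:
//
//   VectorFormatHalfWidth(kFormat4S)   == kFormat4H  // same lanes, half size
//   VectorFormatDoubleWidth(kFormat8B) == kFormat8H  // same lanes, double size
//   VectorFormatFillQ(kFormat2S)       == kFormat4S  // widen to a full Q reg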
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
switch (vform) {
case kFormat4H:
return kFormat8B;
case kFormat8H:
return kFormat16B;
case kFormat2S:
return kFormat4H;
case kFormat4S:
return kFormat8H;
case kFormat1D:
return kFormat2S;
case kFormat2D:
return kFormat4S;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
switch (vform) {
case kFormat8B:
return kFormat16B;
case kFormat4H:
return kFormat8H;
case kFormat2S:
return kFormat4S;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
switch (vform) {
case kFormat16B:
return kFormat8B;
case kFormat8H:
return kFormat4H;
case kFormat4S:
return kFormat2S;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat ScalarFormatFromLaneSize(int laneSize) {
switch (laneSize) {
case 8:
return kFormatB;
case 16:
return kFormatH;
case 32:
return kFormatS;
case 64:
return kFormatD;
default:
VIXL_UNREACHABLE();
return kFormatUndefined;
}
}
VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
}
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormatB:
return kBRegSize;
case kFormatH:
return kHRegSize;
case kFormatS:
case kFormat2H:
return kSRegSize;
case kFormatD:
return kDRegSize;
case kFormat8B:
case kFormat4H:
case kFormat2S:
case kFormat1D:
return kDRegSize;
default:
return kQRegSize;
}
}
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
return RegisterSizeInBitsFromFormat(vform) / 8;
}
unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 8;
case kFormatH:
case kFormat2H:
case kFormat4H:
case kFormat8H:
return 16;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 32;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 64;
default:
VIXL_UNREACHABLE();
return 0;
}
}
int LaneSizeInBytesFromFormat(VectorFormat vform) {
return LaneSizeInBitsFromFormat(vform) / 8;
}
int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 0;
case kFormatH:
case kFormat2H:
case kFormat4H:
case kFormat8H:
return 1;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 2;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 3;
default:
VIXL_UNREACHABLE();
return 0;
}
}
int LaneCountFromFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormat16B:
return 16;
case kFormat8B:
case kFormat8H:
return 8;
case kFormat4H:
case kFormat4S:
return 4;
case kFormat2H:
case kFormat2S:
case kFormat2D:
return 2;
case kFormat1D:
case kFormatB:
case kFormatH:
case kFormatS:
case kFormatD:
return 1;
default:
VIXL_UNREACHABLE();
return 0;
}
}
int MaxLaneCountFromFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 16;
case kFormatH:
case kFormat4H:
case kFormat8H:
return 8;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 4;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 2;
default:
VIXL_UNREACHABLE();
return 0;
}
}
// Does 'vform' indicate a vector format or a scalar format?
bool IsVectorFormat(VectorFormat vform) {
VIXL_ASSERT(vform != kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormatH:
case kFormatS:
case kFormatD:
return false;
default:
return true;
}
}
int64_t MaxIntFromFormat(VectorFormat vform) {
return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
int64_t MinIntFromFormat(VectorFormat vform) {
return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
}
uint64_t MaxUintFromFormat(VectorFormat vform) {
return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
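// Illustrative examples (not part of the upstream VIXL sources), derived from
// the tables above:
//
//   LaneSizeInBitsFromFormat(kFormat4S)      == 32
//   LaneCountFromFormat(kFormat4S)           == 4
//   RegisterSizeInBitsFromFormat(kFormat4S)  == kQRegSize  // 128-bit Q register
//   MaxIntFromFormat(kFormat4S)              == INT64_MAX >> 32 == INT32_MAX
//   MaxUintFromFormat(kFormat8B)             == UINT64_MAX >> 56 == 0xff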
} // namespace aarch64
} // namespace vixl

View file

@@ -0,0 +1,916 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "instrument-aarch64.h"
namespace vixl {
namespace aarch64 {
Counter::Counter(const char* name, CounterType type)
: count_(0), enabled_(false), type_(type) {
VIXL_ASSERT(name != NULL);
strncpy(name_, name, kCounterNameMaxLength);
// Make sure `name_` is always NULL-terminated, even if the source string is
// longer.
name_[kCounterNameMaxLength - 1] = '\0';
}
void Counter::Enable() { enabled_ = true; }
void Counter::Disable() { enabled_ = false; }
bool Counter::IsEnabled() { return enabled_; }
void Counter::Increment() {
if (enabled_) {
count_++;
}
}
uint64_t Counter::GetCount() {
uint64_t result = count_;
if (type_ == Gauge) {
// If the counter is a Gauge, reset the count after reading.
count_ = 0;
}
return result;
}
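// Illustrative sketch (not part of the upstream VIXL sources): a Cumulative
// counter keeps its running total across reads, while a Gauge is reset by
// GetCount() and so reports per-sample deltas.
//
//   Counter c("Load Integer", Gauge);
//   c.Enable();
//   c.Increment();
//   c.Increment();
//   c.GetCount();  // Returns 2 and resets the gauge; a second call returns 0.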
const char* Counter::GetName() { return name_; }
CounterType Counter::GetType() { return type_; }
struct CounterDescriptor {
const char* name;
CounterType type;
};
static const CounterDescriptor kCounterList[] =
{{"Instruction", Cumulative},
{"Move Immediate", Gauge},
{"Add/Sub DP", Gauge},
{"Logical DP", Gauge},
{"Other Int DP", Gauge},
{"FP DP", Gauge},
{"Conditional Select", Gauge},
{"Conditional Compare", Gauge},
{"Unconditional Branch", Gauge},
{"Compare and Branch", Gauge},
{"Test and Branch", Gauge},
{"Conditional Branch", Gauge},
{"Load Integer", Gauge},
{"Load FP", Gauge},
{"Load Pair", Gauge},
{"Load Literal", Gauge},
{"Store Integer", Gauge},
{"Store FP", Gauge},
{"Store Pair", Gauge},
{"PC Addressing", Gauge},
{"Other", Gauge},
{"NEON", Gauge},
{"Crypto", Gauge}};
Instrument::Instrument(const char* datafile, uint64_t sample_period)
: output_stream_(stdout), sample_period_(sample_period) {
// Set up the output stream. If datafile is non-NULL, use that file. If it
// can't be opened, or datafile is NULL, use stdout.
if (datafile != NULL) {
output_stream_ = fopen(datafile, "w");
if (output_stream_ == NULL) {
printf("Can't open output file %s. Using stdout.\n", datafile);
output_stream_ = stdout;
}
}
static const int num_counters =
sizeof(kCounterList) / sizeof(CounterDescriptor);
// Dump an instrumentation description comment at the top of the file.
fprintf(output_stream_, "# counters=%d\n", num_counters);
fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
// Construct Counter objects from counter description array.
for (int i = 0; i < num_counters; i++) {
Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
counters_.push_back(counter);
}
DumpCounterNames();
}
Instrument::~Instrument() {
// Dump any remaining instruction data to the output file.
DumpCounters();
// Free all the counter objects.
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
delete *it;
}
if (output_stream_ != stdout) {
fclose(output_stream_);
}
}
void Instrument::Update() {
// Increment the instruction counter, and dump all counters if a sample period
// has elapsed.
static Counter* counter = GetCounter("Instruction");
VIXL_ASSERT(counter->GetType() == Cumulative);
counter->Increment();
if ((sample_period_ != 0) && counter->IsEnabled() &&
(counter->GetCount() % sample_period_) == 0) {
DumpCounters();
}
}
void Instrument::DumpCounters() {
// Iterate through the counter objects, dumping their values to the output
// stream.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
fprintf(output_stream_, "%" PRIu64 ",", (*it)->GetCount());
}
fprintf(output_stream_, "\n");
fflush(output_stream_);
}
void Instrument::DumpCounterNames() {
// Iterate through the counter objects, dumping the counter names to the
// output stream.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
fprintf(output_stream_, "%s,", (*it)->GetName());
}
fprintf(output_stream_, "\n");
fflush(output_stream_);
}
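// Illustrative sketch (not part of the upstream VIXL sources) of the file this
// produces: two comment lines, a row of counter names, then one CSV row of
// counts every sample_period instructions (values below are made up):
//
//   # counters=23
//   # sample_period=4096
//   Instruction,Move Immediate,Add/Sub DP,...,NEON,Crypto,
//   4096,12,310,...,48,0,
//   8192,7,295,...,64,0,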
void Instrument::HandleInstrumentationEvent(unsigned event) {
switch (event) {
case InstrumentStateEnable:
Enable();
break;
case InstrumentStateDisable:
Disable();
break;
default:
DumpEventMarker(event);
}
}
void Instrument::DumpEventMarker(unsigned marker) {
// Dump an event marker to the output stream as a specially formatted comment
// line.
static Counter* counter = GetCounter("Instruction");
fprintf(output_stream_,
"# %c%c @ %" PRId64 "\n",
marker & 0xff,
(marker >> 8) & 0xff,
counter->GetCount());
}
Counter* Instrument::GetCounter(const char* name) {
// Get a Counter object by name from the counter list.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
if (strcmp((*it)->GetName(), name) == 0) {
return *it;
}
}
// A Counter by that name does not exist: print an error message to stderr
// and the output file, and exit.
static const char* error_message =
"# Error: Unknown counter \"%s\". Exiting.\n";
fprintf(stderr, error_message, name);
fprintf(output_stream_, error_message, name);
exit(1);
}
void Instrument::Enable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
(*it)->Enable();
}
}
void Instrument::Disable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
(*it)->Disable();
}
}
void Instrument::VisitPCRelAddressing(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("PC Addressing");
counter->Increment();
}
void Instrument::VisitAddSubImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitLogicalImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
void Instrument::VisitMoveWideImmediate(const Instruction* instr) {
Update();
static Counter* counter = GetCounter("Move Immediate");
if (instr->IsMovn() && (instr->GetRd() == kZeroRegCode)) {
unsigned imm = instr->GetImmMoveWide();
HandleInstrumentationEvent(imm);
} else {
counter->Increment();
}
}
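// Illustrative note (not part of the upstream VIXL sources): the special case
// above is the instrumentation back-channel. A MOVN writing the zero register
// is otherwise useless, so generated code can signal the Instrument by packing
// an event in the 16-bit immediate, e.g. (in assembly form):
//
//   movn xzr, #InstrumentStateEnable    // start counting
//   ...                                 // region of interest
//   movn xzr, #InstrumentStateDisable   // stop counting
//   movn xzr, #0x424d                   // dump an event marker, printed as "MB"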
void Instrument::VisitBitfield(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitExtract(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitUnconditionalBranch(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitUnconditionalBranchToRegister(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitCompareBranch(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Compare and Branch");
counter->Increment();
}
void Instrument::VisitTestBranch(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Test and Branch");
counter->Increment();
}
void Instrument::VisitConditionalBranch(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Branch");
counter->Increment();
}
void Instrument::VisitSystem(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitException(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::InstrumentLoadStorePair(const Instruction* instr) {
static Counter* load_pair_counter = GetCounter("Load Pair");
static Counter* store_pair_counter = GetCounter("Store Pair");
if (instr->Mask(LoadStorePairLBit) != 0) {
load_pair_counter->Increment();
} else {
store_pair_counter->Increment();
}
}
void Instrument::VisitLoadStorePairPostIndex(const Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairOffset(const Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairPreIndex(const Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairNonTemporal(const Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStoreExclusive(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitAtomicMemory(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitLoadLiteral(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Load Literal");
counter->Increment();
}
void Instrument::InstrumentLoadStore(const Instruction* instr) {
static Counter* load_int_counter = GetCounter("Load Integer");
static Counter* store_int_counter = GetCounter("Store Integer");
static Counter* load_fp_counter = GetCounter("Load FP");
static Counter* store_fp_counter = GetCounter("Store FP");
switch (instr->Mask(LoadStoreMask)) {
case STRB_w:
case STRH_w:
case STR_w:
VIXL_FALLTHROUGH();
case STR_x:
store_int_counter->Increment();
break;
case STR_s:
VIXL_FALLTHROUGH();
case STR_d:
store_fp_counter->Increment();
break;
case LDRB_w:
case LDRH_w:
case LDR_w:
case LDR_x:
case LDRSB_x:
case LDRSH_x:
case LDRSW_x:
case LDRSB_w:
VIXL_FALLTHROUGH();
case LDRSH_w:
load_int_counter->Increment();
break;
case LDR_s:
VIXL_FALLTHROUGH();
case LDR_d:
load_fp_counter->Increment();
break;
}
}
void Instrument::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePostIndex(const Instruction* instr) {
USE(instr);
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePreIndex(const Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreRegisterOffset(const Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLogicalShifted(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
void Instrument::VisitAddSubShifted(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitAddSubExtended(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitAddSubWithCarry(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitConditionalCompareRegister(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalCompareImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalSelect(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitDataProcessing1Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing2Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing3Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitFPCompare(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPConditionalCompare(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitFPConditionalSelect(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitFPImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing1Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing2Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing3Source(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPIntegerConvert(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPFixedPointConvert(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitCrypto2RegSHA(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Crypto");
counter->Increment();
}
void Instrument::VisitCrypto3RegSHA(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Crypto");
counter->Increment();
}
void Instrument::VisitCryptoAES(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Crypto");
counter->Increment();
}
void Instrument::VisitNEON2RegMisc(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON2RegMiscFP16(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3Same(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3SameFP16(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3SameExtra(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3Different(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONAcrossLanes(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONByIndexedElement(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONCopy(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONExtract(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreMultiStructPostIndex(
const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreSingleStructPostIndex(
const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONModifiedImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar2RegMisc(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar2RegMiscFP16(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3Diff(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3Same(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3SameFP16(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3SameExtra(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarByIndexedElement(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarCopy(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarPairwise(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarShiftImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONShiftImmediate(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONTable(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONPerm(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitUnallocated(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitUnimplemented(const Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
} // namespace aarch64
} // namespace vixl

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,528 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "operands-aarch64.h"
namespace vixl {
namespace aarch64 {
// CPURegList utilities.
CPURegister CPURegList::PopLowestIndex() {
if (IsEmpty()) {
return NoCPUReg;
}
int index = CountTrailingZeros(list_);
VIXL_ASSERT((1 << index) & list_);
Remove(index);
return CPURegister(index, size_, type_);
}
CPURegister CPURegList::PopHighestIndex() {
VIXL_ASSERT(IsValid());
if (IsEmpty()) {
return NoCPUReg;
}
int index = CountLeadingZeros(list_);
index = kRegListSizeInBits - 1 - index;
VIXL_ASSERT((1 << index) & list_);
Remove(index);
return CPURegister(index, size_, type_);
}
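// Illustrative sketch (not part of the upstream VIXL sources): a CPURegList is
// a bit set over register codes, so the pops walk it in index order.
//
//   CPURegList list(CPURegister::kRegister, kXRegSize, 19, 21);  // {x19,x20,x21}
//   CPURegister lo = list.PopLowestIndex();   // x19; list is now {x20, x21}
//   CPURegister hi = list.PopHighestIndex();  // x21; list is now {x20}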
bool CPURegList::IsValid() const {
if ((type_ == CPURegister::kRegister) || (type_ == CPURegister::kVRegister)) {
bool is_valid = true;
// Try to create a CPURegister for each element in the list.
for (int i = 0; i < kRegListSizeInBits; i++) {
if (((list_ >> i) & 1) != 0) {
is_valid &= CPURegister(i, size_, type_).IsValid();
}
}
return is_valid;
} else if (type_ == CPURegister::kNoRegister) {
// We can't use IsEmpty here because that asserts IsValid().
return list_ == 0;
} else {
return false;
}
}
void CPURegList::RemoveCalleeSaved() {
if (GetType() == CPURegister::kRegister) {
Remove(GetCalleeSaved(GetRegisterSizeInBits()));
} else if (GetType() == CPURegister::kVRegister) {
Remove(GetCalleeSavedV(GetRegisterSizeInBits()));
} else {
VIXL_ASSERT(GetType() == CPURegister::kNoRegister);
VIXL_ASSERT(IsEmpty());
// The list must already be empty, so do nothing.
}
}
CPURegList CPURegList::Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3) {
return Union(list_1, Union(list_2, list_3));
}
CPURegList CPURegList::Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4) {
return Union(Union(list_1, list_2), Union(list_3, list_4));
}
CPURegList CPURegList::Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3) {
return Intersection(list_1, Intersection(list_2, list_3));
}
CPURegList CPURegList::Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4) {
return Intersection(Intersection(list_1, list_2),
Intersection(list_3, list_4));
}
CPURegList CPURegList::GetCalleeSaved(unsigned size) {
return CPURegList(CPURegister::kRegister, size, 19, 29);
}
CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
return CPURegList(CPURegister::kVRegister, size, 8, 15);
}
CPURegList CPURegList::GetCallerSaved(unsigned size) {
// Registers x0-x18 and lr (x30) are caller-saved.
CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
// Do not use lr directly to avoid initialisation order fiasco bugs for users.
list.Combine(Register(30, kXRegSize));
return list;
}
CPURegList CPURegList::GetCallerSavedV(unsigned size) {
// Registers d0-d7 and d16-d31 are caller-saved.
CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
return list;
}
const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();
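// Illustrative note (not part of the upstream VIXL sources): these lists encode
// the AAPCS64 split between callee- and caller-saved registers:
//
//   CPURegList::GetCalleeSaved()   // x19-x29
//   CPURegList::GetCallerSaved()   // x0-x18 plus lr (x30)
//   CPURegList::GetCalleeSavedV()  // d8-d15 (only the low 64 bits are preserved)
//   CPURegList::GetCallerSavedV()  // d0-d7 and d16-d31
//
// MacroAssembler::PushCPURegList()/PopCPURegList() (assumed available here) can
// spill and reload such a list around a call in one go.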
// Registers.
#define WREG(n) w##n,
const Register Register::wregisters[] = {AARCH64_REGISTER_CODE_LIST(WREG)};
#undef WREG
#define XREG(n) x##n,
const Register Register::xregisters[] = {AARCH64_REGISTER_CODE_LIST(XREG)};
#undef XREG
#define BREG(n) b##n,
const VRegister VRegister::bregisters[] = {AARCH64_REGISTER_CODE_LIST(BREG)};
#undef BREG
#define HREG(n) h##n,
const VRegister VRegister::hregisters[] = {AARCH64_REGISTER_CODE_LIST(HREG)};
#undef HREG
#define SREG(n) s##n,
const VRegister VRegister::sregisters[] = {AARCH64_REGISTER_CODE_LIST(SREG)};
#undef SREG
#define DREG(n) d##n,
const VRegister VRegister::dregisters[] = {AARCH64_REGISTER_CODE_LIST(DREG)};
#undef DREG
#define QREG(n) q##n,
const VRegister VRegister::qregisters[] = {AARCH64_REGISTER_CODE_LIST(QREG)};
#undef QREG
#define VREG(n) v##n,
const VRegister VRegister::vregisters[] = {AARCH64_REGISTER_CODE_LIST(VREG)};
#undef VREG
const Register& Register::GetWRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return wsp;
} else {
VIXL_ASSERT(code < kNumberOfRegisters);
return wregisters[code];
}
}
const Register& Register::GetXRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return sp;
} else {
VIXL_ASSERT(code < kNumberOfRegisters);
return xregisters[code];
}
}
const VRegister& VRegister::GetBRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return bregisters[code];
}
const VRegister& VRegister::GetHRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return hregisters[code];
}
const VRegister& VRegister::GetSRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return sregisters[code];
}
const VRegister& VRegister::GetDRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return dregisters[code];
}
const VRegister& VRegister::GetQRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return qregisters[code];
}
const VRegister& VRegister::GetVRegFromCode(unsigned code) {
VIXL_ASSERT(code < kNumberOfVRegisters);
return vregisters[code];
}
const Register& CPURegister::W() const {
VIXL_ASSERT(IsValidRegister());
return Register::GetWRegFromCode(code_);
}
const Register& CPURegister::X() const {
VIXL_ASSERT(IsValidRegister());
return Register::GetXRegFromCode(code_);
}
const VRegister& CPURegister::B() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetBRegFromCode(code_);
}
const VRegister& CPURegister::H() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetHRegFromCode(code_);
}
const VRegister& CPURegister::S() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetSRegFromCode(code_);
}
const VRegister& CPURegister::D() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetDRegFromCode(code_);
}
const VRegister& CPURegister::Q() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetQRegFromCode(code_);
}
const VRegister& CPURegister::V() const {
VIXL_ASSERT(IsValidVRegister());
return VRegister::GetVRegFromCode(code_);
}
// Operand.
Operand::Operand(int64_t immediate)
: immediate_(immediate),
reg_(NoReg),
shift_(NO_SHIFT),
extend_(NO_EXTEND),
shift_amount_(0) {}
Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
: reg_(reg),
shift_(shift),
extend_(NO_EXTEND),
shift_amount_(shift_amount) {
VIXL_ASSERT(shift != MSL);
VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
VIXL_ASSERT(!reg.IsSP());
}
Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
: reg_(reg),
shift_(NO_SHIFT),
extend_(extend),
shift_amount_(shift_amount) {
VIXL_ASSERT(reg.IsValid());
VIXL_ASSERT(shift_amount <= 4);
VIXL_ASSERT(!reg.IsSP());
// Extend modes SXTX and UXTX require a 64-bit register.
VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}
bool Operand::IsImmediate() const { return reg_.Is(NoReg); }
bool Operand::IsPlainRegister() const {
return reg_.IsValid() &&
(((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) ||
// No-op shifts.
((shift_ != NO_SHIFT) && (shift_amount_ == 0)) ||
// No-op extend operations.
// We can't include [US]XTW here without knowing more about the
// context; they are only no-ops for 32-bit operations.
//
// For example, this operand could be replaced with w1:
// __ Add(w0, w0, Operand(w1, UXTW));
// However, no plain register can replace it in this context:
// __ Add(x0, x0, Operand(w1, UXTW));
(((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0)));
}
bool Operand::IsShiftedRegister() const {
return reg_.IsValid() && (shift_ != NO_SHIFT);
}
bool Operand::IsExtendedRegister() const {
return reg_.IsValid() && (extend_ != NO_EXTEND);
}
bool Operand::IsZero() const {
if (IsImmediate()) {
return GetImmediate() == 0;
} else {
return GetRegister().IsZero();
}
}
Operand Operand::ToExtendedRegister() const {
VIXL_ASSERT(IsShiftedRegister());
VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
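// Illustrative sketch (not part of the upstream VIXL sources) of the three
// Operand flavours accepted by data-processing macros such as Add and Sub:
//
//   Operand(42)            // immediate
//   Operand(x1, LSL, 4)    // shifted register: x1 << 4
//   Operand(w2, UXTW, 2)   // extended register: zero-extend w2, then << 2
//
// For example, assuming a MacroAssembler `masm`:
//
//   masm.Add(x0, x0, Operand(x1, LSL, 4));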
// MemOperand
MemOperand::MemOperand()
: base_(NoReg),
regoffset_(NoReg),
offset_(0),
addrmode_(Offset),
shift_(NO_SHIFT),
extend_(NO_EXTEND) {}
MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
: base_(base),
regoffset_(NoReg),
offset_(offset),
addrmode_(addrmode),
shift_(NO_SHIFT),
extend_(NO_EXTEND),
shift_amount_(0) {
VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
}
MemOperand::MemOperand(Register base,
Register regoffset,
Extend extend,
unsigned shift_amount)
: base_(base),
regoffset_(regoffset),
offset_(0),
addrmode_(Offset),
shift_(NO_SHIFT),
extend_(extend),
shift_amount_(shift_amount) {
VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
VIXL_ASSERT(!regoffset.IsSP());
VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
// SXTX extend mode requires a 64-bit offset register.
VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}
MemOperand::MemOperand(Register base,
Register regoffset,
Shift shift,
unsigned shift_amount)
: base_(base),
regoffset_(regoffset),
offset_(0),
addrmode_(Offset),
shift_(shift),
extend_(NO_EXTEND),
shift_amount_(shift_amount) {
VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
VIXL_ASSERT(shift == LSL);
}
MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
: base_(base),
regoffset_(NoReg),
addrmode_(addrmode),
shift_(NO_SHIFT),
extend_(NO_EXTEND),
shift_amount_(0) {
VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
if (offset.IsImmediate()) {
offset_ = offset.GetImmediate();
} else if (offset.IsShiftedRegister()) {
VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));
regoffset_ = offset.GetRegister();
shift_ = offset.GetShift();
shift_amount_ = offset.GetShiftAmount();
extend_ = NO_EXTEND;
offset_ = 0;
// These assertions match those in the shifted-register constructor.
VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
VIXL_ASSERT(shift_ == LSL);
} else {
VIXL_ASSERT(offset.IsExtendedRegister());
VIXL_ASSERT(addrmode == Offset);
regoffset_ = offset.GetRegister();
extend_ = offset.GetExtend();
shift_amount_ = offset.GetShiftAmount();
shift_ = NO_SHIFT;
offset_ = 0;
// These assertions match those in the extended-register constructor.
VIXL_ASSERT(!regoffset_.IsSP());
VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
}
}
bool MemOperand::IsImmediateOffset() const {
return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}
bool MemOperand::IsRegisterOffset() const {
return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}
bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }
void MemOperand::AddOffset(int64_t offset) {
VIXL_ASSERT(IsImmediateOffset());
offset_ += offset;
}
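// Illustrative sketch (not part of the upstream VIXL sources) of the addressing
// modes a MemOperand can describe, e.g. for Ldr and Str:
//
//   MemOperand(x0, 16)             // [x0, #16]         immediate offset
//   MemOperand(x0, x1, LSL, 3)     // [x0, x1, lsl #3]  register offset
//   MemOperand(x0, w1, SXTW)       // [x0, w1, sxtw]    extended register offset
//   MemOperand(x0, 16, PreIndex)   // [x0, #16]!        pre-index writeback
//   MemOperand(x0, 16, PostIndex)  // [x0], #16         post-index writeback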
GenericOperand::GenericOperand(const CPURegister& reg)
: cpu_register_(reg), mem_op_size_(0) {
if (reg.IsQ()) {
VIXL_ASSERT(reg.GetSizeInBits() > static_cast<int>(kXRegSize));
// Support for Q registers is not implemented yet.
VIXL_UNIMPLEMENTED();
}
}
GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size)
: cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) {
if (mem_op_size_ > kXRegSizeInBytes) {
// We only support generic operands up to the size of X registers.
VIXL_UNIMPLEMENTED();
}
}
bool GenericOperand::Equals(const GenericOperand& other) const {
if (!IsValid() || !other.IsValid()) {
// Two invalid generic operands are considered equal.
return !IsValid() && !other.IsValid();
}
if (IsCPURegister() && other.IsCPURegister()) {
return GetCPURegister().Is(other.GetCPURegister());
} else if (IsMemOperand() && other.IsMemOperand()) {
return GetMemOperand().Equals(other.GetMemOperand()) &&
(GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes());
}
return false;
}
} // namespace aarch64
} // namespace vixl

View file

@@ -0,0 +1,197 @@
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
#include "simulator-aarch64.h"
#include "utils-vixl.h"
namespace vixl {
namespace aarch64 {
// Randomly generated example keys for simulating only.
const Simulator::PACKey Simulator::kPACKeyIA = {0xc31718727de20f71,
0xab9fd4e14b2fec51,
0};
const Simulator::PACKey Simulator::kPACKeyIB = {0xeebb163b474e04c8,
0x5267ac6fc280fb7c,
1};
const Simulator::PACKey Simulator::kPACKeyDA = {0x5caef808deb8b1e2,
0xd347cbc06b7b0f77,
0};
const Simulator::PACKey Simulator::kPACKeyDB = {0xe06aa1a949ba8cc7,
0xcfde69e3db6d0432,
1};
// The general PAC key isn't intended to be used with AuthPAC, so it is given an
// invalid key number; an assertion fires if it is used incorrectly.
const Simulator::PACKey Simulator::kPACKeyGA = {0xfcd98a44d564b3d5,
0x6c56df1904bf0ddc,
-1};
static uint64_t GetNibble(uint64_t in_data, int position) {
return (in_data >> position) & 0xf;
}
static uint64_t ShuffleNibbles(uint64_t in_data) {
static int in_positions[16] =
{4, 36, 52, 40, 44, 0, 24, 12, 56, 60, 8, 32, 16, 28, 20, 48};
uint64_t out_data = 0;
for (int i = 0; i < 16; i++) {
out_data |= GetNibble(in_data, in_positions[i]) << (4 * i);
}
return out_data;
}
static uint64_t SubstituteNibbles(uint64_t in_data) {
// Randomly chosen substitutes.
static uint64_t subs[16] =
{4, 7, 3, 9, 10, 14, 0, 1, 15, 2, 8, 6, 12, 5, 11, 13};
uint64_t out_data = 0;
for (int i = 0; i < 16; i++) {
int index = (in_data >> (4 * i)) & 0xf;
out_data |= subs[index] << (4 * i);
}
return out_data;
}
// Rotate nibble to the left by the amount specified.
static uint64_t RotNibble(uint64_t in_cell, int amount) {
VIXL_ASSERT((amount >= 0) && (amount <= 3));
in_cell &= 0xf;
uint64_t temp = (in_cell << 4) | in_cell;
return (temp >> (4 - amount)) & 0xf;
}
static uint64_t BigShuffle(uint64_t in_data) {
uint64_t out_data = 0;
for (int i = 0; i < 4; i++) {
uint64_t n12 = GetNibble(in_data, 4 * (i + 12));
uint64_t n8 = GetNibble(in_data, 4 * (i + 8));
uint64_t n4 = GetNibble(in_data, 4 * (i + 4));
uint64_t n0 = GetNibble(in_data, 4 * (i + 0));
uint64_t t0 = RotNibble(n8, 2) ^ RotNibble(n4, 1) ^ RotNibble(n0, 1);
uint64_t t1 = RotNibble(n12, 1) ^ RotNibble(n4, 2) ^ RotNibble(n0, 1);
uint64_t t2 = RotNibble(n12, 2) ^ RotNibble(n8, 1) ^ RotNibble(n0, 1);
uint64_t t3 = RotNibble(n12, 1) ^ RotNibble(n8, 1) ^ RotNibble(n4, 2);
out_data |= t3 << (4 * (i + 0));
out_data |= t2 << (4 * (i + 4));
out_data |= t1 << (4 * (i + 8));
out_data |= t0 << (4 * (i + 12));
}
return out_data;
}
// A simple, non-standard hash function invented for simulating. It mixes
// reasonably well; however, it is unlikely to be cryptographically secure and
// may have a higher collision chance than other hashing algorithms.
uint64_t Simulator::ComputePAC(uint64_t data, uint64_t context, PACKey key) {
uint64_t working_value = data ^ key.high;
working_value = BigShuffle(working_value);
working_value = ShuffleNibbles(working_value);
working_value ^= key.low;
working_value = ShuffleNibbles(working_value);
working_value = BigShuffle(working_value);
working_value ^= context;
working_value = SubstituteNibbles(working_value);
working_value = BigShuffle(working_value);
working_value = SubstituteNibbles(working_value);
return working_value;
}
// The TTBR is selected by bit 63 or 55 depending on TBI for pointers without
// codes, but is always 55 once a PAC code is added to a pointer. For this
// reason, it must be calculated at the call site.
uint64_t Simulator::CalculatePACMask(uint64_t ptr, PointerType type, int ttbr) {
int bottom_pac_bit = GetBottomPACBit(ptr, ttbr);
int top_pac_bit = GetTopPACBit(ptr, type);
return ExtractUnsignedBitfield64(top_pac_bit,
bottom_pac_bit,
0xffffffffffffffff & ~kTTBRMask)
<< bottom_pac_bit;
}
uint64_t Simulator::AuthPAC(uint64_t ptr,
uint64_t context,
PACKey key,
PointerType type) {
VIXL_ASSERT((key.number == 0) || (key.number == 1));
uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1);
uint64_t original_ptr =
((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
uint64_t pac = ComputePAC(original_ptr, context, key);
uint64_t error_code = 1 << key.number;
if ((pac & pac_mask) == (ptr & pac_mask)) {
return original_ptr;
} else {
int error_lsb = GetTopPACBit(ptr, type) - 2;
uint64_t error_mask = UINT64_C(0x3) << error_lsb;
return (original_ptr & ~error_mask) | (error_code << error_lsb);
}
}
uint64_t Simulator::AddPAC(uint64_t ptr,
uint64_t context,
PACKey key,
PointerType type) {
int top_pac_bit = GetTopPACBit(ptr, type);
// TODO: Properly handle the case where extension bits are bad and TBI is
// turned off, and also test me.
VIXL_ASSERT(HasTBI(ptr, type));
int ttbr = (ptr >> 55) & 1;
uint64_t pac_mask = CalculatePACMask(ptr, type, ttbr);
uint64_t ext_ptr = (ttbr == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
uint64_t pac = ComputePAC(ext_ptr, context, key);
// If the pointer isn't all zeroes or all ones in the PAC bitfield, corrupt
// the resulting code.
if (((ptr & (pac_mask | kTTBRMask)) != 0x0) &&
((~ptr & (pac_mask | kTTBRMask)) != 0x0)) {
pac ^= UINT64_C(1) << (top_pac_bit - 1);
}
uint64_t ttbr_shifted = static_cast<uint64_t>(ttbr) << 55;
return (pac & pac_mask) | ttbr_shifted | (ptr & ~pac_mask);
}
uint64_t Simulator::StripPAC(uint64_t ptr, PointerType type) {
uint64_t pac_mask = CalculatePACMask(ptr, type, (ptr >> 55) & 1);
return ((ptr & kTTBRMask) == 0) ? (ptr & ~pac_mask) : (ptr | pac_mask);
}
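// Illustrative sketch (not part of the upstream VIXL sources) of how the
// simulator models a PACIA/AUTIA round trip with these helpers. They are
// Simulator members, so assume an instance `sim` and an instruction-pointer
// PointerType:
//
//   uint64_t ptr = 0x0000000012345678;  // plain code pointer
//   uint64_t ctx = 0;                   // modifier (often sp or zero)
//   uint64_t signed_ptr =
//       sim.AddPAC(ptr, ctx, Simulator::kPACKeyIA, kInstructionPointer);
//   sim.AuthPAC(signed_ptr, ctx, Simulator::kPACKeyIA, kInstructionPointer);
//   // == ptr; authenticating with the wrong context or key instead plants an
//   // error code in the top bits so a later dereference faults.
//   sim.StripPAC(signed_ptr, kInstructionPointer);  // == ptr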
} // namespace aarch64
} // namespace vixl
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64

File diff suppressed because it is too large Load diff

View file

@@ -0,0 +1,178 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
extern "C" {
#include <sys/mman.h>
}
#include "code-buffer-vixl.h"
#include "utils-vixl.h"
namespace vixl {
CodeBuffer::CodeBuffer(size_t capacity)
: buffer_(NULL),
managed_(true),
cursor_(NULL),
dirty_(false),
capacity_(capacity) {
if (capacity_ == 0) {
return;
}
#ifdef VIXL_CODE_BUFFER_MALLOC
buffer_ = reinterpret_cast<byte*>(malloc(capacity_));
#elif defined(VIXL_CODE_BUFFER_MMAP)
buffer_ = reinterpret_cast<byte*>(mmap(NULL,
capacity,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1,
0));
#else
#error Unknown code buffer allocator.
#endif
VIXL_CHECK(buffer_ != NULL);
// AArch64 instructions must be word aligned; we assert that the default
// allocator always returns word-aligned memory.
VIXL_ASSERT(IsWordAligned(buffer_));
cursor_ = buffer_;
}
CodeBuffer::CodeBuffer(byte* buffer, size_t capacity)
: buffer_(reinterpret_cast<byte*>(buffer)),
managed_(false),
cursor_(reinterpret_cast<byte*>(buffer)),
dirty_(false),
capacity_(capacity) {
VIXL_ASSERT(buffer_ != NULL);
}
CodeBuffer::~CodeBuffer() {
VIXL_ASSERT(!IsDirty());
if (managed_) {
#ifdef VIXL_CODE_BUFFER_MALLOC
free(buffer_);
#elif defined(VIXL_CODE_BUFFER_MMAP)
munmap(buffer_, capacity_);
#else
#error Unknown code buffer allocator.
#endif
}
}
#ifdef VIXL_CODE_BUFFER_MMAP
void CodeBuffer::SetExecutable() {
int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_EXEC);
VIXL_CHECK(ret == 0);
}
#endif
#ifdef VIXL_CODE_BUFFER_MMAP
void CodeBuffer::SetWritable() {
int ret = mprotect(buffer_, capacity_, PROT_READ | PROT_WRITE);
VIXL_CHECK(ret == 0);
}
#endif
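// Illustrative sketch (not part of the upstream VIXL sources): with the mmap
// allocator the buffer starts out read+write, so a typical W^X lifecycle is to
// generate code, flip the mapping to executable, run, and flip it back before
// patching. Assuming `masm` is a MacroAssembler whose buffer this is:
//
//   masm.FinalizeCode();
//   masm.GetBuffer()->SetExecutable();  // mapping is now read+exec only
//   // ... call into the generated code ...
//   masm.GetBuffer()->SetWritable();    // read+write again before re-patching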
void CodeBuffer::EmitString(const char* string) {
VIXL_ASSERT(HasSpaceFor(strlen(string) + 1));
char* dst = reinterpret_cast<char*>(cursor_);
dirty_ = true;
char* null_char = stpcpy(dst, string);
cursor_ = reinterpret_cast<byte*>(null_char) + 1;
}
void CodeBuffer::EmitData(const void* data, size_t size) {
VIXL_ASSERT(HasSpaceFor(size));
dirty_ = true;
memcpy(cursor_, data, size);
cursor_ = cursor_ + size;
}
void CodeBuffer::UpdateData(size_t offset, const void* data, size_t size) {
dirty_ = true;
byte* dst = buffer_ + offset;
VIXL_ASSERT(dst + size <= cursor_);
memcpy(dst, data, size);
}
void CodeBuffer::Align() {
byte* end = AlignUp(cursor_, 4);
const size_t padding_size = end - cursor_;
VIXL_ASSERT(padding_size <= 4);
EmitZeroedBytes(static_cast<int>(padding_size));
}
void CodeBuffer::EmitZeroedBytes(int n) {
EnsureSpaceFor(n);
dirty_ = true;
memset(cursor_, 0, n);
cursor_ += n;
}
void CodeBuffer::Reset() {
#ifdef VIXL_DEBUG
if (managed_) {
// Fill with zeros (there is no useful value common to A32 and T32).
memset(buffer_, 0, capacity_);
}
#endif
cursor_ = buffer_;
SetClean();
}
void CodeBuffer::Grow(size_t new_capacity) {
VIXL_ASSERT(managed_);
VIXL_ASSERT(new_capacity > capacity_);
ptrdiff_t cursor_offset = GetCursorOffset();
#ifdef VIXL_CODE_BUFFER_MALLOC
buffer_ = static_cast<byte*>(realloc(buffer_, new_capacity));
VIXL_CHECK(buffer_ != NULL);
#elif defined(VIXL_CODE_BUFFER_MMAP)
buffer_ = static_cast<byte*>(
mremap(buffer_, capacity_, new_capacity, MREMAP_MAYMOVE));
VIXL_CHECK(buffer_ != MAP_FAILED);
#else
#error Unknown code buffer allocator.
#endif
cursor_ = buffer_ + cursor_offset;
capacity_ = new_capacity;
}
} // namespace vixl

View file

@@ -0,0 +1,144 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "compiler-intrinsics-vixl.h"
namespace vixl {
int CountLeadingSignBitsFallBack(int64_t value, int width) {
VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
return CountLeadingZeros(~value, width) - 1;
}
}
int CountLeadingZerosFallBack(uint64_t value, int width) {
VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
if (value == 0) {
return width;
}
int count = 0;
value = value << (64 - width);
if ((value & UINT64_C(0xffffffff00000000)) == 0) {
count += 32;
value = value << 32;
}
if ((value & UINT64_C(0xffff000000000000)) == 0) {
count += 16;
value = value << 16;
}
if ((value & UINT64_C(0xff00000000000000)) == 0) {
count += 8;
value = value << 8;
}
if ((value & UINT64_C(0xf000000000000000)) == 0) {
count += 4;
value = value << 4;
}
if ((value & UINT64_C(0xc000000000000000)) == 0) {
count += 2;
value = value << 2;
}
if ((value & UINT64_C(0x8000000000000000)) == 0) {
count += 1;
}
count += (value == 0);
return count;
}
int CountSetBitsFallBack(uint64_t value, int width) {
VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
// Mask out unused bits to ensure that they are not counted.
value &= (UINT64_C(0xffffffffffffffff) >> (64 - width));
// Add up the set bits.
// The algorithm works by adding pairs of bit fields together iteratively,
// where the size of each bit field doubles each time.
// An example for an 8-bit value:
// Bits: h g f e d c b a
// \ | \ | \ | \ |
// value = h+g f+e d+c b+a
// \ | \ |
// value = h+g+f+e d+c+b+a
// \ |
// value = h+g+f+e+d+c+b+a
const uint64_t kMasks[] = {
UINT64_C(0x5555555555555555),
UINT64_C(0x3333333333333333),
UINT64_C(0x0f0f0f0f0f0f0f0f),
UINT64_C(0x00ff00ff00ff00ff),
UINT64_C(0x0000ffff0000ffff),
UINT64_C(0x00000000ffffffff),
};
for (unsigned i = 0; i < (sizeof(kMasks) / sizeof(kMasks[0])); i++) {
int shift = 1 << i;
value = ((value >> shift) & kMasks[i]) + (value & kMasks[i]);
}
return static_cast<int>(value);
}
int CountTrailingZerosFallBack(uint64_t value, int width) {
VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
int count = 0;
value = value << (64 - width);
if ((value & UINT64_C(0xffffffff)) == 0) {
count += 32;
value = value >> 32;
}
if ((value & 0xffff) == 0) {
count += 16;
value = value >> 16;
}
if ((value & 0xff) == 0) {
count += 8;
value = value >> 8;
}
if ((value & 0xf) == 0) {
count += 4;
value = value >> 4;
}
if ((value & 0x3) == 0) {
count += 2;
value = value >> 2;
}
if ((value & 0x1) == 0) {
count += 1;
}
count += (value == 0);
return count - (64 - width);
}
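// Illustrative worked examples (not part of the upstream VIXL sources) for the
// fallback bit-twiddling above:
//
//   CountLeadingZerosFallBack(0xff, 64)   == 56
//   CountLeadingSignBitsFallBack(-1, 64)  == 63  // every bit is a sign bit
//   CountSetBitsFallBack(0xf0f0, 32)      == 8
//   CountTrailingZerosFallBack(0x8, 32)   == 3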
} // namespace vixl

View file

@@ -0,0 +1,211 @@
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <ostream>
#include "cpu-features.h"
#include "globals-vixl.h"
#include "utils-vixl.h"
namespace vixl {
static uint64_t MakeFeatureMask(CPUFeatures::Feature feature) {
if (feature == CPUFeatures::kNone) {
return 0;
} else {
// Check that the shift is well-defined, and that the feature is valid.
VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures <=
(sizeof(uint64_t) * 8));
VIXL_ASSERT(feature < CPUFeatures::kNumberOfFeatures);
return UINT64_C(1) << feature;
}
}
CPUFeatures::CPUFeatures(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3)
: features_(0) {
Combine(feature0, feature1, feature2, feature3);
}
CPUFeatures CPUFeatures::All() {
CPUFeatures all;
// Check that the shift is well-defined.
VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures < (sizeof(uint64_t) * 8));
all.features_ = (UINT64_C(1) << kNumberOfFeatures) - 1;
return all;
}
CPUFeatures CPUFeatures::InferFromOS() {
// TODO: Actually infer features from the OS.
return CPUFeatures();
}
void CPUFeatures::Combine(const CPUFeatures& other) {
features_ |= other.features_;
}
void CPUFeatures::Combine(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3) {
features_ |= MakeFeatureMask(feature0);
features_ |= MakeFeatureMask(feature1);
features_ |= MakeFeatureMask(feature2);
features_ |= MakeFeatureMask(feature3);
}
void CPUFeatures::Remove(const CPUFeatures& other) {
features_ &= ~other.features_;
}
void CPUFeatures::Remove(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3) {
features_ &= ~MakeFeatureMask(feature0);
features_ &= ~MakeFeatureMask(feature1);
features_ &= ~MakeFeatureMask(feature2);
features_ &= ~MakeFeatureMask(feature3);
}
CPUFeatures CPUFeatures::With(const CPUFeatures& other) const {
CPUFeatures f(*this);
f.Combine(other);
return f;
}
CPUFeatures CPUFeatures::With(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3) const {
CPUFeatures f(*this);
f.Combine(feature0, feature1, feature2, feature3);
return f;
}
CPUFeatures CPUFeatures::Without(const CPUFeatures& other) const {
CPUFeatures f(*this);
f.Remove(other);
return f;
}
CPUFeatures CPUFeatures::Without(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3) const {
CPUFeatures f(*this);
f.Remove(feature0, feature1, feature2, feature3);
return f;
}
bool CPUFeatures::Has(const CPUFeatures& other) const {
return (features_ & other.features_) == other.features_;
}
bool CPUFeatures::Has(Feature feature0,
Feature feature1,
Feature feature2,
Feature feature3) const {
uint64_t mask = MakeFeatureMask(feature0) | MakeFeatureMask(feature1) |
MakeFeatureMask(feature2) | MakeFeatureMask(feature3);
return (features_ & mask) == mask;
}
size_t CPUFeatures::Count() const { return CountSetBits(features_); }
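Combine and Remove mutate the set in place, With and Without return modified copies, Has answers subset queries, and Count reports the population count of the underlying mask. A usage sketch against the vendored headers: kFP and kNEON are assumed feature enumerators from cpu-features.h (not visible in this hunk), the remaining arguments are padded with kNone so the calls match the four-argument signatures defined above, and the include path assumes the dep's public include directory is on the compiler's search path.

#include <iostream>
#include "vixl/cpu-features.h"  // Assumed public include path for this dep.

int main() {
  using vixl::CPUFeatures;
  // kFP and kNEON are assumed enumerators; kNone pads the unused slots.
  CPUFeatures f(CPUFeatures::kFP, CPUFeatures::kNEON,
                CPUFeatures::kNone, CPUFeatures::kNone);
  // Subset query: true only if every requested bit is present.
  bool has_both = f.Has(CPUFeatures::kFP, CPUFeatures::kNEON,
                        CPUFeatures::kNone, CPUFeatures::kNone);
  // Value-style variant: `f` itself is left untouched.
  CPUFeatures without_fp = f.Without(CPUFeatures::kFP, CPUFeatures::kNone,
                                     CPUFeatures::kNone, CPUFeatures::kNone);
  std::cout << f.Count() << " " << has_both << " "
            << without_fp.Count() << std::endl;  // Expected: 2 1 1
  return 0;
}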
std::ostream& operator<<(std::ostream& os, CPUFeatures::Feature feature) {
// clang-format off
switch (feature) {
#define VIXL_FORMAT_FEATURE(SYMBOL, NAME, CPUINFO) \
case CPUFeatures::SYMBOL: \
return os << NAME;
VIXL_CPU_FEATURE_LIST(VIXL_FORMAT_FEATURE)
#undef VIXL_FORMAT_FEATURE
case CPUFeatures::kNone:
return os << "none";
case CPUFeatures::kNumberOfFeatures:
VIXL_UNREACHABLE();
}
// clang-format on
VIXL_UNREACHABLE();
return os;
}
CPUFeatures::const_iterator CPUFeatures::begin() const {
if (features_ == 0) return const_iterator(this, kNone);
int feature_number = CountTrailingZeros(features_);
vixl::CPUFeatures::Feature feature =
static_cast<CPUFeatures::Feature>(feature_number);
return const_iterator(this, feature);
}
CPUFeatures::const_iterator CPUFeatures::end() const {
return const_iterator(this, kNone);
}
std::ostream& operator<<(std::ostream& os, const CPUFeatures& features) {
CPUFeatures::const_iterator it = features.begin();
while (it != features.end()) {
os << *it;
++it;
if (it != features.end()) os << ", ";
}
return os;
}
bool CPUFeaturesConstIterator::operator==(
const CPUFeaturesConstIterator& other) const {
VIXL_ASSERT(IsValid());
return (cpu_features_ == other.cpu_features_) && (feature_ == other.feature_);
}
CPUFeatures::Feature CPUFeaturesConstIterator::operator++() { // Prefix
VIXL_ASSERT(IsValid());
do {
// Find the next feature. The order is unspecified.
feature_ = static_cast<CPUFeatures::Feature>(feature_ + 1);
if (feature_ == CPUFeatures::kNumberOfFeatures) {
feature_ = CPUFeatures::kNone;
VIXL_STATIC_ASSERT(CPUFeatures::kNone == -1);
}
VIXL_ASSERT(CPUFeatures::kNone <= feature_);
VIXL_ASSERT(feature_ < CPUFeatures::kNumberOfFeatures);
// cpu_features_->Has(kNone) is always true, so this will terminate even if
// the features list is empty.
} while (!cpu_features_->Has(feature_));
return feature_;
}
CPUFeatures::Feature CPUFeaturesConstIterator::operator++(int) { // Postfix
CPUFeatures::Feature result = feature_;
++(*this);
return result;
}
} // namespace vixl
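begin() seeks the lowest set bit, operator++ walks upwards through the remaining set bits, and kNone doubles as the end sentinel, which is exactly what the streaming operator above relies on to print a comma-separated list. A sketch of iterating a set directly, under the same assumptions about kFP/kNEON and the include path as the earlier sketch:

#include <iostream>
#include "vixl/cpu-features.h"  // Assumed public include path for this dep.

int main() {
  using vixl::CPUFeatures;
  CPUFeatures f(CPUFeatures::kFP, CPUFeatures::kNEON,  // Assumed enumerators.
                CPUFeatures::kNone, CPUFeatures::kNone);
  // Walks features in ascending enumerator order; ends when kNone is reached.
  for (CPUFeatures::const_iterator it = f.begin(); it != f.end(); ++it) {
    std::cout << *it << std::endl;  // Prints each feature's name.
  }
  std::cout << f << std::endl;  // Or print the whole set, comma-separated.
  return 0;
}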

555
dep/vixl/src/utils-vixl.cc Normal file
View file

@ -0,0 +1,555 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cstdio>
#include "utils-vixl.h"
namespace vixl {
// The default NaN values (for FPCR.DN=1).
const double kFP64DefaultNaN = RawbitsToDouble(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = RawbitsToFloat(0x7fc00000);
const Float16 kFP16DefaultNaN = RawbitsToFloat16(0x7e00);
// Floating-point zero values.
const Float16 kFP16PositiveZero = RawbitsToFloat16(0x0);
const Float16 kFP16NegativeZero = RawbitsToFloat16(0x8000);
// Floating-point infinity values.
const Float16 kFP16PositiveInfinity = RawbitsToFloat16(0x7c00);
const Float16 kFP16NegativeInfinity = RawbitsToFloat16(0xfc00);
const float kFP32PositiveInfinity = RawbitsToFloat(0x7f800000);
const float kFP32NegativeInfinity = RawbitsToFloat(0xff800000);
const double kFP64PositiveInfinity =
RawbitsToDouble(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
RawbitsToDouble(UINT64_C(0xfff0000000000000));
bool IsZero(Float16 value) {
uint16_t bits = Float16ToRawbits(value);
return (bits == Float16ToRawbits(kFP16PositiveZero) ||
bits == Float16ToRawbits(kFP16NegativeZero));
}
uint16_t Float16ToRawbits(Float16 value) { return value.rawbits_; }
uint32_t FloatToRawbits(float value) {
uint32_t bits = 0;
memcpy(&bits, &value, 4);
return bits;
}
uint64_t DoubleToRawbits(double value) {
uint64_t bits = 0;
memcpy(&bits, &value, 8);
return bits;
}
Float16 RawbitsToFloat16(uint16_t bits) {
Float16 f;
f.rawbits_ = bits;
return f;
}
float RawbitsToFloat(uint32_t bits) {
float value = 0.0;
memcpy(&value, &bits, 4);
return value;
}
double RawbitsToDouble(uint64_t bits) {
double value = 0.0;
memcpy(&value, &bits, 8);
return value;
}
uint32_t Float16Sign(internal::SimFloat16 val) {
uint16_t rawbits = Float16ToRawbits(val);
return ExtractUnsignedBitfield32(15, 15, rawbits);
}
uint32_t Float16Exp(internal::SimFloat16 val) {
uint16_t rawbits = Float16ToRawbits(val);
return ExtractUnsignedBitfield32(14, 10, rawbits);
}
uint32_t Float16Mantissa(internal::SimFloat16 val) {
uint16_t rawbits = Float16ToRawbits(val);
return ExtractUnsignedBitfield32(9, 0, rawbits);
}
uint32_t FloatSign(float val) {
uint32_t rawbits = FloatToRawbits(val);
return ExtractUnsignedBitfield32(31, 31, rawbits);
}
uint32_t FloatExp(float val) {
uint32_t rawbits = FloatToRawbits(val);
return ExtractUnsignedBitfield32(30, 23, rawbits);
}
uint32_t FloatMantissa(float val) {
uint32_t rawbits = FloatToRawbits(val);
return ExtractUnsignedBitfield32(22, 0, rawbits);
}
uint32_t DoubleSign(double val) {
uint64_t rawbits = DoubleToRawbits(val);
return static_cast<uint32_t>(ExtractUnsignedBitfield64(63, 63, rawbits));
}
uint32_t DoubleExp(double val) {
uint64_t rawbits = DoubleToRawbits(val);
return static_cast<uint32_t>(ExtractUnsignedBitfield64(62, 52, rawbits));
}
uint64_t DoubleMantissa(double val) {
uint64_t rawbits = DoubleToRawbits(val);
return ExtractUnsignedBitfield64(51, 0, rawbits);
}
internal::SimFloat16 Float16Pack(uint16_t sign,
uint16_t exp,
uint16_t mantissa) {
uint16_t bits = (sign << 15) | (exp << 10) | mantissa;
return RawbitsToFloat16(bits);
}
float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
return RawbitsToFloat(bits);
}
double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
uint64_t bits = (sign << 63) | (exp << 52) | mantissa;
return RawbitsToDouble(bits);
}
int Float16Classify(Float16 value) {
uint16_t bits = Float16ToRawbits(value);
uint16_t exponent_max = (1 << 5) - 1;
uint16_t exponent_mask = exponent_max << 10;
uint16_t mantissa_mask = (1 << 10) - 1;
uint16_t exponent = (bits & exponent_mask) >> 10;
uint16_t mantissa = bits & mantissa_mask;
if (exponent == 0) {
if (mantissa == 0) {
return FP_ZERO;
}
return FP_SUBNORMAL;
} else if (exponent == exponent_max) {
if (mantissa == 0) {
return FP_INFINITE;
}
return FP_NAN;
}
return FP_NORMAL;
}
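Float16Classify decodes the IEEE-754 half-precision layout directly: one sign bit, a 5-bit exponent in bits 14..10 and a 10-bit mantissa in bits 9..0, with a zero exponent meaning zero or subnormal and the all-ones exponent reserved for infinities and NaNs. A few hand-checked bit patterns, exercised through the raw-bits helpers defined earlier in this file (include path assumed as before):

#include <cassert>
#include <cmath>              // FP_ZERO, FP_SUBNORMAL, FP_NORMAL, ...
#include "vixl/utils-vixl.h"  // Assumed public include path for this dep.

int main() {
  using vixl::Float16Classify;
  using vixl::RawbitsToFloat16;
  assert(Float16Classify(RawbitsToFloat16(0x0000)) == FP_ZERO);       // +0.0
  assert(Float16Classify(RawbitsToFloat16(0x0001)) == FP_SUBNORMAL);  // Smallest subnormal.
  assert(Float16Classify(RawbitsToFloat16(0x3c00)) == FP_NORMAL);     // 1.0
  assert(Float16Classify(RawbitsToFloat16(0x7c00)) == FP_INFINITE);   // +infinity
  assert(Float16Classify(RawbitsToFloat16(0x7e00)) == FP_NAN);        // Default quiet NaN.
  return 0;
}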
unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
VIXL_ASSERT((reg_size % 8) == 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
if ((imm & 0xffff) == 0) {
count++;
}
imm >>= 16;
}
return count;
}
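CountClearHalfWords scans the immediate sixteen bits at a time and reports how many of its half-words are entirely zero, the kind of information an AArch64 immediate-materialisation path typically uses when choosing between MOVZ- and MOVN-based sequences. Two hand-worked cases; the sketch assumes the function is declared in utils-vixl.h alongside the other helpers:

#include <cassert>
#include <cstdint>
#include "vixl/utils-vixl.h"  // Assumed to declare CountClearHalfWords.

int main() {
  // Half-words, low to high: 0x0000, 0x0000, 0xffff, 0x0000 -> three are clear.
  assert(vixl::CountClearHalfWords(UINT64_C(0x0000ffff00000000), 64) == 3);
  // Half-words: 0x5678, 0x1234 -> none are clear.
  assert(vixl::CountClearHalfWords(UINT64_C(0x12345678), 32) == 0);
  return 0;
}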
int BitCount(uint64_t value) { return CountSetBits(value); }
// Float16 definitions.
Float16::Float16(double dvalue) {
rawbits_ =
Float16ToRawbits(FPToFloat16(dvalue, FPTieEven, kIgnoreDefaultNaN));
}
namespace internal {
SimFloat16 SimFloat16::operator-() const {
return RawbitsToFloat16(rawbits_ ^ 0x8000);
}
// SimFloat16 definitions.
SimFloat16 SimFloat16::operator+(SimFloat16 rhs) const {
return static_cast<double>(*this) + static_cast<double>(rhs);
}
SimFloat16 SimFloat16::operator-(SimFloat16 rhs) const {
return static_cast<double>(*this) - static_cast<double>(rhs);
}
SimFloat16 SimFloat16::operator*(SimFloat16 rhs) const {
return static_cast<double>(*this) * static_cast<double>(rhs);
}
SimFloat16 SimFloat16::operator/(SimFloat16 rhs) const {
return static_cast<double>(*this) / static_cast<double>(rhs);
}
bool SimFloat16::operator<(SimFloat16 rhs) const {
return static_cast<double>(*this) < static_cast<double>(rhs);
}
bool SimFloat16::operator>(SimFloat16 rhs) const {
return static_cast<double>(*this) > static_cast<double>(rhs);
}
bool SimFloat16::operator==(SimFloat16 rhs) const {
if (IsNaN(*this) || IsNaN(rhs)) {
return false;
} else if (IsZero(rhs) && IsZero(*this)) {
// +0 and -0 should be treated as equal.
return true;
}
return this->rawbits_ == rhs.rawbits_;
}
bool SimFloat16::operator!=(SimFloat16 rhs) const { return !(*this == rhs); }
bool SimFloat16::operator==(double rhs) const {
return static_cast<double>(*this) == static_cast<double>(rhs);
}
SimFloat16::operator double() const {
return FPToDouble(*this, kIgnoreDefaultNaN);
}
Int64 BitCount(Uint32 value) { return CountSetBits(value.Get()); }
} // namespace internal
float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception) {
uint16_t bits = Float16ToRawbits(value);
uint32_t sign = bits >> 15;
uint32_t exponent =
ExtractUnsignedBitfield32(kFloat16MantissaBits + kFloat16ExponentBits - 1,
kFloat16MantissaBits,
bits);
uint32_t mantissa =
ExtractUnsignedBitfield32(kFloat16MantissaBits - 1, 0, bits);
switch (Float16Classify(value)) {
case FP_ZERO:
return (sign == 0) ? 0.0f : -0.0f;
case FP_INFINITE:
return (sign == 0) ? kFP32PositiveInfinity : kFP32NegativeInfinity;
case FP_SUBNORMAL: {
// Calculate shift required to put mantissa into the most-significant bits
// of the destination mantissa.
int shift = CountLeadingZeros(mantissa << (32 - 10));
// Shift mantissa and discard implicit '1'.
mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1;
mantissa &= (1 << kFloatMantissaBits) - 1;
// Adjust the exponent for the shift applied, and rebias.
exponent = exponent - shift + (-15 + 127);
break;
}
case FP_NAN:
if (IsSignallingNaN(value)) {
if (exception != NULL) {
*exception = true;
}
}
if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
// Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred entirely, except that the top
// bit is forced to '1', making the result a quiet NaN. The unused
// (low-order) payload bits are set to 0.
exponent = (1 << kFloatExponentBits) - 1;
// Increase bits in mantissa, making low-order bits 0.
mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
mantissa |= 1 << 22; // Force a quiet NaN.
break;
case FP_NORMAL:
// Increase bits in mantissa, making low-order bits 0.
mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
// Change exponent bias.
exponent += (-15 + 127);
break;
default:
VIXL_UNREACHABLE();
}
return RawbitsToFloat((sign << 31) | (exponent << kFloatMantissaBits) |
mantissa);
}
float FPToFloat(double value,
FPRounding round_mode,
UseDefaultNaN DN,
bool* exception) {
// Only the FPTieEven and FPRoundOdd rounding modes are supported here.
VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
USE(round_mode);
switch (std::fpclassify(value)) {
case FP_NAN: {
if (IsSignallingNaN(value)) {
if (exception != NULL) {
*exception = true;
}
}
if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
// Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred as much as possible, except
// that the top bit is forced to '1', making the result a quiet NaN.
uint64_t raw = DoubleToRawbits(value);
uint32_t sign = raw >> 63;
uint32_t exponent = (1 << 8) - 1;
uint32_t payload =
static_cast<uint32_t>(ExtractUnsignedBitfield64(50, 52 - 23, raw));
payload |= (1 << 22); // Force a quiet NaN.
return RawbitsToFloat((sign << 31) | (exponent << 23) | payload);
}
case FP_ZERO:
case FP_INFINITE: {
// In a C++ cast, any value representable in the target type will be
// unchanged. This is always the case for +/-0.0 and infinities.
return static_cast<float>(value);
}
case FP_NORMAL:
case FP_SUBNORMAL: {
// Convert double-to-float as the processor would, assuming that FPCR.FZ
// (flush-to-zero) is not set.
uint64_t raw = DoubleToRawbits(value);
// Extract the IEEE-754 double components.
uint32_t sign = raw >> 63;
// Extract the exponent and remove the IEEE-754 encoding bias.
int32_t exponent =
static_cast<int32_t>(ExtractUnsignedBitfield64(62, 52, raw)) - 1023;
// Extract the mantissa and add the implicit '1' bit.
uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
if (std::fpclassify(value) == FP_NORMAL) {
mantissa |= (UINT64_C(1) << 52);
}
return FPRoundToFloat(sign, exponent, mantissa, round_mode);
}
}
VIXL_UNREACHABLE();
return value;
}
// TODO: We should consider implementing a full FPToDouble(Float16)
// conversion function (for performance reasons).
double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception) {
// We can rely on implicit float to double conversion here.
return FPToFloat(value, DN, exception);
}
double FPToDouble(float value, UseDefaultNaN DN, bool* exception) {
switch (std::fpclassify(value)) {
case FP_NAN: {
if (IsSignallingNaN(value)) {
if (exception != NULL) {
*exception = true;
}
}
if (DN == kUseDefaultNaN) return kFP64DefaultNaN;
// Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred entirely, except that the top
// bit is forced to '1', making the result a quiet NaN. The unused
// (low-order) payload bits are set to 0.
uint32_t raw = FloatToRawbits(value);
uint64_t sign = raw >> 31;
uint64_t exponent = (1 << 11) - 1;
uint64_t payload = ExtractUnsignedBitfield64(21, 0, raw);
payload <<= (52 - 23); // The unused low-order bits should be 0.
payload |= (UINT64_C(1) << 51); // Force a quiet NaN.
return RawbitsToDouble((sign << 63) | (exponent << 52) | payload);
}
case FP_ZERO:
case FP_NORMAL:
case FP_SUBNORMAL:
case FP_INFINITE: {
// All other inputs are preserved in a standard cast, because every value
// representable using an IEEE-754 float is also representable using an
// IEEE-754 double.
return static_cast<double>(value);
}
}
VIXL_UNREACHABLE();
return static_cast<double>(value);
}
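For NaN inputs the float-to-double path keeps the sign, shifts the 23-bit payload into the top of the 52-bit double mantissa, forces the quiet bit, and reports signalling inputs through the out-parameter. One hand-checked case, using only functions defined in this file (include path assumed as before): the signalling float NaN 0x7f800001 widens, under kIgnoreDefaultNaN, to the double with raw bits 0x7ff8000020000000.

#include <cassert>
#include <cstdint>
#include "vixl/utils-vixl.h"  // Assumed public include path for this dep.

int main() {
  bool exception = false;
  // 0x7f800001: float NaN with payload 1 and the quiet bit clear (signalling).
  double d = vixl::FPToDouble(vixl::RawbitsToFloat(0x7f800001),
                              vixl::kIgnoreDefaultNaN, &exception);
  // Payload 1 shifted up by (52 - 23) = 29 bits, quiet bit (bit 51) forced.
  assert(vixl::DoubleToRawbits(d) == UINT64_C(0x7ff8000020000000));
  assert(exception);  // The signalling input was flagged.
  return 0;
}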
Float16 FPToFloat16(float value,
FPRounding round_mode,
UseDefaultNaN DN,
bool* exception) {
// Only the FPTieEven rounding mode is implemented.
VIXL_ASSERT(round_mode == FPTieEven);
USE(round_mode);
uint32_t raw = FloatToRawbits(value);
int32_t sign = raw >> 31;
int32_t exponent = ExtractUnsignedBitfield32(30, 23, raw) - 127;
uint32_t mantissa = ExtractUnsignedBitfield32(22, 0, raw);
switch (std::fpclassify(value)) {
case FP_NAN: {
if (IsSignallingNaN(value)) {
if (exception != NULL) {
*exception = true;
}
}
if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
// Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred as much as possible, except
// that the top bit is forced to '1', making the result a quiet NaN.
uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
: Float16ToRawbits(kFP16NegativeInfinity);
result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits);
result |= (1 << 9); // Force a quiet NaN.
return RawbitsToFloat16(result);
}
case FP_ZERO:
return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
case FP_INFINITE:
return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
case FP_NORMAL:
case FP_SUBNORMAL: {
// Convert float-to-half as the processor would, assuming that FPCR.FZ
// (flush-to-zero) is not set.
// Add the implicit '1' bit to the mantissa.
mantissa += (1 << 23);
return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
}
}
VIXL_UNREACHABLE();
return kFP16PositiveZero;
}
Float16 FPToFloat16(double value,
FPRounding round_mode,
UseDefaultNaN DN,
bool* exception) {
// Only the FPTieEven rounding mode is implemented.
VIXL_ASSERT(round_mode == FPTieEven);
USE(round_mode);
uint64_t raw = DoubleToRawbits(value);
int32_t sign = raw >> 63;
int64_t exponent = ExtractUnsignedBitfield64(62, 52, raw) - 1023;
uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
switch (std::fpclassify(value)) {
case FP_NAN: {
if (IsSignallingNaN(value)) {
if (exception != NULL) {
*exception = true;
}
}
if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
// Convert NaNs as the processor would:
// - The sign is propagated.
// - The payload (mantissa) is transferred as much as possible, except
// that the top bit is forced to '1', making the result a quiet NaN.
uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
: Float16ToRawbits(kFP16NegativeInfinity);
result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits);
result |= (1 << 9); // Force a quiet NaN.
return RawbitsToFloat16(result);
}
case FP_ZERO:
return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
case FP_INFINITE:
return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
case FP_NORMAL:
case FP_SUBNORMAL: {
// Convert double-to-half as the processor would, assuming that FPCR.FZ
// (flush-to-zero) is not set.
// Add the implicit '1' bit to the mantissa.
mantissa += (UINT64_C(1) << 52);
return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
}
}
VIXL_UNREACHABLE();
return kFP16PositiveZero;
}
} // namespace vixl
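A value that half precision can represent exactly survives the narrowing and widening pair above unchanged, which makes for a convenient smoke test of the two conversions. A round-trip sketch for 1.0f (0x3f800000 -> 0x3c00 -> 0x3f800000), with the same include-path assumption as the earlier sketches:

#include <cassert>
#include "vixl/utils-vixl.h"  // Assumed public include path for this dep.

int main() {
  // Narrow 1.0f to half precision: exactly representable, so no rounding.
  vixl::Float16 h =
      vixl::FPToFloat16(1.0f, vixl::FPTieEven, vixl::kIgnoreDefaultNaN, nullptr);
  assert(vixl::Float16ToRawbits(h) == 0x3c00);  // 1.0 in IEEE-754 half.
  // Widen back to single precision and check the original bits reappear.
  float back = vixl::FPToFloat(h, vixl::kIgnoreDefaultNaN, nullptr);
  assert(vixl::FloatToRawbits(back) == 0x3f800000);
  return 0;
}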