From 49a4901c78c4ba99f6c6bc6a6b6ae7e64dbfa20d Mon Sep 17 00:00:00 2001 From: Stenzek Date: Tue, 30 May 2023 00:11:06 +1000 Subject: [PATCH] dep: Add biscuit and riscv-disas --- dep/CMakeLists.txt | 4 + dep/biscuit/CMakeLists.txt | 17 + dep/biscuit/LICENSE.md | 12 + dep/biscuit/README.md | 144 ++ dep/biscuit/clang-format | 88 + dep/biscuit/cmake/biscuit-config.cmake.in | 5 + dep/biscuit/include/biscuit/assembler.hpp | 1284 ++++++++++ dep/biscuit/include/biscuit/assert.hpp | 14 + dep/biscuit/include/biscuit/code_buffer.hpp | 211 ++ dep/biscuit/include/biscuit/cpuinfo.hpp | 101 + dep/biscuit/include/biscuit/csr.hpp | 390 +++ dep/biscuit/include/biscuit/isa.hpp | 49 + dep/biscuit/include/biscuit/label.hpp | 173 ++ dep/biscuit/include/biscuit/registers.hpp | 276 +++ dep/biscuit/include/biscuit/vector.hpp | 88 + dep/biscuit/src/CMakeLists.txt | 153 ++ dep/biscuit/src/assembler.cpp | 2376 +++++++++++++++++++ dep/biscuit/src/assembler_crypto.cpp | 149 ++ dep/biscuit/src/assembler_vector.cpp | 1951 +++++++++++++++ dep/biscuit/src/code_buffer.cpp | 111 + dep/biscuit/src/cpuinfo.cpp | 39 + dep/riscv-disas/CMakeLists.txt | 7 + dep/riscv-disas/README.md | 10 + dep/riscv-disas/include/riscv-disas.h | 520 ++++ dep/riscv-disas/source.txt | 1 + dep/riscv-disas/src/riscv-disas.c | 2276 ++++++++++++++++++ 26 files changed, 10449 insertions(+) create mode 100644 dep/biscuit/CMakeLists.txt create mode 100644 dep/biscuit/LICENSE.md create mode 100644 dep/biscuit/README.md create mode 100644 dep/biscuit/clang-format create mode 100644 dep/biscuit/cmake/biscuit-config.cmake.in create mode 100644 dep/biscuit/include/biscuit/assembler.hpp create mode 100644 dep/biscuit/include/biscuit/assert.hpp create mode 100644 dep/biscuit/include/biscuit/code_buffer.hpp create mode 100644 dep/biscuit/include/biscuit/cpuinfo.hpp create mode 100644 dep/biscuit/include/biscuit/csr.hpp create mode 100644 dep/biscuit/include/biscuit/isa.hpp create mode 100644 dep/biscuit/include/biscuit/label.hpp create mode 100644 dep/biscuit/include/biscuit/registers.hpp create mode 100644 dep/biscuit/include/biscuit/vector.hpp create mode 100644 dep/biscuit/src/CMakeLists.txt create mode 100644 dep/biscuit/src/assembler.cpp create mode 100644 dep/biscuit/src/assembler_crypto.cpp create mode 100644 dep/biscuit/src/assembler_vector.cpp create mode 100644 dep/biscuit/src/code_buffer.cpp create mode 100644 dep/biscuit/src/cpuinfo.cpp create mode 100644 dep/riscv-disas/CMakeLists.txt create mode 100644 dep/riscv-disas/README.md create mode 100644 dep/riscv-disas/include/riscv-disas.h create mode 100644 dep/riscv-disas/source.txt create mode 100644 dep/riscv-disas/src/riscv-disas.c diff --git a/dep/CMakeLists.txt b/dep/CMakeLists.txt index 2dcbc331b..8d344f7c0 100644 --- a/dep/CMakeLists.txt +++ b/dep/CMakeLists.txt @@ -35,3 +35,7 @@ if(${CPU_ARCH} STREQUAL "aarch32" OR ${CPU_ARCH} STREQUAL "aarch64") add_subdirectory(vixl) endif() +if(${CPU_ARCH} STREQUAL "riscv64") + add_subdirectory(biscuit) + add_subdirectory(riscv-disas) +endif() diff --git a/dep/biscuit/CMakeLists.txt b/dep/biscuit/CMakeLists.txt new file mode 100644 index 000000000..70c9196e4 --- /dev/null +++ b/dep/biscuit/CMakeLists.txt @@ -0,0 +1,17 @@ +cmake_minimum_required(VERSION 3.15) +project(biscuit VERSION 0.9.1) + +#include(CTest) + +option(BISCUIT_CODE_BUFFER_MMAP "Use mmap for handling code buffers instead of new" OFF) + +# Source directories +add_subdirectory(src) + +#if (BUILD_TESTING) +# add_subdirectory(tests) +#endif() + +#if (BUILD_EXAMPLES) +# add_subdirectory(examples) 
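+#
+# Illustrative note (not part of upstream): BISCUIT_CODE_BUFFER_MMAP above is a
+# regular CMake option, so it would typically be toggled at configure time, e.g.
+#   cmake -S . -B build -DBISCUIT_CODE_BUFFER_MMAP=ON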
+#endif()
diff --git a/dep/biscuit/LICENSE.md b/dep/biscuit/LICENSE.md
new file mode 100644
index 000000000..53cde664d
--- /dev/null
+++ b/dep/biscuit/LICENSE.md
@@ -0,0 +1,12 @@
+Copyright 2021 Lioncash/Lioncache
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
\ No newline at end of file
diff --git a/dep/biscuit/README.md b/dep/biscuit/README.md
new file mode 100644
index 000000000..e0d7c19e1
--- /dev/null
+++ b/dep/biscuit/README.md
@@ -0,0 +1,144 @@
+# Biscuit: RISC-V Runtime Code Generation Library
+
+*RISC it for the biscuit*
+
+## About
+
+An experimental runtime code generator for RISC-V.
+
+It allows for runtime code generation of RISC-V instructions, similar
+to how [Xbyak](https://github.com/herumi/xbyak) allows for runtime code generation of x86 instructions.
+
+
+## Implemented ISA Features
+
+Includes both 32-bit and 64-bit instructions for the following extensions:
+
+| Feature   | Version |
+|:----------|:-------:|
+| A         | 2.1     |
+| B         | 1.0     |
+| C         | 2.0     |
+| D         | 2.2     |
+| F         | 2.2     |
+| H         | 1.0 RC  |
+| K         | 1.0.1   |
+| M         | 2.0     |
+| N         | 1.1     |
+| Q         | 2.2     |
+| RV32I     | 2.1     |
+| RV64I     | 2.1     |
+| S         | 1.12    |
+| V         | 1.0     |
+| Sstc      | 0.5.4   |
+| Zfh       | 1.0     |
+| Zfhmin    | 1.0     |
+| Zicbom    | 1.0     |
+| Zicbop    | 1.0     |
+| Zicboz    | 1.0     |
+| Zicsr     | 2.0     |
+| Zifencei  | 2.0     |
+| Zihintntl | 0.2     |
+
+Note that, as a rule, only extensions considered ratified are implemented,
+since non-ratified documents are considerably more likely to have
+large changes made to them, which makes maintaining the instruction
+APIs a little annoying.
+
+
+## Dependencies
+
+Biscuit requires no external dependencies for its library other than the C++ standard library.
+The tests, however, use the Catch2 testing library. This is included in-tree, so there's no need
+to worry about installing it yourself if you wish to run said tests.
+
+
+## Building Biscuit
+
+1. Generate the build files for the project with CMake
+2. Hit the build button in your IDE of choice, or run the relevant console command to build for the CMake generator you've chosen.
+3. Done.
+
+
+## Running Tests
+
+1. Generate the build files for the project with CMake
+2. Build the tests
+3. Run the test executable directly, or enter `ctest` into your terminal.
+
+
+## License
+
+The library is licensed under the MIT license.
+
+While it's not a requirement whatsoever, it'd be pretty neat if you told me that you found the library useful :-)
+
+
+## Example
+
+The following is an adapted equivalent of the `strlen` implementation within the RISC-V bit manipulation extension specification.
+For brevity, it has been condensed to only handle little-endian platforms. + +```cpp +// We prepare some contiguous buffer and give the pointer to the beginning +// of the data and the total size of the buffer in bytes to the assembler. + +void strlen_example(uint8_t* buffer, size_t buffer_size) { + using namespace biscuit; + + constexpr int ptrlog = 3; + constexpr int szreg = 8; + + Assembler as(buffer, buffer_size); + Label done; + Label loop; + + as.ANDI(a3, a0, szreg - 1); // Offset + as.ANDI(a1, a0, 0xFF8); // Align pointer + + as.LI(a4, szreg); + as.SUB(a4, a4, a3); // XLEN - offset + as.SLLI(a3, a3, ptrlog); // offset * 8 + as.LD(a2, 0, a1); // Chunk + + // + // Shift the partial/unaligned chunk we loaded to remove the bytes + // from before the start of the string, adding NUL bytes at the end. + // + as.SRL(a2, a2, a3); // chunk >> (offset * 8) + as.ORCB(a2, a2); + as.NOT(a2, a2); + + // Non-NUL bytes in the string have been expanded to 0x00, while + // NUL bytes have become 0xff. Search for the first set bit + // (corresponding to a NUL byte in the original chunk). + as.CTZ(a2, a2); + + // The first chunk is special: compare against the number of valid + // bytes in this chunk. + as.SRLI(a0, a2, 3); + as.BGTU(a4, a0, &done); + as.ADDI(a3, a1, szreg); + as.LI(a4, -1); + + // Our critical loop is 4 instructions and processes data in 4 byte + // or 8 byte chunks. + as.Bind(&loop); + + as.LD(a2, szreg, a1); + as.ADDI(a1, a1, szreg); + as.ORCB(a2, a2); + as.BEQ(a2, a4, &loop); + + as.NOT(a2, a2); + as.CTZ(a2, a2); + as.SUB(a1, a1, a3); + as.ADD(a0, a0, a1); + as.SRLI(a2, a2, 3); + as.ADD(a0, a0, a2); + + as.Bind(&done); + + as.RET(); +} +``` diff --git a/dep/biscuit/clang-format b/dep/biscuit/clang-format new file mode 100644 index 000000000..1c6b71b2e --- /dev/null +++ b/dep/biscuit/clang-format @@ -0,0 +1,88 @@ +--- +Language: Cpp +# BasedOnStyle: LLVM +AccessModifierOffset: -4 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlinesLeft: false +AlignOperands: true +AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: false +AlwaysBreakTemplateDeclarations: true +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +ColumnLimit: 100 +CommentPragmas: '^ IWYU pragma:' +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] +IncludeCategories: + - Regex: '^\<[^Q][^/.>]*\>' + Priority: -2 + - Regex: '^\<' + Priority: -1 + - Regex: '^\"' + Priority: 0 +IndentCaseLabels: false +IndentWidth: 4 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: true +MacroBlockBegin: '' 
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakBeforeFirstCallParameter: 19
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 150
+PointerAlignment: Left
+ReflowComments: true
+SortIncludes: true
+SpaceAfterCStyleCast: false
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeParens: ControlStatements
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInContainerLiterals: true
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard: Cpp11
+TabWidth: 4
+UseTab: Never
+...
diff --git a/dep/biscuit/cmake/biscuit-config.cmake.in b/dep/biscuit/cmake/biscuit-config.cmake.in
new file mode 100644
index 000000000..46b180ab1
--- /dev/null
+++ b/dep/biscuit/cmake/biscuit-config.cmake.in
@@ -0,0 +1,5 @@
+@PACKAGE_INIT@
+
+include("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@-targets.cmake")
+
+check_required_components(@PROJECT_NAME@)
diff --git a/dep/biscuit/include/biscuit/assembler.hpp b/dep/biscuit/include/biscuit/assembler.hpp
new file mode 100644
index 000000000..34dc301a7
--- /dev/null
+++ b/dep/biscuit/include/biscuit/assembler.hpp
@@ -0,0 +1,1284 @@
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+#include <biscuit/code_buffer.hpp>
+#include <biscuit/csr.hpp>
+#include <biscuit/isa.hpp>
+#include <biscuit/label.hpp>
+#include <biscuit/registers.hpp>
+#include <biscuit/vector.hpp>
+
+namespace biscuit {
+
+/**
+ * Code generator for RISC-V code.
+ *
+ * User code may inherit from this in order to make use of
+ * the API more convenient, or use it separately if desired.
+ */
+class Assembler {
+public:
+    /**
+     * Constructor
+     *
+     * Initializes the underlying code buffer to be able to hold `capacity` bytes.
+     *
+     * @param capacity The capacity for the underlying code buffer in bytes.
+     *                 If no capacity is specified, then the underlying buffer
+     *                 will be 4KB in size.
+     */
+    [[nodiscard]] explicit Assembler(size_t capacity = CodeBuffer::default_capacity);
+
+    /**
+     * Constructor
+     *
+     * @param buffer   A non-null pointer to an allocated buffer of size `capacity`.
+     * @param capacity The capacity of the memory pointed to by `buffer`.
+     *
+     * @pre The given memory buffer must not be null.
+     * @pre The given memory buffer must be at minimum `capacity` bytes in size.
+     *
+     * @note The caller is responsible for managing the lifetime of the given memory.
+     *       CodeBuffer will *not* free the memory once it goes out of scope.
+     */
+    [[nodiscard]] explicit Assembler(uint8_t* buffer, size_t capacity);
+
+    // Copy constructor and assignment.
+    Assembler(const Assembler&) = delete;
+    Assembler& operator=(const Assembler&) = delete;
+
+    // Move constructor and assignment.
+    Assembler(Assembler&&) = default;
+    Assembler& operator=(Assembler&&) = default;
+
+    // Destructor
+    virtual ~Assembler();
+
+    /// Gets the underlying code buffer being managed by this assembler.
+    CodeBuffer& GetCodeBuffer();
+
+    /**
+     * Allows swapping out the code buffer used by the assembler.
+     *
+     * @param buffer The new buffer for the assembler to emit code into.
+     *
+     * @returns The old buffer that the assembler made use of.
+     */
+    CodeBuffer SwapCodeBuffer(CodeBuffer&& buffer) noexcept;
+
+    /**
+     * Allows rewinding of the code buffer cursor.
+     *
+     * @param offset The offset to rewind the cursor by.
+     *
+     * @note If no offset is provided, then this function rewinds the
+     *       cursor to the beginning of the buffer.
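+     *
+     * For example (illustrative): calling `RewindBuffer()` with no argument
+     * moves the cursor back to offset 0, so previously-emitted code can be
+     * overwritten and the buffer reused.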
+ * + * @note The offset may not be larger than the current cursor offset + * and may not be less than the current buffer starting address. + */ + void RewindBuffer(ptrdiff_t offset = 0) { + m_buffer.RewindCursor(offset); + } + + /// Retrieves the cursor pointer for the underlying code buffer. + [[nodiscard]] uint8_t* GetCursorPointer() noexcept { + return m_buffer.GetCursorPointer(); + } + + /// Retrieves the cursor for the underlying code buffer. + [[nodiscard]] const uint8_t* GetCursorPointer() const noexcept { + return m_buffer.GetCursorPointer(); + } + + /// Retrieves the pointer to an arbitrary location within the underlying code buffer. + [[nodiscard]] uint8_t* GetBufferPointer(ptrdiff_t offset) noexcept { + return m_buffer.GetOffsetPointer(offset); + } + + /// Retrieves the pointer to an arbitrary location within the underlying code buffer. + [[nodiscard]] const uint8_t* GetBufferPointer(ptrdiff_t offset) const noexcept { + return m_buffer.GetOffsetPointer(offset); + } + + /** + * Binds a label to the current offset within the code buffer + * + * @param label A non-null valid label to bind. + */ + void Bind(Label* label); + + // RV32I Instructions + + void ADD(GPR rd, GPR lhs, GPR rhs) noexcept; + void ADDI(GPR rd, GPR rs, int32_t imm) noexcept; + void AND(GPR rd, GPR lhs, GPR rhs) noexcept; + void ANDI(GPR rd, GPR rs, uint32_t imm) noexcept; + + void AUIPC(GPR rd, int32_t imm) noexcept; + + void BEQ(GPR rs1, GPR rs2, Label* label) noexcept; + void BEQZ(GPR rs, Label* label) noexcept; + void BGE(GPR rs1, GPR rs2, Label* label) noexcept; + void BGEU(GPR rs1, GPR rs2, Label* label) noexcept; + void BGEZ(GPR rs, Label* label) noexcept; + void BGT(GPR rs, GPR rt, Label* label) noexcept; + void BGTU(GPR rs, GPR rt, Label* label) noexcept; + void BGTZ(GPR rs, Label* label) noexcept; + void BLE(GPR rs, GPR rt, Label* label) noexcept; + void BLEU(GPR rs, GPR rt, Label* label) noexcept; + void BLEZ(GPR rs, Label* label) noexcept; + void BLT(GPR rs1, GPR rs2, Label* label) noexcept; + void BLTU(GPR rs1, GPR rs2, Label* label) noexcept; + void BLTZ(GPR rs, Label* label) noexcept; + void BNE(GPR rs1, GPR rs2, Label* label) noexcept; + void BNEZ(GPR rs, Label* label) noexcept; + + void BEQ(GPR rs1, GPR rs2, int32_t imm) noexcept; + void BEQZ(GPR rs, int32_t imm) noexcept; + void BGE(GPR rs1, GPR rs2, int32_t imm) noexcept; + void BGEU(GPR rs1, GPR rs2, int32_t imm) noexcept; + void BGEZ(GPR rs, int32_t imm) noexcept; + void BGT(GPR rs, GPR rt, int32_t imm) noexcept; + void BGTU(GPR rs, GPR rt, int32_t imm) noexcept; + void BGTZ(GPR rs, int32_t imm) noexcept; + void BLE(GPR rs, GPR rt, int32_t imm) noexcept; + void BLEU(GPR rs, GPR rt, int32_t imm) noexcept; + void BLEZ(GPR rs, int32_t imm) noexcept; + void BLT(GPR rs1, GPR rs2, int32_t imm) noexcept; + void BLTU(GPR rs1, GPR rs2, int32_t imm) noexcept; + void BLTZ(GPR rs, int32_t imm) noexcept; + void BNE(GPR rs1, GPR rs2, int32_t imm) noexcept; + void BNEZ(GPR rs, int32_t imm) noexcept; + + void CALL(int32_t offset) noexcept; + + void EBREAK() noexcept; + void ECALL() noexcept; + + void FENCE() noexcept; + void FENCE(FenceOrder pred, FenceOrder succ) noexcept; + void FENCEI(GPR rd = x0, GPR rs = x0, uint32_t imm = 0) noexcept; + void FENCETSO() noexcept; + + void J(Label* label) noexcept; + void JAL(Label* label) noexcept; + void JAL(GPR rd, Label* label) noexcept; + + void J(int32_t imm) noexcept; + void JAL(int32_t imm) noexcept; + void JAL(GPR rd, int32_t imm) noexcept; + void JALR(GPR rs) noexcept; + void JALR(GPR rd, int32_t imm, GPR rs1) 
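+    // A minimal usage sketch (illustrative, mirroring the README example):
+    // branches may reference a Label before it is bound, and Bind() attaches
+    // the label to the current buffer offset, e.g.
+    //     Label skip;
+    //     as.BEQZ(a0, &skip);  // forward branch, resolved once bound
+    //     as.ADDI(a1, a1, 1);
+    //     as.Bind(&skip);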
noexcept; + void JR(GPR rs) noexcept; + + void LB(GPR rd, int32_t imm, GPR rs) noexcept; + void LBU(GPR rd, int32_t imm, GPR rs) noexcept; + void LH(GPR rd, int32_t imm, GPR rs) noexcept; + void LHU(GPR rd, int32_t imm, GPR rs) noexcept; + void LI(GPR rd, uint32_t imm) noexcept; + void LUI(GPR rd, uint32_t imm) noexcept; + void LW(GPR rd, int32_t imm, GPR rs) noexcept; + + void MV(GPR rd, GPR rs) noexcept; + void NEG(GPR rd, GPR rs) noexcept; + + void NOP() noexcept; + + void NOT(GPR rd, GPR rs) noexcept; + void OR(GPR rd, GPR lhs, GPR rhs) noexcept; + void ORI(GPR rd, GPR rs, uint32_t imm) noexcept; + + void PAUSE() noexcept; + void RET() noexcept; + + void SB(GPR rs2, int32_t imm, GPR rs1) noexcept; + void SH(GPR rs2, int32_t imm, GPR rs1) noexcept; + void SW(GPR rs2, int32_t imm, GPR rs1) noexcept; + + void SEQZ(GPR rd, GPR rs) noexcept; + void SGTZ(GPR rd, GPR rs) noexcept; + + void SLL(GPR rd, GPR lhs, GPR rhs) noexcept; + void SLLI(GPR rd, GPR rs, uint32_t shift) noexcept; + + void SLT(GPR rd, GPR lhs, GPR rhs) noexcept; + void SLTI(GPR rd, GPR rs, int32_t imm) noexcept; + void SLTIU(GPR rd, GPR rs, int32_t imm) noexcept; + void SLTU(GPR rd, GPR lhs, GPR rhs) noexcept; + void SLTZ(GPR rd, GPR rs) noexcept; + + void SNEZ(GPR rd, GPR rs) noexcept; + + void SRA(GPR rd, GPR lhs, GPR rhs) noexcept; + void SRAI(GPR rd, GPR rs, uint32_t shift) noexcept; + + void SRL(GPR rd, GPR lhs, GPR rhs) noexcept; + void SRLI(GPR rd, GPR rs, uint32_t shift) noexcept; + + void SUB(GPR rd, GPR lhs, GPR rhs) noexcept; + + void XOR(GPR rd, GPR lhs, GPR rhs) noexcept; + void XORI(GPR rd, GPR rs, uint32_t imm) noexcept; + + // RV64I Base Instruction Set + + void ADDIW(GPR rd, GPR rs, int32_t imm) noexcept; + void ADDW(GPR rd, GPR lhs, GPR rhs) noexcept; + void LD(GPR rd, int32_t imm, GPR rs) noexcept; + void LWU(GPR rd, int32_t imm, GPR rs) noexcept; + void SD(GPR rs2, int32_t imm, GPR rs1) noexcept; + + // NOTE: Perhaps we should coalesce this into the 32-bit variant? + // Keeping them separated allows asserts for catching + // out of range shifts. 
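+    // Illustrative example of the distinction noted above: the 64-bit
+    // variants accept shift amounts in [0, 63], which the 32-bit forms
+    // can reject via assertions, e.g.
+    //     as.SLLI64(x1, x2, 40);  // only valid through the 64-bit variant
+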
+    void SRAI64(GPR rd, GPR rs, uint32_t shift) noexcept;
+    void SLLI64(GPR rd, GPR rs, uint32_t shift) noexcept;
+    void SRLI64(GPR rd, GPR rs, uint32_t shift) noexcept;
+
+    void SLLIW(GPR rd, GPR rs, uint32_t shift) noexcept;
+    void SRAIW(GPR rd, GPR rs, uint32_t shift) noexcept;
+    void SRLIW(GPR rd, GPR rs, uint32_t shift) noexcept;
+
+    void SLLW(GPR rd, GPR lhs, GPR rhs) noexcept;
+    void SRAW(GPR rd, GPR lhs, GPR rhs) noexcept;
+    void SRLW(GPR rd, GPR lhs, GPR rhs) noexcept;
+    void SUBW(GPR rd, GPR lhs, GPR rhs) noexcept;
+
+    // Zicsr Extension Instructions
+
+    void CSRRC(GPR rd, CSR csr, GPR rs) noexcept;
+    void CSRRCI(GPR rd, CSR csr, uint32_t imm) noexcept;
+    void CSRRS(GPR rd, CSR csr, GPR rs) noexcept;
+    void CSRRSI(GPR rd, CSR csr, uint32_t imm) noexcept;
+    void CSRRW(GPR rd, CSR csr, GPR rs) noexcept;
+    void CSRRWI(GPR rd, CSR csr, uint32_t imm) noexcept;
+
+    void CSRR(GPR rd, CSR csr) noexcept;
+    void CSRW(CSR csr, GPR rs) noexcept;
+
+    void CSRS(CSR csr, GPR rs) noexcept;
+    void CSRC(CSR csr, GPR rs) noexcept;
+
+    void CSRCI(CSR csr, uint32_t imm) noexcept;
+    void CSRSI(CSR csr, uint32_t imm) noexcept;
+    void CSRWI(CSR csr, uint32_t imm) noexcept;
+
+    void FRCSR(GPR rd) noexcept;
+    void FSCSR(GPR rd, GPR rs) noexcept;
+    void FSCSR(GPR rs) noexcept;
+
+    void FRRM(GPR rd) noexcept;
+    void FSRM(GPR rd, GPR rs) noexcept;
+    void FSRM(GPR rs) noexcept;
+
+    void FSRMI(GPR rd, uint32_t imm) noexcept;
+    void FSRMI(uint32_t imm) noexcept;
+
+    void FRFLAGS(GPR rd) noexcept;
+    void FSFLAGS(GPR rd, GPR rs) noexcept;
+    void FSFLAGS(GPR rs) noexcept;
+
+    void FSFLAGSI(GPR rd, uint32_t imm) noexcept;
+    void FSFLAGSI(uint32_t imm) noexcept;
+
+    void RDCYCLE(GPR rd) noexcept;
+    void RDCYCLEH(GPR rd) noexcept;
+
+    void RDINSTRET(GPR rd) noexcept;
+    void RDINSTRETH(GPR rd) noexcept;
+
+    void RDTIME(GPR rd) noexcept;
+    void RDTIMEH(GPR rd) noexcept;
+
+    // Zihintntl Extension Instructions
+
+    void C_NTL_ALL() noexcept;
+    void C_NTL_S1() noexcept;
+    void C_NTL_P1() noexcept;
+    void C_NTL_PALL() noexcept;
+    void NTL_ALL() noexcept;
+    void NTL_S1() noexcept;
+    void NTL_P1() noexcept;
+    void NTL_PALL() noexcept;
+
+    // RV32M Extension Instructions
+
+    void DIV(GPR rd, GPR rs1, GPR rs2) noexcept;
+    void DIVU(GPR rd, GPR rs1, GPR rs2) noexcept;
+    void MUL(GPR rd, GPR rs1, GPR rs2) noexcept;
+    void MULH(GPR rd, GPR rs1, GPR rs2) noexcept;
+    void MULHSU(GPR rd, GPR rs1, GPR rs2) noexcept;
+    void MULHU(GPR rd, GPR rs1, GPR rs2) noexcept;
+    void REM(GPR rd, GPR rs1, GPR rs2) noexcept;
+    void REMU(GPR rd, GPR rs1, GPR rs2) noexcept;
+
+    // RV64M Extension Instructions
+
+    void DIVW(GPR rd, GPR rs1, GPR rs2) noexcept;
+    void DIVUW(GPR rd, GPR rs1, GPR rs2) noexcept;
+    void MULW(GPR rd, GPR rs1, GPR rs2) noexcept;
+    void REMW(GPR rd, GPR rs1, GPR rs2) noexcept;
+    void REMUW(GPR rd, GPR rs1, GPR rs2) noexcept;
+
+    // RV32A Extension Instructions
+
+    void AMOADD_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept;
+    void AMOAND_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept;
+    void AMOMAX_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept;
+    void AMOMAXU_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept;
+    void AMOMIN_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept;
+    void AMOMINU_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept;
+    void AMOOR_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept;
+    void AMOSWAP_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept;
+    void AMOXOR_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept;
+    void LR_W(Ordering ordering, GPR rd,
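+    // A sketch of a typical LR/SC retry loop (illustrative; assumes an
+    // Ordering::AQRL enumerator providing acquire+release semantics):
+    //     Label retry;
+    //     as.Bind(&retry);
+    //     as.LR_W(Ordering::AQRL, a0, a1);      // a0 = [a1], with reservation
+    //     as.SC_W(Ordering::AQRL, a2, a0, a1);  // try to store a0 back to [a1]
+    //     as.BNEZ(a2, &retry);                  // nonzero a2 means the SC failed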
GPR rs) noexcept; + void SC_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept; + + // RV64A Extension Instructions + + void AMOADD_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept; + void AMOAND_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept; + void AMOMAX_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept; + void AMOMAXU_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept; + void AMOMIN_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept; + void AMOMINU_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept; + void AMOOR_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept; + void AMOSWAP_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept; + void AMOXOR_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept; + void LR_D(Ordering ordering, GPR rd, GPR rs) noexcept; + void SC_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept; + + // RV32F Extension Instructions + + void FADD_S(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FCLASS_S(GPR rd, FPR rs1) noexcept; + void FCVT_S_W(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_S_WU(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_W_S(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_WU_S(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FDIV_S(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FEQ_S(GPR rd, FPR rs1, FPR rs2) noexcept; + void FLE_S(GPR rd, FPR rs1, FPR rs2) noexcept; + void FLT_S(GPR rd, FPR rs1, FPR rs2) noexcept; + void FLW(FPR rd, int32_t offset, GPR rs) noexcept; + void FMADD_S(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FMAX_S(FPR rd, FPR rs1, FPR rs2) noexcept; + void FMIN_S(FPR rd, FPR rs1, FPR rs2) noexcept; + void FMSUB_S(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FMUL_S(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FMV_W_X(FPR rd, GPR rs1) noexcept; + void FMV_X_W(GPR rd, FPR rs1) noexcept; + void FNMADD_S(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FNMSUB_S(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FSGNJ_S(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSGNJN_S(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSGNJX_S(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSQRT_S(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FSUB_S(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FSW(FPR rs2, int32_t offset, GPR rs1) noexcept; + + void FABS_S(FPR rd, FPR rs) noexcept; + void FMV_S(FPR rd, FPR rs) noexcept; + void FNEG_S(FPR rd, FPR rs) noexcept; + + // RV64F Extension Instructions + + void FCVT_L_S(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_LU_S(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_S_L(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_S_LU(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + + // RV32D Extension Instructions + + void FADD_D(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FCLASS_D(GPR rd, FPR rs1) noexcept; + void FCVT_D_W(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_D_WU(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_W_D(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_WU_D(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_D_S(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_S_D(FPR rd, FPR rs1, RMode rmode = 
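+    // RMode selects the rounding mode encoded in the instruction, mirroring
+    // the RISC-V F-extension modes; a usage sketch (RMode::RTZ assumed):
+    //     as.FCVT_W_S(a0, f0, RMode::RTZ);  // truncating float-to-int convert
+    //     as.FADD_S(f0, f1, f2);            // default RMode::DYN defers to frm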
RMode::DYN) noexcept; + void FDIV_D(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FEQ_D(GPR rd, FPR rs1, FPR rs2) noexcept; + void FLE_D(GPR rd, FPR rs1, FPR rs2) noexcept; + void FLT_D(GPR rd, FPR rs1, FPR rs2) noexcept; + void FLD(FPR rd, int32_t offset, GPR rs) noexcept; + void FMADD_D(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FMAX_D(FPR rd, FPR rs1, FPR rs2) noexcept; + void FMIN_D(FPR rd, FPR rs1, FPR rs2) noexcept; + void FMSUB_D(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FMUL_D(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FNMADD_D(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FNMSUB_D(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FSGNJ_D(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSGNJN_D(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSGNJX_D(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSQRT_D(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FSUB_D(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FSD(FPR rs2, int32_t offset, GPR rs1) noexcept; + + void FABS_D(FPR rd, FPR rs) noexcept; + void FMV_D(FPR rd, FPR rs) noexcept; + void FNEG_D(FPR rd, FPR rs) noexcept; + + // RV64D Extension Instructions + + void FCVT_L_D(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_LU_D(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_D_L(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_D_LU(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FMV_D_X(FPR rd, GPR rs1) noexcept; + void FMV_X_D(GPR rd, FPR rs1) noexcept; + + // RV32Q Extension Instructions + + void FADD_Q(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FCLASS_Q(GPR rd, FPR rs1) noexcept; + void FCVT_Q_W(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_Q_WU(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_W_Q(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_WU_Q(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_Q_D(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_D_Q(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_Q_S(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_S_Q(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FDIV_Q(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FEQ_Q(GPR rd, FPR rs1, FPR rs2) noexcept; + void FLE_Q(GPR rd, FPR rs1, FPR rs2) noexcept; + void FLT_Q(GPR rd, FPR rs1, FPR rs2) noexcept; + void FLQ(FPR rd, int32_t offset, GPR rs) noexcept; + void FMADD_Q(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FMAX_Q(FPR rd, FPR rs1, FPR rs2) noexcept; + void FMIN_Q(FPR rd, FPR rs1, FPR rs2) noexcept; + void FMSUB_Q(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FMUL_Q(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FNMADD_Q(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FNMSUB_Q(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FSGNJ_Q(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSGNJN_Q(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSGNJX_Q(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSQRT_Q(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FSUB_Q(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FSQ(FPR rs2, int32_t offset, GPR rs1) 
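+    // Note the load/store operand order used throughout: register first, then
+    // immediate offset, then base register, matching standard assembly. An
+    // illustrative pair:
+    //     as.FLD(f0, 16, a1);  // fld f0, 16(a1)
+    //     as.FSD(f0, 16, a1);  // fsd f0, 16(a1)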
noexcept; + + void FABS_Q(FPR rd, FPR rs) noexcept; + void FMV_Q(FPR rd, FPR rs) noexcept; + void FNEG_Q(FPR rd, FPR rs) noexcept; + + // RV64Q Extension Instructions + + void FCVT_L_Q(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_LU_Q(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_Q_L(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_Q_LU(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + + // RV32Zfh Extension Instructions + + void FADD_H(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FCLASS_H(GPR rd, FPR rs1) noexcept; + void FCVT_D_H(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_H_D(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_H_Q(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_H_S(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_H_W(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_H_WU(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_Q_H(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_S_H(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_W_H(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_WU_H(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FDIV_H(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FEQ_H(GPR rd, FPR rs1, FPR rs2) noexcept; + void FLE_H(GPR rd, FPR rs1, FPR rs2) noexcept; + void FLH(FPR rd, int32_t offset, GPR rs) noexcept; + void FLT_H(GPR rd, FPR rs1, FPR rs2) noexcept; + void FMADD_H(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FMAX_H(FPR rd, FPR rs1, FPR rs2) noexcept; + void FMIN_H(FPR rd, FPR rs1, FPR rs2) noexcept; + void FMSUB_H(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FMUL_H(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + void FMV_H_X(FPR rd, GPR rs1) noexcept; + void FMV_X_H(GPR rd, FPR rs1) noexcept; + void FNMADD_H(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FNMSUB_H(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode = RMode::DYN) noexcept; + void FSGNJ_H(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSGNJN_H(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSGNJX_H(FPR rd, FPR rs1, FPR rs2) noexcept; + void FSH(FPR rs2, int32_t offset, GPR rs1) noexcept; + void FSQRT_H(FPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FSUB_H(FPR rd, FPR rs1, FPR rs2, RMode rmode = RMode::DYN) noexcept; + + // RV64Zfh Extension Instructions + + void FCVT_L_H(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_LU_H(GPR rd, FPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_H_L(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + void FCVT_H_LU(FPR rd, GPR rs1, RMode rmode = RMode::DYN) noexcept; + + // RVB Extension Instructions + + void ADDUW(GPR rd, GPR rs1, GPR rs2) noexcept; + void ANDN(GPR rd, GPR rs1, GPR rs2) noexcept; + void BCLR(GPR rd, GPR rs1, GPR rs2) noexcept; + void BCLRI(GPR rd, GPR rs, uint32_t bit) noexcept; + void BEXT(GPR rd, GPR rs1, GPR rs2) noexcept; + void BEXTI(GPR rd, GPR rs, uint32_t bit) noexcept; + void BINV(GPR rd, GPR rs1, GPR rs2) noexcept; + void BINVI(GPR rd, GPR rs, uint32_t bit) noexcept; + void BSET(GPR rd, GPR rs1, GPR rs2) noexcept; + void BSETI(GPR rd, GPR rs, uint32_t bit) noexcept; + void CLMUL(GPR rd, GPR rs1, GPR rs2) noexcept; + void CLMULH(GPR rd, GPR rs1, GPR rs2) noexcept; + void CLMULR(GPR rd, GPR rs1, GPR rs2) noexcept; + void CLZ(GPR 
rd, GPR rs) noexcept; + void CLZW(GPR rd, GPR rs) noexcept; + void CPOP(GPR rd, GPR rs) noexcept; + void CPOPW(GPR rd, GPR rs) noexcept; + void CTZ(GPR rd, GPR rs) noexcept; + void CTZW(GPR rd, GPR rs) noexcept; + void MAX(GPR rd, GPR rs1, GPR rs2) noexcept; + void MAXU(GPR rd, GPR rs1, GPR rs2) noexcept; + void MIN(GPR rd, GPR rs1, GPR rs2) noexcept; + void MINU(GPR rd, GPR rs1, GPR rs2) noexcept; + void ORCB(GPR rd, GPR rs) noexcept; + void ORN(GPR rd, GPR rs1, GPR rs2) noexcept; + void PACK(GPR rd, GPR rs1, GPR rs2) noexcept; + void PACKH(GPR rd, GPR rs1, GPR rs2) noexcept; + void PACKW(GPR rd, GPR rs1, GPR rs2) noexcept; + void REV8_32(GPR rd, GPR rs) noexcept; + void REV8_64(GPR rd, GPR rs) noexcept; + void REV_B(GPR rd, GPR rs) noexcept; + void ROL(GPR rd, GPR rs1, GPR rs2) noexcept; + void ROLW(GPR rd, GPR rs1, GPR rs2) noexcept; + void ROR(GPR rd, GPR rs1, GPR rs2) noexcept; + void RORI(GPR rd, GPR rs, uint32_t rotate_amount) noexcept; + void RORIW(GPR rd, GPR rs, uint32_t rotate_amount) noexcept; + void RORW(GPR rd, GPR rs1, GPR rs2) noexcept; + void SEXTB(GPR rd, GPR rs) noexcept; + void SEXTH(GPR rd, GPR rs) noexcept; + void SH1ADD(GPR rd, GPR rs1, GPR rs2) noexcept; + void SH1ADDUW(GPR rd, GPR rs1, GPR rs2) noexcept; + void SH2ADD(GPR rd, GPR rs1, GPR rs2) noexcept; + void SH2ADDUW(GPR rd, GPR rs1, GPR rs2) noexcept; + void SH3ADD(GPR rd, GPR rs1, GPR rs2) noexcept; + void SH3ADDUW(GPR rd, GPR rs1, GPR rs2) noexcept; + void SLLIUW(GPR rd, GPR rs, uint32_t shift_amount) noexcept; + void UNZIP(GPR rd, GPR rs) noexcept; + void XNOR(GPR rd, GPR rs1, GPR rs2) noexcept; + void XPERMB(GPR rd, GPR rs1, GPR rs2) noexcept; + void XPERMN(GPR rd, GPR rs1, GPR rs2) noexcept; + void ZEXTH_32(GPR rd, GPR rs) noexcept; + void ZEXTH_64(GPR rd, GPR rs) noexcept; + void ZEXTW(GPR rd, GPR rs) noexcept; + void ZIP(GPR rd, GPR rs) noexcept; + + // Scalar Cryptography (RVK) instructions + + void AES32DSI(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept; + void AES32DSMI(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept; + void AES32ESI(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept; + void AES32ESMI(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept; + void AES64DS(GPR rd, GPR rs1, GPR rs2) noexcept; + void AES64DSM(GPR rd, GPR rs1, GPR rs2) noexcept; + void AES64ES(GPR rd, GPR rs1, GPR rs2) noexcept; + void AES64ESM(GPR rd, GPR rs1, GPR rs2) noexcept; + void AES64IM(GPR rd, GPR rs) noexcept; + void AES64KS1I(GPR rd, GPR rs, uint32_t rnum) noexcept; + void AES64KS2(GPR rd, GPR rs1, GPR rs2) noexcept; + void SHA256SIG0(GPR rd, GPR rs) noexcept; + void SHA256SIG1(GPR rd, GPR rs) noexcept; + void SHA256SUM0(GPR rd, GPR rs) noexcept; + void SHA256SUM1(GPR rd, GPR rs) noexcept; + void SHA512SIG0(GPR rd, GPR rs) noexcept; + void SHA512SIG0H(GPR rd, GPR rs1, GPR rs2) noexcept; + void SHA512SIG0L(GPR rd, GPR rs1, GPR rs2) noexcept; + void SHA512SIG1(GPR rd, GPR rs) noexcept; + void SHA512SIG1H(GPR rd, GPR rs1, GPR rs2) noexcept; + void SHA512SIG1L(GPR rd, GPR rs1, GPR rs2) noexcept; + void SHA512SUM0(GPR rd, GPR rs) noexcept; + void SHA512SUM0R(GPR rd, GPR rs1, GPR rs2) noexcept; + void SHA512SUM1(GPR rd, GPR rs) noexcept; + void SHA512SUM1R(GPR rd, GPR rs1, GPR rs2) noexcept; + void SM3P0(GPR rd, GPR rs) noexcept; + void SM3P1(GPR rd, GPR rs) noexcept; + void SM4ED(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept; + void SM4KS(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept; + + // RVC Extension Instructions + + void C_ADD(GPR rd, GPR rs) noexcept; + void C_ADDI(GPR rd, int32_t imm) noexcept; + void C_ADDIW(GPR 
rd, int32_t imm) noexcept; + void C_ADDI4SPN(GPR rd, uint32_t imm) noexcept; + void C_ADDI16SP(int32_t imm) noexcept; + void C_ADDW(GPR rd, GPR rs) noexcept; + void C_AND(GPR rd, GPR rs) noexcept; + void C_ANDI(GPR rd, uint32_t imm) noexcept; + void C_BEQZ(GPR rs, int32_t offset) noexcept; + void C_BEQZ(GPR rs, Label* label) noexcept; + void C_BNEZ(GPR rs, int32_t offset) noexcept; + void C_BNEZ(GPR rs, Label* label) noexcept; + void C_EBREAK() noexcept; + void C_FLD(FPR rd, uint32_t imm, GPR rs) noexcept; + void C_FLDSP(FPR rd, uint32_t imm) noexcept; + void C_FLW(FPR rd, uint32_t imm, GPR rs) noexcept; + void C_FLWSP(FPR rd, uint32_t imm) noexcept; + void C_FSD(FPR rs2, uint32_t imm, GPR rs1) noexcept; + void C_FSDSP(FPR rs, uint32_t imm) noexcept; + void C_FSW(FPR rs2, uint32_t imm, GPR rs1) noexcept; + void C_FSWSP(FPR rs, uint32_t imm) noexcept; + void C_J(int32_t offset) noexcept; + void C_J(Label* label) noexcept; + void C_JAL(Label* label) noexcept; + void C_JAL(int32_t offset) noexcept; + void C_JALR(GPR rs) noexcept; + void C_JR(GPR rs) noexcept; + void C_LD(GPR rd, uint32_t imm, GPR rs) noexcept; + void C_LDSP(GPR rd, uint32_t imm) noexcept; + void C_LI(GPR rd, int32_t imm) noexcept; + void C_LQ(GPR rd, uint32_t imm, GPR rs) noexcept; + void C_LQSP(GPR rd, uint32_t imm) noexcept; + void C_LUI(GPR rd, uint32_t imm) noexcept; + void C_LW(GPR rd, uint32_t imm, GPR rs) noexcept; + void C_LWSP(GPR rd, uint32_t imm) noexcept; + void C_MV(GPR rd, GPR rs) noexcept; + void C_NOP() noexcept; + void C_OR(GPR rd, GPR rs) noexcept; + void C_SD(GPR rs2, uint32_t imm, GPR rs1) noexcept; + void C_SDSP(GPR rs, uint32_t imm) noexcept; + void C_SLLI(GPR rd, uint32_t shift) noexcept; + void C_SQ(GPR rs2, uint32_t imm, GPR rs1) noexcept; + void C_SQSP(GPR rs, uint32_t imm) noexcept; + void C_SRAI(GPR rd, uint32_t shift) noexcept; + void C_SRLI(GPR rd, uint32_t shift) noexcept; + void C_SUB(GPR rd, GPR rs) noexcept; + void C_SUBW(GPR rd, GPR rs) noexcept; + void C_SW(GPR rs2, uint32_t imm, GPR rs1) noexcept; + void C_SWSP(GPR rs, uint32_t imm) noexcept; + void C_UNDEF() noexcept; + void C_XOR(GPR rd, GPR rs) noexcept; + + // Cache Management Operation Extension Instructions + + void CBO_CLEAN(GPR rs) noexcept; + void CBO_FLUSH(GPR rs) noexcept; + void CBO_INVAL(GPR rs) noexcept; + void CBO_ZERO(GPR rs) noexcept; + void PREFETCH_I(GPR rs, int32_t offset = 0) noexcept; + void PREFETCH_R(GPR rs, int32_t offset = 0) noexcept; + void PREFETCH_W(GPR rs, int32_t offset = 0) noexcept; + + // Privileged Instructions + + void HFENCE_GVMA(GPR rs1, GPR rs2) noexcept; + void HFENCE_VVMA(GPR rs1, GPR rs2) noexcept; + void HINVAL_GVMA(GPR rs1, GPR rs2) noexcept; + void HINVAL_VVMA(GPR rs1, GPR rs2) noexcept; + void HLV_B(GPR rd, GPR rs) noexcept; + void HLV_BU(GPR rd, GPR rs) noexcept; + void HLV_D(GPR rd, GPR rs) noexcept; + void HLV_H(GPR rd, GPR rs) noexcept; + void HLV_HU(GPR rd, GPR rs) noexcept; + void HLV_W(GPR rd, GPR rs) noexcept; + void HLV_WU(GPR rd, GPR rs) noexcept; + void HLVX_HU(GPR rd, GPR rs) noexcept; + void HLVX_WU(GPR rd, GPR rs) noexcept; + void HSV_B(GPR rs2, GPR rs1) noexcept; + void HSV_D(GPR rs2, GPR rs1) noexcept; + void HSV_H(GPR rs2, GPR rs1) noexcept; + void HSV_W(GPR rs2, GPR rs1) noexcept; + void MRET() noexcept; + void SFENCE_INVAL_IR() noexcept; + void SFENCE_VMA(GPR rs1, GPR rs2) noexcept; + void SFENCE_W_INVAL() noexcept; + void SINVAL_VMA(GPR rs1, GPR rs2) noexcept; + void SRET() noexcept; + void URET() noexcept; + void WFI() noexcept; + + // Vector Extension Instructions + + // 
Vector Integer Instructions + + void VAADD(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VAADD(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VAADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VAADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VADC(Vec vd, Vec vs2, Vec vs1) noexcept; + void VADC(Vec vd, Vec vs2, GPR rs1) noexcept; + void VADC(Vec vd, Vec vs2, int32_t simm) noexcept; + + void VADD(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VADD(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VADD(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VAND(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VAND(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VAND(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VASUB(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VASUB(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VASUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VASUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VCOMPRESS(Vec vd, Vec vs2, Vec vs1) noexcept; + + void VDIV(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VDIV(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VDIVU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VDIVU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFIRST(GPR rd, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VID(Vec vd, VecMask mask = VecMask::No) noexcept; + + void VIOTA(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VMACC(Vec vd, GPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VMADC(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMADC(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VMADC(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VMADD(Vec vd, GPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VMAND(Vec vd, Vec vs2, Vec vs1) noexcept; + void VMANDNOT(Vec vd, Vec vs2, Vec vs1) noexcept; + void VMNAND(Vec vd, Vec vs2, Vec vs1) noexcept; + void VMNOR(Vec vd, Vec vs2, Vec vs1) noexcept; + void VMOR(Vec vd, Vec vs2, Vec vs1) noexcept; + void VMORNOT(Vec vd, Vec vs2, Vec vs1) noexcept; + void VMXNOR(Vec vd, Vec vs2, Vec vs1) noexcept; + void VMXOR(Vec vd, Vec vs2, Vec vs1) noexcept; + + void VMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMAX(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMAXU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMAXU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMERGE(Vec vd, Vec vs2, Vec vs1) noexcept; + void VMERGE(Vec vd, Vec vs2, GPR rs1) noexcept; + void VMERGE(Vec vd, Vec vs2, int32_t simm) noexcept; + + void VMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMIN(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMINU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMINU(Vec vd, Vec vs2, GPR rs1, VecMask mask = 
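+    // VecMask::No (the default) emits the unmasked encoding; a sketch of
+    // masked use (assuming the complementary VecMask::Yes value), with the
+    // mask held in v0 as the V spec requires:
+    //     as.VADD(v1, v2, v3, VecMask::Yes);  // vadd.vv v1, v2, v3, v0.t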
VecMask::No) noexcept; + + void VMSBC(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMSBC(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMSBF(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VMSIF(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VMSOF(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VMSEQ(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMSEQ(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VMSEQ(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VMSGT(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VMSGT(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VMSGTU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VMSGTU(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VMSLE(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMSLE(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VMSLE(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VMSLEU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMSLEU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VMSLEU(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VMSLT(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMSLT(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMSLTU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMSLTU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMSNE(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMSNE(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VMSNE(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMULH(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMULH(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMULHSU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMULHSU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMULHU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMULHU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMV(Vec vd, Vec vs1) noexcept; + void VMV(Vec vd, GPR rs1) noexcept; + void VMV(Vec vd, int32_t simm) noexcept; + + void VMV1R(Vec vd, Vec vs) noexcept; + void VMV2R(Vec vd, Vec vs) noexcept; + void VMV4R(Vec vd, Vec vs) noexcept; + void VMV8R(Vec vd, Vec vs) noexcept; + + void VMV_SX(Vec vd, GPR rs) noexcept; + void VMV_XS(GPR rd, Vec vs) noexcept; + + void VNCLIP(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VNCLIP(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VNCLIP(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VNCLIPU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VNCLIPU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VNCLIPU(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VNMSAC(Vec vd, 
GPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VNMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VNMSUB(Vec vd, GPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VNSRA(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VNSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VNSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VNSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VNSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VNSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VOR(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VOR(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VOR(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VPOPC(GPR rd, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VREDAND(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VREDMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VREDMAXU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VREDMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VREDMINU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VREDOR(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VREDXOR(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + + void VREM(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VREM(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VREMU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VREMU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VRGATHER(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VRGATHER(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VRGATHER(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VRGATHEREI16(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + + void VRSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VRSUB(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VSADD(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VSADD(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VSADD(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VSADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VSADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VSADDU(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VSBC(Vec vd, Vec vs2, Vec vs1) noexcept; + void VSBC(Vec vd, Vec vs2, GPR rs1) noexcept; + + void VSEXTVF2(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSEXTVF4(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSEXTVF8(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VSLIDE1DOWN(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VSLIDEDOWN(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VSLIDEDOWN(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VSLIDE1UP(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) 
noexcept; + void VSLIDEUP(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VSLIDEUP(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VSLL(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VSLL(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VSLL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VSMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VSMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VSRA(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VSSRA(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VSSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VSSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VSSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VSSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VSSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask = VecMask::No) noexcept; + + void VSSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VSSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VSSUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VSSUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VWADD(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VWADD(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VWADDW(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VWADDW(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VWADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VWADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VWADDUW(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VWADDUW(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VWMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VWMACC(Vec vd, GPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VWMACCSU(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VWMACCSU(Vec vd, GPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VWMACCU(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VWMACCU(Vec vd, GPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VWMACCUS(Vec vd, GPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VWMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VWMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VWMULSU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VWMULSU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VWMULU(Vec vd, Vec vs2, Vec vs1, VecMask mask = 
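+    // The VW* widening forms write a destination of twice the source EEW, so
+    // vd must name a legal wider register group (per the V spec), e.g.
+    //     as.VWMULU(v4, v6, v7);  // at LMUL=1, v4/v5 hold the widened product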
VecMask::No) noexcept; + void VWMULU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VWREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VWREDSUMU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + + void VWSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VWSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VWSUBW(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VWSUBW(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VWSUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VWSUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VWSUBUW(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VWSUBUW(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + + void VXOR(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VXOR(Vec vd, Vec vs2, GPR rs1, VecMask mask = VecMask::No) noexcept; + void VXOR(Vec vd, Vec vs2, int32_t simm, VecMask mask = VecMask::No) noexcept; + + void VZEXTVF2(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VZEXTVF4(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VZEXTVF8(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + + // Vector Floating-Point Instructions + + void VFADD(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFADD(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFCLASS(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VFCVT_F_X(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFCVT_F_XU(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFCVT_X_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFCVT_XU_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VFNCVT_F_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFNCVT_F_X(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFNCVT_F_XU(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFNCVT_ROD_F_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFNCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFNCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFNCVT_X_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFNCVT_XU_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VFWCVT_F_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFWCVT_F_X(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFWCVT_F_XU(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFWCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFWCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFWCVT_X_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFWCVT_XU_F(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VFDIV(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFDIV(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + void VFRDIV(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFREDMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFREDMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) 
noexcept; + + void VFREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFREDOSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + + void VFMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VFMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VFMADD(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFMAX(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFMERGE(Vec vd, Vec vs2, FPR rs1) noexcept; + + void VFMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFMIN(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VFMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VFMSUB(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFMUL(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFMV(Vec vd, FPR rs) noexcept; + void VFMV_FS(FPR rd, Vec vs) noexcept; + void VFMV_SF(Vec vd, FPR rs) noexcept; + + void VFNMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VFNMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFNMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VFNMADD(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VFNMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFNMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VFNMSUB(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFREC7(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VFSGNJ(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFSGNJ(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFSGNJN(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFSGNJN(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFSGNJX(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFSGNJX(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFSQRT(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + void VFRSQRT7(Vec vd, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VFSLIDE1DOWN(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + void VFSLIDE1UP(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + void VFRSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFWADD(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFWADD(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFWADDW(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFWADDW(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFWMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask = 
VecMask::No) noexcept; + void VFWMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFWMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFWMUL(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFWNMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VFWNMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFWNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VFWNMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFWREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFWREDOSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + + void VFWMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + void VFWMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask = VecMask::No) noexcept; + + void VFWSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFWSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VFWSUBW(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VFWSUBW(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMFEQ(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMFEQ(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMFGE(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + void VMFGT(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMFLE(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMFLE(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMFLT(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMFLT(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + void VMFNE(Vec vd, Vec vs2, Vec vs1, VecMask mask = VecMask::No) noexcept; + void VMFNE(Vec vd, Vec vs2, FPR rs1, VecMask mask = VecMask::No) noexcept; + + // Vector Load/Store Instructions + + void VLE8(Vec vd, GPR rs, VecMask mask = VecMask::No) noexcept; + void VLE16(Vec vd, GPR rs, VecMask mask = VecMask::No) noexcept; + void VLE32(Vec vd, GPR rs, VecMask mask = VecMask::No) noexcept; + void VLE64(Vec vd, GPR rs, VecMask mask = VecMask::No) noexcept; + void VLM(Vec vd, GPR rs) noexcept; + + void VLSE8(Vec vd, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VLSE16(Vec vd, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VLSE32(Vec vd, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VLSE64(Vec vd, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + + void VLOXEI8(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLOXEI16(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLOXEI32(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLOXEI64(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VLUXEI8(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLUXEI16(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLUXEI32(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLUXEI64(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VLE8FF(Vec vd, GPR rs, VecMask mask = VecMask::No) noexcept; + void VLE16FF(Vec vd, GPR rs, VecMask mask = VecMask::No) noexcept; + void VLE32FF(Vec vd, GPR rs, VecMask mask = VecMask::No) noexcept; + void VLE64FF(Vec vd, GPR rs, VecMask mask = 
VecMask::No) noexcept; + + void VLSEGE8(uint32_t num_segments, Vec vd, GPR rs, VecMask mask = VecMask::No) noexcept; + void VLSEGE16(uint32_t num_segments, Vec vd, GPR rs, VecMask mask = VecMask::No) noexcept; + void VLSEGE32(uint32_t num_segments, Vec vd, GPR rs, VecMask mask = VecMask::No) noexcept; + void VLSEGE64(uint32_t num_segments, Vec vd, GPR rs, VecMask mask = VecMask::No) noexcept; + + void VLSSEGE8(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VLSSEGE16(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VLSSEGE32(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VLSSEGE64(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + + void VLOXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLOXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLOXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLOXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VLUXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLUXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLUXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VLUXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VLRE8(uint32_t num_registers, Vec vd, GPR rs) noexcept; + void VL1RE8(Vec vd, GPR rs) noexcept; + void VL2RE8(Vec vd, GPR rs) noexcept; + void VL4RE8(Vec vd, GPR rs) noexcept; + void VL8RE8(Vec vd, GPR rs) noexcept; + + void VLRE16(uint32_t num_registers, Vec vd, GPR rs) noexcept; + void VL1RE16(Vec vd, GPR rs) noexcept; + void VL2RE16(Vec vd, GPR rs) noexcept; + void VL4RE16(Vec vd, GPR rs) noexcept; + void VL8RE16(Vec vd, GPR rs) noexcept; + + void VLRE32(uint32_t num_registers, Vec vd, GPR rs) noexcept; + void VL1RE32(Vec vd, GPR rs) noexcept; + void VL2RE32(Vec vd, GPR rs) noexcept; + void VL4RE32(Vec vd, GPR rs) noexcept; + void VL8RE32(Vec vd, GPR rs) noexcept; + + void VLRE64(uint32_t num_registers, Vec vd, GPR rs) noexcept; + void VL1RE64(Vec vd, GPR rs) noexcept; + void VL2RE64(Vec vd, GPR rs) noexcept; + void VL4RE64(Vec vd, GPR rs) noexcept; + void VL8RE64(Vec vd, GPR rs) noexcept; + + void VSE8(Vec vs, GPR rs, VecMask mask = VecMask::No) noexcept; + void VSE16(Vec vs, GPR rs, VecMask mask = VecMask::No) noexcept; + void VSE32(Vec vs, GPR rs, VecMask mask = VecMask::No) noexcept; + void VSE64(Vec vs, GPR rs, VecMask mask = VecMask::No) noexcept; + void VSM(Vec vs, GPR rs) noexcept; + + void VSSE8(Vec vs, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VSSE16(Vec vs, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VSSE32(Vec vs, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VSSE64(Vec vs, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + + void VSOXEI8(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSOXEI16(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSOXEI32(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSOXEI64(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VSUXEI8(Vec vd, GPR rs, Vec vs, VecMask mask = 
VecMask::No) noexcept; + void VSUXEI16(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSUXEI32(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSUXEI64(Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VSSEGE8(uint32_t num_segments, Vec vs, GPR rs, VecMask mask = VecMask::No) noexcept; + void VSSEGE16(uint32_t num_segments, Vec vs, GPR rs, VecMask mask = VecMask::No) noexcept; + void VSSEGE32(uint32_t num_segments, Vec vs, GPR rs, VecMask mask = VecMask::No) noexcept; + void VSSEGE64(uint32_t num_segments, Vec vs, GPR rs, VecMask mask = VecMask::No) noexcept; + + void VSSSEGE8(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VSSSEGE16(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VSSSEGE32(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + void VSSSEGE64(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask = VecMask::No) noexcept; + + void VSOXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSOXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSOXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSOXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VSUXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSUXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSUXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + void VSUXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask = VecMask::No) noexcept; + + void VSR(uint32_t num_registers, Vec vs, GPR rs) noexcept; + void VS1R(Vec vs, GPR rs) noexcept; + void VS2R(Vec vs, GPR rs) noexcept; + void VS4R(Vec vs, GPR rs) noexcept; + void VS8R(Vec vs, GPR rs) noexcept; + + // Vector Configuration Setting Instructions + + void VSETIVLI(GPR rd, uint32_t imm, SEW sew, LMUL lmul = LMUL::M1, VTA vta = VTA::No, VMA vma = VMA::No) noexcept; + void VSETVL(GPR rd, GPR rs1, GPR rs2) noexcept; + void VSETVLI(GPR rd, GPR rs, SEW sew, LMUL lmul = LMUL::M1, VTA vta = VTA::No, VMA vma = VMA::No) noexcept; + +private: + // Binds a label to a given offset. + void BindToOffset(Label* label, Label::LocationOffset offset); + + // Links the given label and returns the offset to it. + ptrdiff_t LinkAndGetOffset(Label* label); + + // Resolves all label offsets and patches any necessary + // branch offsets into the branch instructions that + // require them. 
+ void ResolveLabelOffsets(Label* label); + + CodeBuffer m_buffer; +}; + +} // namespace biscuit diff --git a/dep/biscuit/include/biscuit/assert.hpp b/dep/biscuit/include/biscuit/assert.hpp new file mode 100644 index 000000000..f6c5fa97d --- /dev/null +++ b/dep/biscuit/include/biscuit/assert.hpp @@ -0,0 +1,14 @@ +#pragma once + +#include <cstdio> +#include <cstdlib> + +#define BISCUIT_ASSERT(condition) \ + do { \ + if (!(condition)) { \ + std::printf("Assertion failed (%s)\nin %s, function %s line %i\n", \ + #condition, \ + __FILE__, __func__, __LINE__); \ + std::abort(); \ + } \ + } while (false) diff --git a/dep/biscuit/include/biscuit/code_buffer.hpp b/dep/biscuit/include/biscuit/code_buffer.hpp new file mode 100644 index 000000000..46314e480 --- /dev/null +++ b/dep/biscuit/include/biscuit/code_buffer.hpp @@ -0,0 +1,211 @@ +#pragma once + +#include <cstddef> +#include <cstdint> +#include <cstring> +#include <type_traits> + +#include <biscuit/assert.hpp> + +namespace biscuit { + +/** + * An arbitrarily sized buffer that code is written into. + * + * Also contains other member functions for manipulating + * the data within the code buffer. + */ +class CodeBuffer { +public: + // Default capacity of 4KB. + static constexpr size_t default_capacity = 4096; + + /** + * Constructor + * + * @param capacity The initial capacity of the code buffer in bytes. + */ + explicit CodeBuffer(size_t capacity = default_capacity); + + /** + * Constructor + * + * @param buffer A non-null pointer to an allocated buffer of size `capacity`. + * @param capacity The capacity of the memory pointed to by `buffer`. + * + * @pre The given memory buffer must not be null. + * @pre The given memory buffer must be at minimum `capacity` bytes in size. + * + * @note The caller is responsible for managing the lifetime of the given memory. + * CodeBuffer will *not* free the memory once it goes out of scope. + */ + explicit CodeBuffer(uint8_t* buffer, size_t capacity); + + // Copy constructor and assignment are deleted in order to prevent unintentional memory leaks. + CodeBuffer(const CodeBuffer&) = delete; + CodeBuffer& operator=(const CodeBuffer&) = delete; + + // Move constructing or moving the buffer in general is allowed, as it's a transfer of control. + CodeBuffer(CodeBuffer&& other) noexcept; + CodeBuffer& operator=(CodeBuffer&& other) noexcept; + + /** + * Destructor + * + * If a custom memory buffer is not given to the code buffer, + * then the code buffer will automatically free any memory + * it had allocated in order to be able to emit code. + */ + ~CodeBuffer() noexcept; + + /// Returns whether or not the memory is managed by the code buffer. + [[nodiscard]] bool IsManaged() const noexcept { return m_is_managed; } + + /// Retrieves the current cursor position within the buffer. + [[nodiscard]] ptrdiff_t GetCursorOffset() const noexcept { + return m_cursor - m_buffer; + } + + /// Retrieves the current address of the cursor within the buffer. + [[nodiscard]] uintptr_t GetCursorAddress() const noexcept { + return GetOffsetAddress(GetCursorOffset()); + } + + /// Retrieves the cursor pointer + [[nodiscard]] uint8_t* GetCursorPointer() noexcept { + return GetOffsetPointer(GetCursorOffset()); + } + + /// Retrieves the cursor pointer + [[nodiscard]] const uint8_t* GetCursorPointer() const noexcept { + return GetOffsetPointer(GetCursorOffset()); + } + + /// Retrieves the address of an arbitrary offset within the buffer. 
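+ /// (The result is simply the buffer's base address plus `offset`, converted to an integer; see GetOffsetPointer() below.)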
+ [[nodiscard]] uintptr_t GetOffsetAddress(ptrdiff_t offset) const noexcept { + return reinterpret_cast<uintptr_t>(GetOffsetPointer(offset)); + } + + /// Retrieves the pointer to an arbitrary location within the buffer. + [[nodiscard]] uint8_t* GetOffsetPointer(ptrdiff_t offset) noexcept { + BISCUIT_ASSERT(offset >= 0 && offset <= GetCursorOffset()); + return m_buffer + offset; + } + + /// Retrieves the pointer to an arbitrary location within the buffer. + [[nodiscard]] const uint8_t* GetOffsetPointer(ptrdiff_t offset) const noexcept { + BISCUIT_ASSERT(offset >= 0 && offset <= GetCursorOffset()); + return m_buffer + offset; + } + + /** + * Allows rewinding of the code buffer cursor. + * + * @param offset The offset to rewind the cursor by. + * + * @note If no offset is provided, then this function rewinds the + * cursor to the beginning of the buffer. + * + * @note The offset may not be larger than the current cursor offset + * and may not be less than the current buffer starting address. + */ + void RewindCursor(ptrdiff_t offset = 0) noexcept { + auto* rewound = m_buffer + offset; + BISCUIT_ASSERT(m_buffer <= rewound && rewound <= m_cursor); + m_cursor = rewound; + } + + /** + * Whether or not the underlying buffer has enough room for the + * given number of bytes. + * + * @param num_bytes The number of bytes to store in the buffer. + */ + [[nodiscard]] bool HasSpaceFor(size_t num_bytes) const noexcept { + return GetRemainingBytes() >= num_bytes; + } + + /// Returns the size of the data written to the buffer in bytes. + [[nodiscard]] size_t GetSizeInBytes() const noexcept { + EnsureBufferRange(); + return static_cast<size_t>(m_cursor - m_buffer); + } + + /// Returns the total number of remaining bytes in the buffer. + [[nodiscard]] size_t GetRemainingBytes() const noexcept { + EnsureBufferRange(); + return static_cast<size_t>((m_buffer + m_capacity) - m_cursor); + } + + /** + * Grows the underlying memory of the code buffer + * + * @param new_capacity The new capacity of the code buffer in bytes. + * + * @pre The underlying memory of the code buffer *must* be managed + * by the code buffer itself. Attempts to grow the buffer + * with memory that is not managed by it will result in + * an assertion being hit. + * + * @note Calling this with a new capacity that is less than or equal + * to the current capacity of the buffer will result in + * this function doing nothing. + */ + void Grow(size_t new_capacity); + + /** + * Emits a given value into the code buffer. + * + * @param value The value to emit into the code buffer. + * @tparam T A trivially-copyable type. + */ + template <typename T> + void Emit(T value) noexcept { + static_assert(std::is_trivially_copyable_v<T>, + "It's undefined behavior to memcpy a non-trivially-copyable type."); + BISCUIT_ASSERT(HasSpaceFor(sizeof(T))); + + std::memcpy(m_cursor, &value, sizeof(T)); + m_cursor += sizeof(T); + } + + /// Emits a 16-bit value into the code buffer. + void Emit16(uint32_t value) noexcept { + Emit(static_cast<uint16_t>(value)); + } + + /// Emits a 32-bit value into the code buffer. + void Emit32(uint32_t value) noexcept { + Emit(value); + } + + /** + * Sets the internal code buffer to be executable. + * + * @note This will make the contained region of memory non-writable + * to satisfy operating under W^X contexts. To make the + * region writable again, use SetWritable(). + */ + void SetExecutable(); + + /** + * Sets the internal code buffer to be writable + * + * @note This will make the contained region of memory non-executable + * to satisfy operating under W^X contexts. 
To make the region + * executable again, use SetExecutable(). + */ + void SetWritable(); + +private: + void EnsureBufferRange() const noexcept { + BISCUIT_ASSERT(m_cursor >= m_buffer && m_cursor <= m_buffer + m_capacity); + } + + uint8_t* m_buffer = nullptr; + uint8_t* m_cursor = nullptr; + size_t m_capacity = 0; + bool m_is_managed = false; +}; + +} // namespace biscuit diff --git a/dep/biscuit/include/biscuit/cpuinfo.hpp b/dep/biscuit/include/biscuit/cpuinfo.hpp new file mode 100644 index 000000000..b5efa7397 --- /dev/null +++ b/dep/biscuit/include/biscuit/cpuinfo.hpp @@ -0,0 +1,101 @@ +// Copyright (c), 2022, KNS Group LLC (YADRO) +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file or at +// https://opensource.org/licenses/MIT. + +#pragma once + +#include <biscuit/assembler.hpp> +#include <biscuit/csr.hpp> +#include <biscuit/isa.hpp> +#include <biscuit/registers.hpp> + +#if defined(__linux__) && defined(__riscv) +#include <asm/hwcap.h> +#include <sys/auxv.h> +#include +#endif + +namespace biscuit { + +#ifndef COMPAT_HWCAP_ISA_I +#define COMPAT_HWCAP_ISA_I (1U << ('I' - 'A')) +#endif + +#ifndef COMPAT_HWCAP_ISA_M +#define COMPAT_HWCAP_ISA_M (1U << ('M' - 'A')) +#endif + +#ifndef COMPAT_HWCAP_ISA_A +#define COMPAT_HWCAP_ISA_A (1U << ('A' - 'A')) +#endif + +#ifndef COMPAT_HWCAP_ISA_F +#define COMPAT_HWCAP_ISA_F (1U << ('F' - 'A')) +#endif + +#ifndef COMPAT_HWCAP_ISA_D +#define COMPAT_HWCAP_ISA_D (1U << ('D' - 'A')) +#endif + +#ifndef COMPAT_HWCAP_ISA_C +#define COMPAT_HWCAP_ISA_C (1U << ('C' - 'A')) +#endif + +#ifndef COMPAT_HWCAP_ISA_V +#define COMPAT_HWCAP_ISA_V (1U << ('V' - 'A')) +#endif + +enum class RISCVExtension : uint64_t { + I = COMPAT_HWCAP_ISA_I, + M = COMPAT_HWCAP_ISA_M, + A = COMPAT_HWCAP_ISA_A, + F = COMPAT_HWCAP_ISA_F, + D = COMPAT_HWCAP_ISA_D, + C = COMPAT_HWCAP_ISA_C, + V = COMPAT_HWCAP_ISA_V +}; + +template <CSR csr> +struct CSRReader : public biscuit::Assembler { + // Buffer capacity exactly for 2 instructions. + static constexpr size_t capacity = 8; + + CSRReader() : biscuit::Assembler{CSRReader::capacity} { + CSRR(a0, csr); + RET(); + } + + // Copy constructor and assignment. + CSRReader(const CSRReader&) = delete; + CSRReader& operator=(const CSRReader&) = delete; + + // Move constructor and assignment. + CSRReader(CSRReader&&) = default; + CSRReader& operator=(CSRReader&&) = default; + + template <typename CSRReaderFunc> + CSRReaderFunc GetCode() { + this->GetCodeBuffer().SetExecutable(); + return reinterpret_cast<CSRReaderFunc>(this->GetBufferPointer(0)); + } +}; + +/** + * Class that detects information about a RISC-V CPU. + */ +class CPUInfo { +public: + /** + * Checks if a particular RISC-V extension is available. + * + * @param extension The extension to check. + */ + bool Has(RISCVExtension extension) const; + + /// Returns the vector register length in bytes. 
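+ /// (vlenb is defined as VLEN/8, so an implementation with 128-bit vector registers reports 16; one way to read it is with the CSRReader helper above.)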
+ uint32_t GetVlenb() const; +}; + +} // namespace biscuit diff --git a/dep/biscuit/include/biscuit/csr.hpp b/dep/biscuit/include/biscuit/csr.hpp new file mode 100644 index 000000000..79bfe7440 --- /dev/null +++ b/dep/biscuit/include/biscuit/csr.hpp @@ -0,0 +1,390 @@ +#pragma once + +#include <cstdint> + +namespace biscuit { + +// Control and Status Register +enum class CSR : uint32_t { + // clang-format off + + // User-level CSRs + + UStatus = 0x000, // User status register + UIE = 0x004, // User interrupt-enable register + UTVEC = 0x005, // User trap handler base address + UScratch = 0x040, // Scratch register for user trap handlers + UEPC = 0x041, // User exception program counter + UCause = 0x042, // User trap cause + UTVal = 0x043, // User bad address or instruction + UIP = 0x044, // User interrupt pending + + FFlags = 0x001, // Floating-point Accrued Exceptions + FRM = 0x002, // Floating-point Dynamic Rounding Mode + FCSR = 0x003, // Floating-point Control and Status Register (frm + fflags) + + Cycle = 0xC00, // Cycle counter for RDCYCLE instruction. + Time = 0xC01, // Timer for RDTIME instruction. + InstRet = 0xC02, // Instructions retired counter for RDINSTRET instruction. + HPMCounter3 = 0xC03, // Performance-monitoring counter. + HPMCounter4 = 0xC04, // Performance-monitoring counter. + HPMCounter5 = 0xC05, // Performance-monitoring counter. + HPMCounter6 = 0xC06, // Performance-monitoring counter. + HPMCounter7 = 0xC07, // Performance-monitoring counter. + HPMCounter8 = 0xC08, // Performance-monitoring counter. + HPMCounter9 = 0xC09, // Performance-monitoring counter. + HPMCounter10 = 0xC0A, // Performance-monitoring counter. + HPMCounter11 = 0xC0B, // Performance-monitoring counter. + HPMCounter12 = 0xC0C, // Performance-monitoring counter. + HPMCounter13 = 0xC0D, // Performance-monitoring counter. + HPMCounter14 = 0xC0E, // Performance-monitoring counter. + HPMCounter15 = 0xC0F, // Performance-monitoring counter. + HPMCounter16 = 0xC10, // Performance-monitoring counter. + HPMCounter17 = 0xC11, // Performance-monitoring counter. + HPMCounter18 = 0xC12, // Performance-monitoring counter. + HPMCounter19 = 0xC13, // Performance-monitoring counter. + HPMCounter20 = 0xC14, // Performance-monitoring counter. + HPMCounter21 = 0xC15, // Performance-monitoring counter. + HPMCounter22 = 0xC16, // Performance-monitoring counter. + HPMCounter23 = 0xC17, // Performance-monitoring counter. + HPMCounter24 = 0xC18, // Performance-monitoring counter. + HPMCounter25 = 0xC19, // Performance-monitoring counter. + HPMCounter26 = 0xC1A, // Performance-monitoring counter. + HPMCounter27 = 0xC1B, // Performance-monitoring counter. + HPMCounter28 = 0xC1C, // Performance-monitoring counter. + HPMCounter29 = 0xC1D, // Performance-monitoring counter. + HPMCounter30 = 0xC1E, // Performance-monitoring counter. + HPMCounter31 = 0xC1F, // Performance-monitoring counter. + CycleH = 0xC80, // Upper 32 bits of cycle, RV32I only. + TimeH = 0xC81, // Upper 32 bits of time, RV32I only. + InstRetH = 0xC82, // Upper 32 bits of instret, RV32I only. + HPMCounter3H = 0xC83, // Upper 32 bits of HPMCounter3, RV32I only. + HPMCounter4H = 0xC84, // Upper 32 bits of HPMCounter4, RV32I only. + HPMCounter5H = 0xC85, // Upper 32 bits of HPMCounter5, RV32I only. + HPMCounter6H = 0xC86, // Upper 32 bits of HPMCounter6, RV32I only. + HPMCounter7H = 0xC87, // Upper 32 bits of HPMCounter7, RV32I only. + HPMCounter8H = 0xC88, // Upper 32 bits of HPMCounter8, RV32I only. + HPMCounter9H = 0xC89, // Upper 32 bits of HPMCounter9, RV32I only. 
+ HPMCounter10H = 0xC8A, // Upper 32 bits of HPMCounter10, RV32I only. + HPMCounter11H = 0xC8B, // Upper 32 bits of HPMCounter11, RV32I only. + HPMCounter12H = 0xC8C, // Upper 32 bits of HPMCounter12, RV32I only. + HPMCounter13H = 0xC8D, // Upper 32 bits of HPMCounter13, RV32I only. + HPMCounter14H = 0xC8E, // Upper 32 bits of HPMCounter14, RV32I only. + HPMCounter15H = 0xC8F, // Upper 32 bits of HPMCounter15, RV32I only. + HPMCounter16H = 0xC90, // Upper 32 bits of HPMCounter16, RV32I only. + HPMCounter17H = 0xC91, // Upper 32 bits of HPMCounter17, RV32I only. + HPMCounter18H = 0xC92, // Upper 32 bits of HPMCounter18, RV32I only. + HPMCounter19H = 0xC93, // Upper 32 bits of HPMCounter19, RV32I only. + HPMCounter20H = 0xC94, // Upper 32 bits of HPMCounter20, RV32I only. + HPMCounter21H = 0xC95, // Upper 32 bits of HPMCounter21, RV32I only. + HPMCounter22H = 0xC96, // Upper 32 bits of HPMCounter22, RV32I only. + HPMCounter23H = 0xC97, // Upper 32 bits of HPMCounter23, RV32I only. + HPMCounter24H = 0xC98, // Upper 32 bits of HPMCounter24, RV32I only. + HPMCounter25H = 0xC99, // Upper 32 bits of HPMCounter25, RV32I only. + HPMCounter26H = 0xC9A, // Upper 32 bits of HPMCounter26, RV32I only. + HPMCounter27H = 0xC9B, // Upper 32 bits of HPMCounter27, RV32I only. + HPMCounter28H = 0xC9C, // Upper 32 bits of HPMCounter28, RV32I only. + HPMCounter29H = 0xC9D, // Upper 32 bits of HPMCounter29, RV32I only. + HPMCounter30H = 0xC9E, // Upper 32 bits of HPMCounter30, RV32I only. + HPMCounter31H = 0xC9F, // Upper 32 bits of HPMCounter31, RV32I only. + + // Supervisor-level CSRs + + SStatus = 0x100, // Supervisor status register + SEDeleg = 0x102, // Supervisor exception delegation register + SIDeleg = 0x103, // Supervisor interrupt delegation register + SIE = 0x104, // Supervisor interrupt-enable register + STVec = 0x105, // Supervisor trap handler base address + SCounterEn = 0x106, // Supervisor counter enable + + SEnvCfg = 0x10A, // Supervisor environment configuration register + + SScratch = 0x140, // Scratch register for supervisor trap handlers + SEPC = 0x141, // Supervisor exception program counter + SCause = 0x142, // Supervisor trap cause + STVal = 0x143, // Supervisor bad address or instruction + SIP = 0x144, // Supervisor interrupt pending. 
+ + STimeCmp = 0x14D, // Supervisor timer register + STimeCmpH = 0x15D, // Supervisor timer register, RV32 only + + SATP = 0x180, // Supervisor address translation and protection + + SContext = 0x5A8, // Supervisor-mode context register + + // Hypervisor-level CSRs + + HStatus = 0x600, // Hypervisor status register + HEDeleg = 0x602, // Hypervisor exception delegation register + HIDeleg = 0x603, // Hypervisor interrupt delegation register + HIE = 0x604, // Hypervisor interrupt-enable register + HCounterEn = 0x606, // Hypervisor counter enable + HGEIE = 0x607, // Hypervisor guest external interrupt-enable register + + HTVal = 0x643, // Hypervisor bad guest physical address + HIP = 0x644, // Hypervisor interrupt pending + HVIP = 0x645, // Hypervisor virtual interrupt pending + HTInst = 0x64A, // Hypervisor trap instruction (transformed) + HGEIP = 0xE12, // Hypervisor guest external interrupt pending + + HEnvCfg = 0x60A, // Hypervisor environment configuration register + HEnvCfgH = 0x61A, // Additional hypervisor environment configuration register, RV32 only + + HGATP = 0x680, // Hypervisor guest address translation and protection + + HContext = 0x6A8, // Hypervisor-mode context register + + HTimeDelta = 0x605, // Delta for VS/VU-mode timer + HTimeDeltaH = 0x615, // Upper 32 bits of HTimeDelta, HSXLEN=32 only + + VSStatus = 0x200, // Virtual supervisor status register + VSIE = 0x204, // Virtual supervisor interrupt-enable register + VSTVec = 0x205, // Virtual supervisor trap handler base address + VSScratch = 0x240, // Virtual supervisor scratch register + VSEPC = 0x241, // Virtual supervisor exception program counter + VSCause = 0x242, // Virtual supervisor trap cause + VSTVal = 0x243, // Virtual supervisor bad address or instruction + VSIP = 0x244, // Virtual supervisor interrupt pending + + VSTimeCmp = 0x24D, // Virtual supervisor timer register + VSTimeCmpH = 0x25D, // Virtual supervisor timer register, RV32 only + + VSATP = 0x280, // Virtual supervisor address translation and protection + + // Machine-level CSRs + + MVendorID = 0xF11, // Vendor ID + MArchID = 0xF12, // Architecture ID + MImpID = 0xF13, // Implementation ID + MHartID = 0xF14, // Hardware Thread ID + MConfigPtr = 0xF15, // Pointer to configuration data structure + + MStatus = 0x300, // Machine status register + MISA = 0x301, // ISA and extensions + MEDeleg = 0x302, // Machine exception delegation register + MIDeleg = 0x303, // Machine interrupt delegation register + MIE = 0x304, // Machine interrupt-enable register + MTVec = 0x305, // Machine trap-handler base address + MCounterEn = 0x306, // Machine counter enable + MStatusH = 0x310, // Additional machine status register, RV32 only + + MScratch = 0x340, // Scratch register for machine trap handlers + MEPC = 0x341, // Machine exception program counter + MCause = 0x342, // Machine trap cause + MTVal = 0x343, // Machine bad address or instruction + MIP = 0x344, // Machine interrupt pending + MTInst = 0x34A, // Machine trap instruction (transformed) + MTVal2 = 0x34B, // Machine bad guest physical address + + MEnvCfg = 0x30A, // Machine environment configuration register + MEnvCfgH = 0x31A, // Additional machine environment configuration register, RV32 only + MSecCfg = 0x747, // Machine security configuration register + MSecCfgH = 0x757, // Additional machine security configuration register, RV32 only + + PMPCfg0 = 0x3A0, // Physical memory protection configuration + PMPCfg1 = 0x3A1, // Physical memory protection configuration, RV32 only + PMPCfg2 = 0x3A2, // Physical memory 
protection configuration + PMPCfg3 = 0x3A3, // Physical memory protection configuration, RV32 only + PMPCfg4 = 0x3A4, // Physical memory protection configuration + PMPCfg5 = 0x3A5, // Physical memory protection configuration, RV32 only + PMPCfg6 = 0x3A6, // Physical memory protection configuration + PMPCfg7 = 0x3A7, // Physical memory protection configuration, RV32 only + PMPCfg8 = 0x3A8, // Physical memory protection configuration + PMPCfg9 = 0x3A9, // Physical memory protection configuration, RV32 only + PMPCfg10 = 0x3AA, // Physical memory protection configuration + PMPCfg11 = 0x3AB, // Physical memory protection configuration, RV32 only + PMPCfg12 = 0x3AC, // Physical memory protection configuration + PMPCfg13 = 0x3AD, // Physical memory protection configuration, RV32 only + PMPCfg14 = 0x3AE, // Physical memory protection configuration + PMPCfg15 = 0x3AF, // Physical memory protection configuration, RV32 only + PMPAddr0 = 0x3B0, // Physical memory protection address register + PMPAddr1 = 0x3B1, // Physical memory protection address register + PMPAddr2 = 0x3B2, // Physical memory protection address register + PMPAddr3 = 0x3B3, // Physical memory protection address register + PMPAddr4 = 0x3B4, // Physical memory protection address register + PMPAddr5 = 0x3B5, // Physical memory protection address register + PMPAddr6 = 0x3B6, // Physical memory protection address register + PMPAddr7 = 0x3B7, // Physical memory protection address register + PMPAddr8 = 0x3B8, // Physical memory protection address register + PMPAddr9 = 0x3B9, // Physical memory protection address register + PMPAddr10 = 0x3BA, // Physical memory protection address register + PMPAddr11 = 0x3BB, // Physical memory protection address register + PMPAddr12 = 0x3BC, // Physical memory protection address register + PMPAddr13 = 0x3BD, // Physical memory protection address register + PMPAddr14 = 0x3BE, // Physical memory protection address register + PMPAddr15 = 0x3BF, // Physical memory protection address register + PMPAddr16 = 0x3C0, // Physical memory protection address register + PMPAddr17 = 0x3C1, // Physical memory protection address register + PMPAddr18 = 0x3C2, // Physical memory protection address register + PMPAddr19 = 0x3C3, // Physical memory protection address register + PMPAddr20 = 0x3C4, // Physical memory protection address register + PMPAddr21 = 0x3C5, // Physical memory protection address register + PMPAddr22 = 0x3C6, // Physical memory protection address register + PMPAddr23 = 0x3C7, // Physical memory protection address register + PMPAddr24 = 0x3C8, // Physical memory protection address register + PMPAddr25 = 0x3C9, // Physical memory protection address register + PMPAddr26 = 0x3CA, // Physical memory protection address register + PMPAddr27 = 0x3CB, // Physical memory protection address register + PMPAddr28 = 0x3CC, // Physical memory protection address register + PMPAddr29 = 0x3CD, // Physical memory protection address register + PMPAddr30 = 0x3CE, // Physical memory protection address register + PMPAddr31 = 0x3CF, // Physical memory protection address register + PMPAddr32 = 0x3D0, // Physical memory protection address register + PMPAddr33 = 0x3D1, // Physical memory protection address register + PMPAddr34 = 0x3D2, // Physical memory protection address register + PMPAddr35 = 0x3D3, // Physical memory protection address register + PMPAddr36 = 0x3D4, // Physical memory protection address register + PMPAddr37 = 0x3D5, // Physical memory protection address register + PMPAddr38 = 0x3D6, // Physical memory protection 
address register + PMPAddr39 = 0x3D7, // Physical memory protection address register + PMPAddr40 = 0x3D8, // Physical memory protection address register + PMPAddr41 = 0x3D9, // Physical memory protection address register + PMPAddr42 = 0x3DA, // Physical memory protection address register + PMPAddr43 = 0x3DB, // Physical memory protection address register + PMPAddr44 = 0x3DC, // Physical memory protection address register + PMPAddr45 = 0x3DD, // Physical memory protection address register + PMPAddr46 = 0x3DE, // Physical memory protection address register + PMPAddr47 = 0x3DF, // Physical memory protection address register + PMPAddr48 = 0x3E0, // Physical memory protection address register + PMPAddr49 = 0x3E1, // Physical memory protection address register + PMPAddr50 = 0x3E2, // Physical memory protection address register + PMPAddr51 = 0x3E3, // Physical memory protection address register + PMPAddr52 = 0x3E4, // Physical memory protection address register + PMPAddr53 = 0x3E5, // Physical memory protection address register + PMPAddr54 = 0x3E6, // Physical memory protection address register + PMPAddr55 = 0x3E7, // Physical memory protection address register + PMPAddr56 = 0x3E8, // Physical memory protection address register + PMPAddr57 = 0x3E9, // Physical memory protection address register + PMPAddr58 = 0x3EA, // Physical memory protection address register + PMPAddr59 = 0x3EB, // Physical memory protection address register + PMPAddr60 = 0x3EC, // Physical memory protection address register + PMPAddr61 = 0x3ED, // Physical memory protection address register + PMPAddr62 = 0x3EE, // Physical memory protection address register + PMPAddr63 = 0x3EF, // Physical memory protection address register + + MCycle = 0xB00, // Machine cycle counter + MInstRet = 0xB02, // Machine instructions-retired counter + MHPMCounter3 = 0xB03, // Machine performance-monitoring counter + MHPMCounter4 = 0xB04, // Machine performance-monitoring counter + MHPMCounter5 = 0xB05, // Machine performance-monitoring counter + MHPMCounter6 = 0xB06, // Machine performance-monitoring counter + MHPMCounter7 = 0xB07, // Machine performance-monitoring counter + MHPMCounter8 = 0xB08, // Machine performance-monitoring counter + MHPMCounter9 = 0xB09, // Machine performance-monitoring counter + MHPMCounter10 = 0xB0A, // Machine performance-monitoring counter + MHPMCounter11 = 0xB0B, // Machine performance-monitoring counter + MHPMCounter12 = 0xB0C, // Machine performance-monitoring counter + MHPMCounter13 = 0xB0D, // Machine performance-monitoring counter + MHPMCounter14 = 0xB0E, // Machine performance-monitoring counter + MHPMCounter15 = 0xB0F, // Machine performance-monitoring counter + MHPMCounter16 = 0xB10, // Machine performance-monitoring counter + MHPMCounter17 = 0xB11, // Machine performance-monitoring counter + MHPMCounter18 = 0xB12, // Machine performance-monitoring counter + MHPMCounter19 = 0xB13, // Machine performance-monitoring counter + MHPMCounter20 = 0xB14, // Machine performance-monitoring counter + MHPMCounter21 = 0xB15, // Machine performance-monitoring counter + MHPMCounter22 = 0xB16, // Machine performance-monitoring counter + MHPMCounter23 = 0xB17, // Machine performance-monitoring counter + MHPMCounter24 = 0xB18, // Machine performance-monitoring counter + MHPMCounter25 = 0xB19, // Machine performance-monitoring counter + MHPMCounter26 = 0xB1A, // Machine performance-monitoring counter + MHPMCounter27 = 0xB1B, // Machine performance-monitoring counter + MHPMCounter28 = 0xB1C, // Machine performance-monitoring 
counter + MHPMCounter29 = 0xB1D, // Machine performance-monitoring counter + MHPMCounter30 = 0xB1E, // Machine performance-monitoring counter + MHPMCounter31 = 0xB1F, // Machine performance-monitoring counter + + MCycleH = 0xB80, // Upper 32 bits of mcycle, RV32I only + MInstRetH = 0xB82, // Upper 32 bits of minstret, RV32I only + + MHPMCounter3H = 0xB83, // Upper 32 bits of MHPMCounter3, RV32I only + MHPMCounter4H = 0xB84, // Upper 32 bits of MHPMCounter4, RV32I only + MHPMCounter5H = 0xB85, // Upper 32 bits of MHPMCounter5, RV32I only + MHPMCounter6H = 0xB86, // Upper 32 bits of MHPMCounter6, RV32I only + MHPMCounter7H = 0xB87, // Upper 32 bits of MHPMCounter7, RV32I only + MHPMCounter8H = 0xB88, // Upper 32 bits of MHPMCounter8, RV32I only + MHPMCounter9H = 0xB89, // Upper 32 bits of MHPMCounter9, RV32I only + MHPMCounter10H = 0xB8A, // Upper 32 bits of MHPMCounter10, RV32I only + MHPMCounter11H = 0xB8B, // Upper 32 bits of MHPMCounter11, RV32I only + MHPMCounter12H = 0xB8C, // Upper 32 bits of MHPMCounter12, RV32I only + MHPMCounter13H = 0xB8D, // Upper 32 bits of MHPMCounter13, RV32I only + MHPMCounter14H = 0xB8E, // Upper 32 bits of MHPMCounter14, RV32I only + MHPMCounter15H = 0xB8F, // Upper 32 bits of MHPMCounter15, RV32I only + MHPMCounter16H = 0xB90, // Upper 32 bits of MHPMCounter16, RV32I only + MHPMCounter17H = 0xB91, // Upper 32 bits of MHPMCounter17, RV32I only + MHPMCounter18H = 0xB92, // Upper 32 bits of MHPMCounter18, RV32I only + MHPMCounter19H = 0xB93, // Upper 32 bits of MHPMCounter19, RV32I only + MHPMCounter20H = 0xB94, // Upper 32 bits of MHPMCounter20, RV32I only + MHPMCounter21H = 0xB95, // Upper 32 bits of MHPMCounter21, RV32I only + MHPMCounter22H = 0xB96, // Upper 32 bits of MHPMCounter22, RV32I only + MHPMCounter23H = 0xB97, // Upper 32 bits of MHPMCounter23, RV32I only + MHPMCounter24H = 0xB98, // Upper 32 bits of MHPMCounter24, RV32I only + MHPMCounter25H = 0xB99, // Upper 32 bits of MHPMCounter25, RV32I only + MHPMCounter26H = 0xB9A, // Upper 32 bits of MHPMCounter26, RV32I only + MHPMCounter27H = 0xB9B, // Upper 32 bits of MHPMCounter27, RV32I only + MHPMCounter28H = 0xB9C, // Upper 32 bits of MHPMCounter28, RV32I only + MHPMCounter29H = 0xB9D, // Upper 32 bits of MHPMCounter29, RV32I only + MHPMCounter30H = 0xB9E, // Upper 32 bits of MHPMCounter30, RV32I only + MHPMCounter31H = 0xB9F, // Upper 32 bits of MHPMCounter31, RV32I only + + MCountInhibit = 0x320, // Machine counter-inhibit register + + MHPMEvent3 = 0x323, // Machine performance-monitoring event selector + MHPMEvent4 = 0x324, // Machine performance-monitoring event selector + MHPMEvent5 = 0x325, // Machine performance-monitoring event selector + MHPMEvent6 = 0x326, // Machine performance-monitoring event selector + MHPMEvent7 = 0x327, // Machine performance-monitoring event selector + MHPMEvent8 = 0x328, // Machine performance-monitoring event selector + MHPMEvent9 = 0x329, // Machine performance-monitoring event selector + MHPMEvent10 = 0x32A, // Machine performance-monitoring event selector + MHPMEvent11 = 0x32B, // Machine performance-monitoring event selector + MHPMEvent12 = 0x32C, // Machine performance-monitoring event selector + MHPMEvent13 = 0x32D, // Machine performance-monitoring event selector + MHPMEvent14 = 0x32E, // Machine performance-monitoring event selector + MHPMEvent15 = 0x32F, // Machine performance-monitoring event selector + MHPMEvent16 = 0x330, // Machine performance-monitoring event selector + MHPMEvent17 = 0x331, // Machine performance-monitoring event selector + 
MHPMEvent18 = 0x332, // Machine performance-monitoring event selector + MHPMEvent19 = 0x333, // Machine performance-monitoring event selector + MHPMEvent20 = 0x334, // Machine performance-monitoring event selector + MHPMEvent21 = 0x335, // Machine performance-monitoring event selector + MHPMEvent22 = 0x336, // Machine performance-monitoring event selector + MHPMEvent23 = 0x337, // Machine performance-monitoring event selector + MHPMEvent24 = 0x338, // Machine performance-monitoring event selector + MHPMEvent25 = 0x339, // Machine performance-monitoring event selector + MHPMEvent26 = 0x33A, // Machine performance-monitoring event selector + MHPMEvent27 = 0x33B, // Machine performance-monitoring event selector + MHPMEvent28 = 0x33C, // Machine performance-monitoring event selector + MHPMEvent29 = 0x33D, // Machine performance-monitoring event selector + MHPMEvent30 = 0x33E, // Machine performance-monitoring event selector + MHPMEvent31 = 0x33F, // Machine performance-monitoring event selector + + TSelect = 0x7A0, // Debug/Trace trigger register select + TData1 = 0x7A1, // First Debug/Trace trigger data register + TData2 = 0x7A2, // Second Debug/Trace trigger data register + TData3 = 0x7A3, // Third Debug/Trace trigger data register + MContext = 0x7A8, // Machine-mode context register + + DCSR = 0x7B0, // Debug control and status register + DPC = 0x7B1, // Debug PC + DScratch0 = 0x7B2, // Debug scratch register 0 + DScratch1 = 0x7B3, // Debug scratch register 1 + + // Scalar Cryptography Entropy Source Extension CSRs + + Seed = 0x015, // Entropy bit provider (up to 16 bits) + + // Vector Extension CSRs + + VStart = 0x008, // Vector start position + VXSat = 0x009, // Fixed-Point Saturate Flag + VXRM = 0x00A, // Fixed-Point Rounding Mode + VCSR = 0x00F, // Vector control and status register + VL = 0xC20, // Vector length + VType = 0xC21, // Vector data type register + VLenb = 0xC22, // Vector register length in bytes + + // clang-format on +}; + +} // namespace biscuit diff --git a/dep/biscuit/include/biscuit/isa.hpp b/dep/biscuit/include/biscuit/isa.hpp new file mode 100644 index 000000000..94a9c239a --- /dev/null +++ b/dep/biscuit/include/biscuit/isa.hpp @@ -0,0 +1,49 @@ +#pragma once + +#include <cstdint> + +// Source file for general values and data structures +// that don't fit a particular criterion related to the ISA. 
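+// +// As a usage sketch (assuming the Assembler exposes a FENCE(pred, succ) emitter taking these orderings, which is not shown in this excerpt): +// +// as.FENCE(FenceOrder::RW, FenceOrder::RW); // order earlier reads/writes before later ones +// as.FENCE(FenceOrder::IORW, FenceOrder::IORW); // conservative full fence, including device I/O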
+ + namespace biscuit { + + enum class FenceOrder : uint32_t { + W = 1, // Write + R = 2, // Read + O = 4, // Device Output + I = 8, // Device Input + + RW = R | W, + + IO = I | O, + IR = I | R, + IW = I | W, + IRW = I | R | W, + + OI = O | I, + OR = O | R, + OW = O | W, + ORW = O | R | W, + + IORW = I | O | R | W, +}; + +// Atomic ordering +enum class Ordering : uint32_t { + None = 0, // None + RL = 1, // Release + AQ = 2, // Acquire + AQRL = AQ | RL, // Acquire-Release +}; + +// Floating-point Rounding Mode +enum class RMode : uint32_t { + RNE = 0b000, // Round to Nearest, ties to Even + RTZ = 0b001, // Round towards Zero + RDN = 0b010, // Round Down (towards negative infinity) + RUP = 0b011, // Round Up (towards positive infinity) + RMM = 0b100, // Round to Nearest, ties to Max Magnitude + DYN = 0b111, // Dynamic Rounding Mode +}; + +} // namespace biscuit diff --git a/dep/biscuit/include/biscuit/label.hpp b/dep/biscuit/include/biscuit/label.hpp new file mode 100644 index 000000000..8cfeeb2c3 --- /dev/null +++ b/dep/biscuit/include/biscuit/label.hpp @@ -0,0 +1,173 @@ +#pragma once + +#include <cstddef> +#include <optional> +#include <set> + +#include <biscuit/assert.hpp> + +namespace biscuit { + +/** + * A label is a representation of an address that can be used with branch and jump instructions. + * + * Labels do not need to be bound to a location immediately. A label can be created + * to provide branches with a tentative, undecided location that is then bound + * at a later point in time. + * + * @note Any label that is created, is used with a branch instruction, + * but is *not* bound to a location (via Bind() in the assembler) + * will result in an assertion being invoked when the label instance's + * destructor is executed. + * + * @note A label may only be bound to one location. Any attempt to rebind + * a label that is already bound will result in an assertion being + * invoked. + * + * @par + * An example of binding a label: + * + * @code{.cpp} + * Assembler as{...}; + * Label label; + * + * as.BNE(x2, x3, &label); // Use the label + * as.ADD(x7, x8, x9); + * as.XOR(x7, x10, x12); + * as.Bind(&label); // Bind the label to a location + * @endcode + */ +class Label { +public: + using Location = std::optional<ptrdiff_t>; + using LocationOffset = Location::value_type; + + /** + * Default constructor. + * + * This constructor results in a label being constructed that is not + * bound to a particular location yet. + */ + explicit Label() = default; + + /// Destructor + ~Label() noexcept { + // It's a logic bug if something references a label and hasn't been handled. + // + // This is usually indicative of a scenario where a label is referenced but + // hasn't been bound to a location. + // + BISCUIT_ASSERT(IsResolved()); + } + + // We disable copying of labels, as this doesn't really make sense to do. + // It also presents a problem. When labels are being resolved, if we have + // two labels pointing to the same place, resolving the links to this address + // are going to clobber each other N times for however many copies of the label + // exist. + // + // This isn't a particularly major problem, since the resolving will still result + // in the same end result, but it does make it annoying to think about label interactions + // moving forward. Thus, I choose to simply not think about it at all! + // + Label(const Label&) = delete; + Label& operator=(const Label&) = delete; + + // Moving labels on the other hand is totally fine, this is just pushing data around + // to another label while invalidating the label having its data "stolen". 
+ Label(Label&&) noexcept = default; + Label& operator=(Label&&) noexcept = default; + + /** + * Determines whether or not this label instance has a location assigned to it. + * + * A label is considered bound if it has an assigned location. + */ + [[nodiscard]] bool IsBound() const noexcept { + return m_location.has_value(); + } + + /** + * Determines whether or not this label is resolved. + * + * A label is considered resolved when all referencing offsets have been handled. + */ + [[nodiscard]] bool IsResolved() const noexcept { + return m_offsets.empty(); + } + + /** + * Determines whether or not this label is unresolved. + * + * A label is considered unresolved if it still has any unhandled referencing offsets. + */ + [[nodiscard]] bool IsUnresolved() const noexcept { + return !IsResolved(); + } + + /** + * Retrieves the location for this label. + * + * @note If the returned location is empty, then this label has not been assigned + * a location yet. + */ + [[nodiscard]] Location GetLocation() const noexcept { + return m_location; + } + +private: + // A label instance is inherently bound to the assembler it's + // used with, as the offsets within the label set depend on + // said assembler's code buffer. + friend class Assembler; + + /** + * Binds a label to the given location. + * + * @param offset The instruction offset to bind this label to. + * + * @pre The label must not have already been bound to a previous location. + * Attempting to rebind a label is typically, in almost all scenarios, + * the source of bugs. + * Attempting to rebind an already bound label will result in an assertion + * being triggered. + */ + void Bind(LocationOffset offset) noexcept { + BISCUIT_ASSERT(!IsBound()); + m_location = offset; + } + + /** + * Marks the given address as dependent on this label. + * + * This is used in scenarios where a label exists, but has not yet been + * bound to a location yet. It's important to track these addresses, + * as we'll need to patch the dependent branch instructions with the + * proper offset once the label is finally bound by the assembler. + * + * During label binding, the offset will be calculated and inserted + * into dependent instructions. + */ + void AddOffset(LocationOffset offset) { + // If a label is already bound to a location, then offset tracking + // isn't necessary. Tripping this assert means we have a bug somewhere. + BISCUIT_ASSERT(!IsBound()); + BISCUIT_ASSERT(IsNewOffset(offset)); + + m_offsets.insert(offset); + } + + // Clears all the underlying offsets for this label. + void ClearOffsets() noexcept { + m_offsets.clear(); + } + + // Determines whether or not this address has already been added before. + [[nodiscard]] bool IsNewOffset(LocationOffset offset) const noexcept { + return m_offsets.find(offset) == m_offsets.cend(); + } + + std::set<LocationOffset> m_offsets; + Location m_location; +}; + +} // namespace biscuit diff --git a/dep/biscuit/include/biscuit/registers.hpp b/dep/biscuit/include/biscuit/registers.hpp new file mode 100644 index 000000000..b660e818d --- /dev/null +++ b/dep/biscuit/include/biscuit/registers.hpp @@ -0,0 +1,276 @@ +#pragma once + +#include <cstdint> + +namespace biscuit { + +/** + * Generic abstraction around a register. + * + * This is less bug-prone than using raw primitive sizes + * in opcode emitter functions, since it provides stronger typing. + */ +class Register { +public: + constexpr Register() noexcept = default; + + /// Gets the index for this register. 
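+ /// (For example, x5.Index() == 5; symbolic aliases defined below share indices, so sp.Index() == x2.Index() == 2.)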
+ [[nodiscard]] constexpr uint32_t Index() const noexcept { + return m_index; + } + + /// Determines whether or not this register is a general-purpose register. + [[nodiscard]] constexpr bool IsGPR() const noexcept { + return m_type == Type::GPR; + } + + /// Determines whether or not this register is a floating-point register. + [[nodiscard]] constexpr bool IsFPR() const noexcept { + return m_type == Type::FPR; + } + + /// Determines whether or not this register is a vector register. + [[nodiscard]] constexpr bool IsVector() const noexcept { + return m_type == Type::Vector; + } + +protected: + enum class Type { + GPR, // General purpose register + FPR, // Floating-point register + Vector, // Vector register + }; + + constexpr Register(uint32_t index, Type type) noexcept + : m_index{index}, m_type{type} {} + +private: + uint32_t m_index{}; + Type m_type{}; +}; + +/// General purpose register. +class GPR final : public Register { +public: + constexpr GPR() noexcept : Register{0, Type::GPR} {} + constexpr explicit GPR(uint32_t index) noexcept : Register{index, Type::GPR} {} + + friend constexpr bool operator==(GPR lhs, GPR rhs) noexcept { + return lhs.Index() == rhs.Index(); + } + friend constexpr bool operator!=(GPR lhs, GPR rhs) noexcept { + return !operator==(lhs, rhs); + } +}; + +/// Floating point register. +class FPR final : public Register { +public: + constexpr FPR() noexcept : Register{0, Type::FPR} {} + constexpr explicit FPR(uint32_t index) noexcept : Register{index, Type::FPR} {} + + friend constexpr bool operator==(FPR lhs, FPR rhs) noexcept { + return lhs.Index() == rhs.Index(); + } + friend constexpr bool operator!=(FPR lhs, FPR rhs) noexcept { + return !operator==(lhs, rhs); + } +}; + +/// Vector register. +class Vec final : public Register { +public: + constexpr Vec() noexcept : Register{0, Type::Vector} {} + constexpr explicit Vec(uint32_t index) noexcept : Register{index, Type::Vector} {} + + friend constexpr bool operator==(Vec lhs, Vec rhs) noexcept { + return lhs.Index() == rhs.Index(); + } + friend constexpr bool operator!=(Vec lhs, Vec rhs) noexcept { + return !operator==(lhs, rhs); + } +}; + +// General-purpose Registers + +constexpr GPR x0{0}; +constexpr GPR x1{1}; +constexpr GPR x2{2}; +constexpr GPR x3{3}; +constexpr GPR x4{4}; +constexpr GPR x5{5}; +constexpr GPR x6{6}; +constexpr GPR x7{7}; +constexpr GPR x8{8}; +constexpr GPR x9{9}; +constexpr GPR x10{10}; +constexpr GPR x11{11}; +constexpr GPR x12{12}; +constexpr GPR x13{13}; +constexpr GPR x14{14}; +constexpr GPR x15{15}; +constexpr GPR x16{16}; +constexpr GPR x17{17}; +constexpr GPR x18{18}; +constexpr GPR x19{19}; +constexpr GPR x20{20}; +constexpr GPR x21{21}; +constexpr GPR x22{22}; +constexpr GPR x23{23}; +constexpr GPR x24{24}; +constexpr GPR x25{25}; +constexpr GPR x26{26}; +constexpr GPR x27{27}; +constexpr GPR x28{28}; +constexpr GPR x29{29}; +constexpr GPR x30{30}; +constexpr GPR x31{31}; + +// Symbolic General-purpose Register Names + +constexpr GPR zero{x0}; + +constexpr GPR ra{x1}; +constexpr GPR sp{x2}; +constexpr GPR gp{x3}; +constexpr GPR tp{x4}; +constexpr GPR fp{x8}; + +constexpr GPR a0{x10}; +constexpr GPR a1{x11}; +constexpr GPR a2{x12}; +constexpr GPR a3{x13}; +constexpr GPR a4{x14}; +constexpr GPR a5{x15}; +constexpr GPR a6{x16}; +constexpr GPR a7{x17}; + +constexpr GPR s0{x8}; +constexpr GPR s1{x9}; +constexpr GPR s2{x18}; +constexpr GPR s3{x19}; +constexpr GPR s4{x20}; +constexpr GPR s5{x21}; +constexpr GPR s6{x22}; +constexpr GPR s7{x23}; +constexpr GPR s8{x24}; +constexpr GPR s9{x25}; 
+constexpr GPR s10{x26}; +constexpr GPR s11{x27}; + +constexpr GPR t0{x5}; +constexpr GPR t1{x6}; +constexpr GPR t2{x7}; +constexpr GPR t3{x28}; +constexpr GPR t4{x29}; +constexpr GPR t5{x30}; +constexpr GPR t6{x31}; + +// Floating-point registers + +constexpr FPR f0{0}; +constexpr FPR f1{1}; +constexpr FPR f2{2}; +constexpr FPR f3{3}; +constexpr FPR f4{4}; +constexpr FPR f5{5}; +constexpr FPR f6{6}; +constexpr FPR f7{7}; +constexpr FPR f8{8}; +constexpr FPR f9{9}; +constexpr FPR f10{10}; +constexpr FPR f11{11}; +constexpr FPR f12{12}; +constexpr FPR f13{13}; +constexpr FPR f14{14}; +constexpr FPR f15{15}; +constexpr FPR f16{16}; +constexpr FPR f17{17}; +constexpr FPR f18{18}; +constexpr FPR f19{19}; +constexpr FPR f20{20}; +constexpr FPR f21{21}; +constexpr FPR f22{22}; +constexpr FPR f23{23}; +constexpr FPR f24{24}; +constexpr FPR f25{25}; +constexpr FPR f26{26}; +constexpr FPR f27{27}; +constexpr FPR f28{28}; +constexpr FPR f29{29}; +constexpr FPR f30{30}; +constexpr FPR f31{31}; + +// Symbolic Floating-point Register Names + +constexpr FPR fa0{f10}; +constexpr FPR fa1{f11}; +constexpr FPR fa2{f12}; +constexpr FPR fa3{f13}; +constexpr FPR fa4{f14}; +constexpr FPR fa5{f15}; +constexpr FPR fa6{f16}; +constexpr FPR fa7{f17}; + +constexpr FPR ft0{f0}; +constexpr FPR ft1{f1}; +constexpr FPR ft2{f2}; +constexpr FPR ft3{f3}; +constexpr FPR ft4{f4}; +constexpr FPR ft5{f5}; +constexpr FPR ft6{f6}; +constexpr FPR ft7{f7}; +constexpr FPR ft8{f28}; +constexpr FPR ft9{f29}; +constexpr FPR ft10{f30}; +constexpr FPR ft11{f31}; + +constexpr FPR fs0{f8}; +constexpr FPR fs1{f9}; +constexpr FPR fs2{f18}; +constexpr FPR fs3{f19}; +constexpr FPR fs4{f20}; +constexpr FPR fs5{f21}; +constexpr FPR fs6{f22}; +constexpr FPR fs7{f23}; +constexpr FPR fs8{f24}; +constexpr FPR fs9{f25}; +constexpr FPR fs10{f26}; +constexpr FPR fs11{f27}; + +// Vector registers (V extension) + +constexpr Vec v0{0}; +constexpr Vec v1{1}; +constexpr Vec v2{2}; +constexpr Vec v3{3}; +constexpr Vec v4{4}; +constexpr Vec v5{5}; +constexpr Vec v6{6}; +constexpr Vec v7{7}; +constexpr Vec v8{8}; +constexpr Vec v9{9}; +constexpr Vec v10{10}; +constexpr Vec v11{11}; +constexpr Vec v12{12}; +constexpr Vec v13{13}; +constexpr Vec v14{14}; +constexpr Vec v15{15}; +constexpr Vec v16{16}; +constexpr Vec v17{17}; +constexpr Vec v18{18}; +constexpr Vec v19{19}; +constexpr Vec v20{20}; +constexpr Vec v21{21}; +constexpr Vec v22{22}; +constexpr Vec v23{23}; +constexpr Vec v24{24}; +constexpr Vec v25{25}; +constexpr Vec v26{26}; +constexpr Vec v27{27}; +constexpr Vec v28{28}; +constexpr Vec v29{29}; +constexpr Vec v30{30}; +constexpr Vec v31{31}; + +} // namespace biscuit diff --git a/dep/biscuit/include/biscuit/vector.hpp b/dep/biscuit/include/biscuit/vector.hpp new file mode 100644 index 000000000..d31208ed7 --- /dev/null +++ b/dep/biscuit/include/biscuit/vector.hpp @@ -0,0 +1,88 @@ +#pragma once + +#include <cstdint> + +// Source file for anything specific to the RISC-V vector extension. + +namespace biscuit { + +/// Describes whether or not an instruction should make use of the mask vector. +enum class VecMask : uint32_t { + Yes = 0, + No = 1, +}; + +/// Describes the selected element width. 
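+/// (This maps to the vsew field of vtype; e.g. SEW::E32 selects 32-bit elements and could be requested as VSETVLI(t0, a0, SEW::E32) via the configuration-setting instructions declared in assembler.hpp.)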
+enum class SEW : uint32_t { + E8 = 0b000, // 8-bit vector elements + E16 = 0b001, // 16-bit vector elements + E32 = 0b010, // 32-bit vector elements + E64 = 0b011, // 64-bit vector elements + E128 = 0b100, // 128-bit vector elements + E256 = 0b101, // 256-bit vector elements + E512 = 0b110, // 512-bit vector elements + E1024 = 0b111, // 1024-bit vector elements +}; + +/// Describes the selected register group multiplier. +enum class LMUL : uint32_t { + M1 = 0b000, // Group of one vector + M2 = 0b001, // Groups of two vectors + M4 = 0b010, // Groups of four vectors + M8 = 0b011, // Groups of eight vectors + MF8 = 0b101, // Fractional vector group (1/8) + MF4 = 0b110, // Fractional vector group (1/4) + MF2 = 0b111, // Fractional vector group (1/2) +}; + +/** + * Describes whether or not vector masks are agnostic. + * + * From the RVV spec: + * + * When a set is marked undisturbed, the corresponding set of + * destination elements in a vector register group retain the + * value they previously held. + * + * When a set is marked agnostic, the corresponding set of destination + * elements in any vector destination operand can either retain the value + * they previously held, or are overwritten with 1s. + * + * Within a single vector instruction, each destination element can be either + * left undisturbed or overwritten with 1s, in any combination, and the pattern + * of undisturbed or overwritten with 1s is not required to be deterministic when + * the instruction is executed with the same inputs. In addition, except for + * mask load instructions, any element in the tail of a mask result can also be + * written with the value the mask-producing operation would have calculated with vl=VLMAX + */ +enum class VMA : uint32_t { + No, // Undisturbed + Yes, // Agnostic +}; + +/** + * Describes whether or not vector tail elements are agnostic. + * + * From the RVV spec: + * + * When a set is marked undisturbed, the corresponding set of + * destination elements in a vector register group retain the + * value they previously held. + * + * When a set is marked agnostic, the corresponding set of destination + * elements in any vector destination operand can either retain the value + * they previously held, or are overwritten with 1s. + * + * Within a single vector instruction, each destination element can be either + * left undisturbed or overwritten with 1s, in any combination, and the pattern + * of undisturbed or overwritten with 1s is not required to be deterministic when + * the instruction is executed with the same inputs. 
In addition, except for
+ * mask load instructions, any element in the tail of a mask result can also be
+ * written with the value the mask-producing operation would have calculated with vl=VLMAX.
+ */
+enum class VTA : uint32_t {
+    No,  // Undisturbed
+    Yes, // Agnostic
+};
+
+} // namespace biscuit
diff --git a/dep/biscuit/src/CMakeLists.txt b/dep/biscuit/src/CMakeLists.txt
new file mode 100644
index 000000000..7ffc7785b
--- /dev/null
+++ b/dep/biscuit/src/CMakeLists.txt
@@ -0,0 +1,153 @@
+# Main library
+
+add_library(biscuit
+    # Source files
+    assembler.cpp
+    assembler_crypto.cpp
+    assembler_vector.cpp
+    code_buffer.cpp
+    cpuinfo.cpp
+
+    # Headers
+    "${PROJECT_SOURCE_DIR}/include/biscuit/assembler.hpp"
+    "${PROJECT_SOURCE_DIR}/include/biscuit/assert.hpp"
+    "${PROJECT_SOURCE_DIR}/include/biscuit/code_buffer.hpp"
+    "${PROJECT_SOURCE_DIR}/include/biscuit/csr.hpp"
+    "${PROJECT_SOURCE_DIR}/include/biscuit/isa.hpp"
+    "${PROJECT_SOURCE_DIR}/include/biscuit/label.hpp"
+    "${PROJECT_SOURCE_DIR}/include/biscuit/registers.hpp"
+    "${PROJECT_SOURCE_DIR}/include/biscuit/vector.hpp"
+    "${PROJECT_SOURCE_DIR}/include/biscuit/cpuinfo.hpp"
+)
+add_library(biscuit::biscuit ALIAS biscuit)
+
+target_include_directories(biscuit
+PUBLIC
+    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
+    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+
+PRIVATE
+    ${CMAKE_CURRENT_SOURCE_DIR}
+)
+
+target_compile_features(biscuit
+PRIVATE
+    cxx_std_20
+)
+
+if (MSVC)
+    target_compile_options(biscuit
+    PRIVATE
+        /MP
+        /Zi
+        /Zo
+        /permissive-
+        /EHsc
+        /utf-8
+        /volatile:iso
+        /Zc:externConstexpr
+        /Zc:inline
+        /Zc:throwingNew
+
+        # Warnings
+        /W4
+        /we4062 # enumerator 'identifier' in a switch of enum 'enumeration' is not handled
+        /we4101 # 'identifier': unreferenced local variable
+        /we4265 # 'class': class has virtual functions, but destructor is not virtual
+        /we4287 # 'operator' : unsigned/negative constant mismatch
+        /we4365 # 'action' : conversion from 'type_1' to 'type_2', signed/unsigned mismatch
+        /we4388 # signed/unsigned mismatch
+        /we4547 # 'operator' : operator before comma has no effect; expected operator with side-effect
+        /we4549 # 'operator1': operator before comma has no effect; did you intend 'operator2'?
+        /we4555 # Expression has no effect; expected expression with side-effect
+        /we4715 # 'function': not all control paths return a value
+        /we4834 # Discarding return value of function with 'nodiscard' attribute
+        /we5038 # data member 'member1' will be initialized after data member 'member2'
+    )
+elseif (("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") OR ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU"))
+    target_compile_options(biscuit
+    PRIVATE
+        -Wall
+        -Wextra
+        -Wconversion
+        -Wsign-conversion
+
+        -Werror=array-bounds
+        -Werror=cast-qual
+        -Werror=ignored-qualifiers
+        -Werror=implicit-fallthrough
+        -Werror=sign-compare
+        -Werror=reorder
+        -Werror=uninitialized
+        -Werror=unused-function
+        -Werror=unused-result
+        -Werror=unused-variable
+    )
+endif()
+
+if (BISCUIT_CODE_BUFFER_MMAP)
+    target_compile_definitions(biscuit
+    PRIVATE
+        -DBISCUIT_CODE_BUFFER_MMAP
+    )
+endif()
+
+# Install target
+
+include(GNUInstallDirs)
+set(BISCUIT_INSTALL_CONFIGDIR "${CMAKE_INSTALL_LIBDIR}/cmake/biscuit")
+
+# Set install target and relevant includes.
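+# For downstream reference, the package exported below can be consumed from
+# another project roughly like so (hypothetical consumer CMakeLists.txt; the
+# target name "my_jit" is illustrative):
+#
+#   find_package(biscuit REQUIRED)
+#   target_link_libraries(my_jit PRIVATE biscuit::biscuit)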
+install(TARGETS biscuit
+    EXPORT biscuit-targets
+    LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+    ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+)
+install(
+    DIRECTORY "${PROJECT_SOURCE_DIR}/include/"
+    DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
+)
+
+# Export targets to a script
+install(EXPORT biscuit-targets
+    FILE
+        biscuit-targets.cmake
+    NAMESPACE
+        biscuit::
+    DESTINATION
+        "${BISCUIT_INSTALL_CONFIGDIR}"
+)
+
+# Now create the config version script
+include(CMakePackageConfigHelpers)
+write_basic_package_version_file(
+    "${CMAKE_CURRENT_BINARY_DIR}/biscuit-config-version.cmake"
+    VERSION
+        ${PROJECT_VERSION}
+    COMPATIBILITY
+        SameMajorVersion
+)
+
+configure_package_config_file(
+    "${PROJECT_SOURCE_DIR}/cmake/biscuit-config.cmake.in"
+    "${CMAKE_CURRENT_BINARY_DIR}/biscuit-config.cmake"
+
+    INSTALL_DESTINATION "${BISCUIT_INSTALL_CONFIGDIR}"
+)
+
+# Now install the config and version files.
+install(FILES
+    "${CMAKE_CURRENT_BINARY_DIR}/biscuit-config.cmake"
+    "${CMAKE_CURRENT_BINARY_DIR}/biscuit-config-version.cmake"
+
+    DESTINATION "${BISCUIT_INSTALL_CONFIGDIR}"
+)
+
+# Export library from the build tree.
+export(EXPORT biscuit-targets
+    FILE
+        "${CMAKE_CURRENT_BINARY_DIR}/biscuit-targets.cmake"
+    NAMESPACE
+        biscuit::
+)
+export(PACKAGE biscuit)
diff --git a/dep/biscuit/src/assembler.cpp b/dep/biscuit/src/assembler.cpp
new file mode 100644
index 000000000..60bd277f2
--- /dev/null
+++ b/dep/biscuit/src/assembler.cpp
@@ -0,0 +1,2376 @@
+#include <biscuit/assembler.hpp>
+#include <biscuit/assert.hpp>
+
+#include <cstddef>
+#include <utility>
+
+namespace biscuit {
+namespace {
+// Determines if a value lies within the range of a 6-bit immediate.
+[[nodiscard]] bool IsValidSigned6BitImm(ptrdiff_t value) noexcept {
+    return value >= -32 && value <= 31;
+}
+
+// S-type and I-type immediates are 12 bits in size.
+[[nodiscard]] bool IsValidSigned12BitImm(ptrdiff_t value) noexcept {
+    return value >= -2048 && value <= 2047;
+}
+
+// B-type immediates only provide -4KiB to +4KiB range branches.
+[[nodiscard]] bool IsValidBTypeImm(ptrdiff_t value) noexcept {
+    return value >= -4096 && value <= 4095;
+}
+
+// J-type immediates only provide -1MiB to +1MiB range branches.
+[[nodiscard]] bool IsValidJTypeImm(ptrdiff_t value) noexcept {
+    return value >= -0x80000 && value <= 0x7FFFF;
+}
+
+// CB-type immediates only provide -256B to +256B range branches.
+[[nodiscard]] bool IsValidCBTypeImm(ptrdiff_t value) noexcept {
+    return value >= -256 && value <= 255;
+}
+
+// CJ-type immediates only provide -2KiB to +2KiB range branches.
+[[nodiscard]] bool IsValidCJTypeImm(ptrdiff_t value) noexcept {
+    return IsValidSigned12BitImm(value);
+}
+
+// Determines whether or not the register fits in the 3-bit compressed encoding.
+[[nodiscard]] bool IsValid3BitCompressedReg(Register reg) noexcept {
+    const auto index = reg.Index();
+    return index >= 8 && index <= 15;
+}
+
+// Determines whether or not the given shift amount is valid for a compressed shift instruction.
+[[nodiscard]] bool IsValidCompressedShiftAmount(uint32_t shift) noexcept {
+    return shift > 0 && shift <= 64;
+}
+
+// Turns a compressed register into its encoding.
+[[nodiscard]] uint32_t CompressedRegTo3BitEncoding(Register reg) noexcept {
+    return reg.Index() - 8;
+}
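+// As a concrete illustration of the immediate transforms below: a branch
+// displacement of +2050 (0x802) has only imm[1] and imm[11] set. The B-type
+// layout routes imm[4:1] into instruction bits 11:8 and imm[11] into bit 7,
+// so TransformToBTypeImm(0x802) evaluates to 0x180 (bits 8 and 7 set), which
+// EmitBType() then ORs with the register, funct3, and opcode fields.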
+// Transforms a regular value into an immediate encoded in a B-type instruction.
+[[nodiscard]] uint32_t TransformToBTypeImm(uint32_t imm) noexcept {
+    // clang-format off
+    return ((imm & 0x07E0) << 20) |
+           ((imm & 0x1000) << 19) |
+           ((imm & 0x001E) << 7) |
+           ((imm & 0x0800) >> 4);
+    // clang-format on
+}
+
+// Transforms a regular value into an immediate encoded in a J-type instruction.
+[[nodiscard]] uint32_t TransformToJTypeImm(uint32_t imm) noexcept {
+    // clang-format off
+    return ((imm & 0x0FF000) >> 0) |
+           ((imm & 0x000800) << 9) |
+           ((imm & 0x0007FE) << 20) |
+           ((imm & 0x100000) << 11);
+    // clang-format on
+}
+
+// Transforms a regular value into an immediate encoded in a CB-type instruction.
+[[nodiscard]] uint32_t TransformToCBTypeImm(uint32_t imm) noexcept {
+    // clang-format off
+    return ((imm & 0x0C0) >> 1) |
+           ((imm & 0x006) << 2) |
+           ((imm & 0x020) >> 3) |
+           ((imm & 0x018) << 7) |
+           ((imm & 0x100) << 4);
+    // clang-format on
+}
+
+// Transforms a regular value into an immediate encoded in a CJ-type instruction.
+[[nodiscard]] uint32_t TransformToCJTypeImm(uint32_t imm) noexcept {
+    // clang-format off
+    return ((imm & 0x800) << 1) |
+           ((imm & 0x010) << 7) |
+           ((imm & 0x300) << 1) |
+           ((imm & 0x400) >> 2) |
+           ((imm & 0x040) << 1) |
+           ((imm & 0x080) >> 1) |
+           ((imm & 0x00E) << 2) |
+           ((imm & 0x020) >> 3);
+    // clang-format on
+}
+
+// Emits a B-type RISC-V instruction. These consist of:
+// imm[12|10:5] | rs2 | rs1 | funct3 | imm[4:1] | imm[11] | opcode
+void EmitBType(CodeBuffer& buffer, uint32_t imm, GPR rs2, GPR rs1, uint32_t funct3, uint32_t opcode) noexcept {
+    imm &= 0x1FFE;
+
+    buffer.Emit32(TransformToBTypeImm(imm) | (rs2.Index() << 20) | (rs1.Index() << 15) | ((funct3 & 0b111) << 12) | (opcode & 0x7F));
+}
+
+// Emits an I-type RISC-V instruction. These consist of:
+// imm[11:0] | rs1 | funct3 | rd | opcode
+void EmitIType(CodeBuffer& buffer, uint32_t imm, Register rs1, uint32_t funct3, Register rd, uint32_t opcode) noexcept {
+    imm &= 0xFFF;
+
+    buffer.Emit32((imm << 20) | (rs1.Index() << 15) | ((funct3 & 0b111) << 12) | (rd.Index() << 7) | (opcode & 0x7F));
+}
+
+// Emits a J-type RISC-V instruction. These consist of:
+// imm[20|10:1|11|19:12] | rd | opcode
+void EmitJType(CodeBuffer& buffer, uint32_t imm, GPR rd, uint32_t opcode) noexcept {
+    imm &= 0x1FFFFE;
+
+    buffer.Emit32(TransformToJTypeImm(imm) | rd.Index() << 7 | (opcode & 0x7F));
+}
+
+// Emits an R-type RISC-V instruction. These consist of:
+// funct7 | rs2 | rs1 | funct3 | rd | opcode
+void EmitRType(CodeBuffer& buffer, uint32_t funct7, Register rs2, Register rs1, uint32_t funct3,
+               Register rd, uint32_t opcode) noexcept {
+    // clang-format off
+    const auto value = ((funct7 & 0x7F) << 25) |
+                       (rs2.Index() << 20) |
+                       (rs1.Index() << 15) |
+                       ((funct3 & 0b111) << 12) |
+                       (rd.Index() << 7) |
+                       (opcode & 0x7F);
+    // clang-format on
+
+    buffer.Emit32(value);
+}
+
+// Emits an R-type RISC-V instruction. These consist of:
+// funct7 | rs2 | rs1 | funct3 | rd | opcode
+void EmitRType(CodeBuffer& buffer, uint32_t funct7, FPR rs2, FPR rs1, RMode funct3, FPR rd, uint32_t opcode) noexcept {
+    EmitRType(buffer, funct7, rs2, rs1, static_cast<uint32_t>(funct3), rd, opcode);
+}
+
+// Emits an R4-type RISC-V instruction. These consist of:
+// rs3 | funct2 | rs2 | rs1 | funct3 | rd | opcode
+void EmitR4Type(CodeBuffer& buffer, FPR rs3, uint32_t funct2, FPR rs2, FPR rs1, RMode funct3, FPR rd, uint32_t opcode) noexcept {
+    const auto reg_bits = (rs3.Index() << 27) | (rs2.Index() << 20) | (rs1.Index() << 15) | (rd.Index() << 7);
+    const auto funct_bits = ((funct2 & 0b11) << 25) | (static_cast<uint32_t>(funct3) << 12);
+    buffer.Emit32(reg_bits | funct_bits | (opcode & 0x7F));
+}
+
+// Emits an S-type RISC-V instruction. These consist of:
+// imm[11:5] | rs2 | rs1 | funct3 | imm[4:0] | opcode
+void EmitSType(CodeBuffer& buffer, uint32_t imm, Register rs2, GPR rs1, uint32_t funct3, uint32_t opcode) noexcept {
+    imm &= 0xFFF;
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x01F) << 7) |
+                         ((imm & 0xFE0) << 20);
+    // clang-format on
+
+    buffer.Emit32(new_imm | (rs2.Index() << 20) | (rs1.Index() << 15) | ((funct3 & 0b111) << 12) | (opcode & 0x7F));
+}
+
+// Emits a U-type RISC-V instruction. These consist of:
+// imm[31:12] | rd | opcode
+void EmitUType(CodeBuffer& buffer, uint32_t imm, GPR rd, uint32_t opcode) noexcept {
+    buffer.Emit32((imm & 0x000FFFFF) << 12 | rd.Index() << 7 | (opcode & 0x7F));
+}
+
+// Emits an atomic instruction.
+void EmitAtomic(CodeBuffer& buffer, uint32_t funct5, Ordering ordering, GPR rs2, GPR rs1,
+                uint32_t funct3, GPR rd, uint32_t opcode) noexcept {
+    const auto funct7 = (funct5 << 2) | static_cast<uint32_t>(ordering);
+    EmitRType(buffer, funct7, rs2, rs1, funct3, rd, opcode);
+}
+
+// Emits a fence instruction.
+void EmitFENCE(CodeBuffer& buffer, uint32_t fm, FenceOrder pred, FenceOrder succ,
+               GPR rs, uint32_t funct3, GPR rd, uint32_t opcode) noexcept {
+    // clang-format off
+    buffer.Emit32(((fm & 0b1111) << 28) |
+                  (static_cast<uint32_t>(pred) << 24) |
+                  (static_cast<uint32_t>(succ) << 20) |
+                  (rs.Index() << 15) |
+                  ((funct3 & 0b111) << 12) |
+                  (rd.Index() << 7) |
+                  (opcode & 0x7F));
+    // clang-format on
+}
+
+// Emits a compressed branch instruction. These consist of:
+// funct3 | imm[8|4:3] | rs | imm[7:6|2:1|5] | op
+void EmitCompressedBranch(CodeBuffer& buffer, uint32_t funct3, int32_t offset, GPR rs, uint32_t op) noexcept {
+    BISCUIT_ASSERT(IsValidCBTypeImm(offset));
+    BISCUIT_ASSERT(IsValid3BitCompressedReg(rs));
+
+    const auto transformed_imm = TransformToCBTypeImm(static_cast<uint32_t>(offset));
+    const auto rs_san = CompressedRegTo3BitEncoding(rs);
+    buffer.Emit16(((funct3 & 0b111) << 13) | transformed_imm | (rs_san << 7) | (op & 0b11));
+}
+
+// Emits a compressed jump instruction. These consist of:
+// funct3 | imm | op
+void EmitCompressedJump(CodeBuffer& buffer, uint32_t funct3, int32_t offset, uint32_t op) noexcept {
+    BISCUIT_ASSERT(IsValidCJTypeImm(offset));
+    buffer.Emit16(TransformToCJTypeImm(static_cast<uint32_t>(offset)) | ((funct3 & 0b111) << 13) | (op & 0b11));
+}
+
+// Emits a compressed immediate instruction. These consist of:
+// funct3 | imm | rd | imm | op
+void EmitCompressedImmediate(CodeBuffer& buffer, uint32_t funct3, uint32_t imm, GPR rd, uint32_t op) noexcept {
+    BISCUIT_ASSERT(rd != x0);
+    const auto new_imm = ((imm & 0b11111) << 2) | ((imm & 0b100000) << 7);
+    buffer.Emit16(((funct3 & 0b111) << 13) | new_imm | (rd.Index() << 7) | (op & 0b11));
+}
+
+// Emits a compressed load instruction.
These consist of: +// funct3 | imm | rs1 | imm | rd | op +void EmitCompressedLoad(CodeBuffer& buffer, uint32_t funct3, uint32_t imm, GPR rs, Register rd, uint32_t op) noexcept { + BISCUIT_ASSERT(IsValid3BitCompressedReg(rs)); + BISCUIT_ASSERT(IsValid3BitCompressedReg(rd)); + + imm &= 0xF8; + + const auto imm_enc = ((imm & 0x38) << 7) | ((imm & 0xC0) >> 1); + const auto rd_san = CompressedRegTo3BitEncoding(rd); + const auto rs_san = CompressedRegTo3BitEncoding(rs); + buffer.Emit16(((funct3 & 0b111) << 13) | imm_enc | (rs_san << 7) | (rd_san << 2) | (op & 0b11)); +} + +// Emits a compressed register arithmetic instruction. These consist of: +// funct6 | rd | funct2 | rs | op +void EmitCompressedRegArith(CodeBuffer& buffer, uint32_t funct6, GPR rd, uint32_t funct2, GPR rs, uint32_t op) noexcept { + BISCUIT_ASSERT(IsValid3BitCompressedReg(rs)); + BISCUIT_ASSERT(IsValid3BitCompressedReg(rd)); + + const auto rd_san = CompressedRegTo3BitEncoding(rd); + const auto rs_san = CompressedRegTo3BitEncoding(rs); + buffer.Emit16(((funct6 & 0b111111) << 10) | (rd_san << 7) | ((funct2 & 0b11) << 5) | (rs_san << 2) | (op & 0b11)); +} + +// Emits a compressed store instruction. These consist of: +// funct3 | imm | rs1 | imm | rs2 | op +void EmitCompressedStore(CodeBuffer& buffer, uint32_t funct3, uint32_t imm, GPR rs1, Register rs2, uint32_t op) noexcept { + // This has the same format as a compressed load, with rs2 taking the place of rd. + // We can reuse the code we've already written to handle this. + EmitCompressedLoad(buffer, funct3, imm, rs1, rs2, op); +} + +// Emits a compressed wide immediate instruction. These consist of: +// funct3 | imm | rd | opcode +void EmitCompressedWideImmediate(CodeBuffer& buffer, uint32_t funct3, uint32_t imm, GPR rd, uint32_t op) noexcept { + BISCUIT_ASSERT(IsValid3BitCompressedReg(rd)); + const auto rd_sanitized = CompressedRegTo3BitEncoding(rd); + buffer.Emit16(((funct3 & 0b111) << 13) | ((imm & 0xFF) << 5) | (rd_sanitized << 2) | (op & 0b11)); +} +} // Anonymous namespace + +Assembler::Assembler(size_t capacity) + : m_buffer(capacity) {} + +Assembler::Assembler(uint8_t* buffer, size_t capacity) + : m_buffer(buffer, capacity) {} + +Assembler::~Assembler() = default; + +CodeBuffer& Assembler::GetCodeBuffer() { + return m_buffer; +} + +CodeBuffer Assembler::SwapCodeBuffer(CodeBuffer&& buffer) noexcept { + return std::exchange(m_buffer, std::move(buffer)); +} + +void Assembler::Bind(Label* label) { + BindToOffset(label, m_buffer.GetCursorOffset()); +} + +void Assembler::ADD(GPR rd, GPR lhs, GPR rhs) noexcept { + EmitRType(m_buffer, 0b0000000, rhs, lhs, 0b000, rd, 0b0110011); +} + +void Assembler::ADDI(GPR rd, GPR rs, int32_t imm) noexcept { + EmitIType(m_buffer, static_cast(imm), rs, 0b000, rd, 0b0010011); +} + +void Assembler::AND(GPR rd, GPR lhs, GPR rhs) noexcept { + EmitRType(m_buffer, 0b0000000, rhs, lhs, 0b111, rd, 0b0110011); +} + +void Assembler::ANDI(GPR rd, GPR rs, uint32_t imm) noexcept { + EmitIType(m_buffer, imm, rs, 0b111, rd, 0b0010011); +} + +void Assembler::AUIPC(GPR rd, int32_t imm) noexcept { + EmitUType(m_buffer, static_cast(imm), rd, 0b0010111); +} + +void Assembler::BEQ(GPR rs1, GPR rs2, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BEQ(rs1, rs2, static_cast(address)); +} + +void Assembler::BEQZ(GPR rs, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BEQZ(rs, static_cast(address)); +} + +void Assembler::BGE(GPR rs1, GPR rs2, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); 
+ BGE(rs1, rs2, static_cast(address)); +} + +void Assembler::BGEU(GPR rs1, GPR rs2, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BGEU(rs1, rs2, static_cast(address)); +} + +void Assembler::BGEZ(GPR rs, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BGEZ(rs, static_cast(address)); +} + +void Assembler::BGT(GPR rs, GPR rt, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BGT(rs, rt, static_cast(address)); +} + +void Assembler::BGTU(GPR rs, GPR rt, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BGTU(rs, rt, static_cast(address)); +} + +void Assembler::BGTZ(GPR rs, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BGTZ(rs, static_cast(address)); +} + +void Assembler::BLE(GPR rs, GPR rt, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BLE(rs, rt, static_cast(address)); +} + +void Assembler::BLEU(GPR rs, GPR rt, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BLEU(rs, rt, static_cast(address)); +} + +void Assembler::BLEZ(GPR rs, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BLEZ(rs, static_cast(address)); +} + +void Assembler::BLT(GPR rs1, GPR rs2, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BLT(rs1, rs2, static_cast(address)); +} + +void Assembler::BLTU(GPR rs1, GPR rs2, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BLTU(rs1, rs2, static_cast(address)); +} + +void Assembler::BLTZ(GPR rs, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BLTZ(rs, static_cast(address)); +} + +void Assembler::BNE(GPR rs1, GPR rs2, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BNE(rs1, rs2, static_cast(address)); +} + +void Assembler::BNEZ(GPR rs, Label* label) noexcept { + const auto address = LinkAndGetOffset(label); + BNEZ(rs, static_cast(address)); +} + +void Assembler::BEQ(GPR rs1, GPR rs2, int32_t imm) noexcept { + BISCUIT_ASSERT(IsValidBTypeImm(imm)); + EmitBType(m_buffer, static_cast(imm), rs2, rs1, 0b000, 0b1100011); +} + +void Assembler::BEQZ(GPR rs, int32_t imm) noexcept { + BEQ(rs, x0, imm); +} + +void Assembler::BGE(GPR rs1, GPR rs2, int32_t imm) noexcept { + BISCUIT_ASSERT(IsValidBTypeImm(imm)); + EmitBType(m_buffer, static_cast(imm), rs2, rs1, 0b101, 0b1100011); +} + +void Assembler::BGEU(GPR rs1, GPR rs2, int32_t imm) noexcept { + BISCUIT_ASSERT(IsValidBTypeImm(imm)); + EmitBType(m_buffer, static_cast(imm), rs2, rs1, 0b111, 0b1100011); +} + +void Assembler::BGEZ(GPR rs, int32_t imm) noexcept { + BGE(rs, x0, imm); +} + +void Assembler::BGT(GPR rs, GPR rt, int32_t imm) noexcept { + BLT(rt, rs, imm); +} + +void Assembler::BGTU(GPR rs, GPR rt, int32_t imm) noexcept { + BLTU(rt, rs, imm); +} + +void Assembler::BGTZ(GPR rs, int32_t imm) noexcept { + BLT(x0, rs, imm); +} + +void Assembler::BLE(GPR rs, GPR rt, int32_t imm) noexcept { + BGE(rt, rs, imm); +} + +void Assembler::BLEU(GPR rs, GPR rt, int32_t imm) noexcept { + BGEU(rt, rs, imm); +} + +void Assembler::BLEZ(GPR rs, int32_t imm) noexcept { + BGE(x0, rs, imm); +} + +void Assembler::BLT(GPR rs1, GPR rs2, int32_t imm) noexcept { + BISCUIT_ASSERT(IsValidBTypeImm(imm)); + EmitBType(m_buffer, static_cast(imm), rs2, rs1, 0b100, 0b1100011); +} + +void Assembler::BLTU(GPR rs1, GPR rs2, int32_t imm) noexcept { + BISCUIT_ASSERT(IsValidBTypeImm(imm)); + EmitBType(m_buffer, static_cast(imm), rs2, rs1, 0b110, 0b1100011); +} + +void 
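+// Each Label overload above resolves its offset through LinkAndGetOffset():
+// an already-bound label yields its final displacement, while an unbound
+// label is recorded and patched once Bind() is called (see label.hpp). A
+// minimal forward-branch sketch (assuming an Assembler instance `as`; the
+// register choice is arbitrary):
+//
+//   Label skip;
+//   as.BEQZ(a0, &skip);    // branch over the increment when a0 == 0
+//   as.ADDI(a1, a1, 1);
+//   as.Bind(&skip);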
Assembler::BLTZ(GPR rs, int32_t imm) noexcept {
+    BLT(rs, x0, imm);
+}
+
+void Assembler::BNE(GPR rs1, GPR rs2, int32_t imm) noexcept {
+    BISCUIT_ASSERT(IsValidBTypeImm(imm));
+    EmitBType(m_buffer, static_cast<uint32_t>(imm), rs2, rs1, 0b001, 0b1100011);
+}
+
+void Assembler::BNEZ(GPR rs, int32_t imm) noexcept {
+    BNE(x0, rs, imm);
+}
+
+void Assembler::CALL(int32_t offset) noexcept {
+    const auto uimm = static_cast<uint32_t>(offset);
+    const auto lower = uimm & 0xFFF;
+    const auto upper = (uimm & 0xFFFFF000) >> 12;
+    const auto needs_increment = (uimm & 0x800) != 0;
+
+    // Sign-extend the lower portion if its MSB is set.
+    const auto new_lower = needs_increment ? static_cast<int32_t>(lower << 20) >> 20
+                                           : static_cast<int32_t>(lower);
+    const auto new_upper = needs_increment ? upper + 1 : upper;
+
+    AUIPC(x1, static_cast<int32_t>(new_upper));
+    JALR(x1, new_lower, x1);
+}
+
+void Assembler::EBREAK() noexcept {
+    m_buffer.Emit32(0x00100073);
+}
+
+void Assembler::ECALL() noexcept {
+    m_buffer.Emit32(0x00000073);
+}
+
+void Assembler::FENCE() noexcept {
+    FENCE(FenceOrder::IORW, FenceOrder::IORW);
+}
+
+void Assembler::FENCE(FenceOrder pred, FenceOrder succ) noexcept {
+    EmitFENCE(m_buffer, 0b0000, pred, succ, x0, 0b000, x0, 0b0001111);
+}
+
+void Assembler::FENCEI(GPR rd, GPR rs, uint32_t imm) noexcept {
+    m_buffer.Emit32(((imm & 0xFFF) << 20) | (rs.Index() << 15) | 0x1000U | (rd.Index() << 7) | 0b0001111);
+}
+
+void Assembler::FENCETSO() noexcept {
+    EmitFENCE(m_buffer, 0b1000, FenceOrder::RW, FenceOrder::RW, x0, 0b000, x0, 0b0001111);
+}
+
+void Assembler::J(Label* label) noexcept {
+    const auto address = LinkAndGetOffset(label);
+    BISCUIT_ASSERT(IsValidJTypeImm(address));
+    J(static_cast<int32_t>(address));
+}
+
+void Assembler::JAL(Label* label) noexcept {
+    const auto address = LinkAndGetOffset(label);
+    BISCUIT_ASSERT(IsValidJTypeImm(address));
+    JAL(static_cast<int32_t>(address));
+}
+
+void Assembler::JAL(GPR rd, Label* label) noexcept {
+    const auto address = LinkAndGetOffset(label);
+    BISCUIT_ASSERT(IsValidJTypeImm(address));
+    JAL(rd, static_cast<int32_t>(address));
+}
+
+void Assembler::J(int32_t imm) noexcept {
+    BISCUIT_ASSERT(IsValidJTypeImm(imm));
+    JAL(x0, imm);
+}
+
+void Assembler::JAL(int32_t imm) noexcept {
+    BISCUIT_ASSERT(IsValidJTypeImm(imm));
+    EmitJType(m_buffer, static_cast<uint32_t>(imm), x1, 0b1101111);
+}
+
+void Assembler::JAL(GPR rd, int32_t imm) noexcept {
+    BISCUIT_ASSERT(IsValidJTypeImm(imm));
+    EmitJType(m_buffer, static_cast<uint32_t>(imm), rd, 0b1101111);
+}
+
+void Assembler::JALR(GPR rs) noexcept {
+    JALR(x1, 0, rs);
+}
+
+void Assembler::JALR(GPR rd, int32_t imm, GPR rs1) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(imm));
+    EmitIType(m_buffer, static_cast<uint32_t>(imm), rs1, 0b000, rd, 0b1100111);
+}
+
+void Assembler::JR(GPR rs) noexcept {
+    JALR(x0, 0, rs);
+}
+
+void Assembler::LB(GPR rd, int32_t imm, GPR rs) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(imm));
+    EmitIType(m_buffer, static_cast<uint32_t>(imm), rs, 0b000, rd, 0b0000011);
+}
+
+void Assembler::LBU(GPR rd, int32_t imm, GPR rs) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(imm));
+    EmitIType(m_buffer, static_cast<uint32_t>(imm), rs, 0b100, rd, 0b0000011);
+}
+
+void Assembler::LH(GPR rd, int32_t imm, GPR rs) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(imm));
+    EmitIType(m_buffer, static_cast<uint32_t>(imm), rs, 0b001, rd, 0b0000011);
+}
+
+void Assembler::LHU(GPR rd, int32_t imm, GPR rs) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(imm));
+    EmitIType(m_buffer, static_cast<uint32_t>(imm), rs, 0b101, rd, 0b0000011);
+}
+
+void Assembler::LI(GPR rd, uint32_t imm) noexcept {
+    const auto lower = imm & 0xFFF;
+    const auto upper = (imm & 0xFFFFF000) >> 12;
+    const auto simm = static_cast<int32_t>(imm);
+
+    // If the immediate can fit within 12 bits, we only need to emit an ADDI.
+    if (IsValidSigned12BitImm(simm)) {
+        ADDI(rd, x0, static_cast<int32_t>(lower));
+    } else {
+        const bool needs_increment = (lower & 0x800) != 0;
+        const auto upper_imm = needs_increment ? upper + 1 : upper;
+
+        // Note that we add 1 to the upper portion of the immediate if the lower
+        // immediate's most significant bit is set. This is necessary, as ADDI
+        // sign-extends its 12-bit immediate before performing addition.
+        //
+        // In the event of the sign-extension, this means that we'll be adding
+        // an equivalent of "lower - 4096" to the upper immediate.
+        //
+        // Since the upper part's least significant bit is bit 12, adding 1 to
+        // it is equivalent to adding 4096, which counteracts the
+        // sign-extension, preserving the value.
+
+        LUI(rd, upper_imm);
+        ADDI(rd, rd, static_cast<int32_t>(lower));
+    }
+}
+
+void Assembler::LUI(GPR rd, uint32_t imm) noexcept {
+    EmitUType(m_buffer, imm, rd, 0b0110111);
+}
+
+void Assembler::LW(GPR rd, int32_t imm, GPR rs) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(imm));
+    EmitIType(m_buffer, static_cast<uint32_t>(imm), rs, 0b010, rd, 0b0000011);
+}
+
+void Assembler::MV(GPR rd, GPR rs) noexcept {
+    ADDI(rd, rs, 0);
+}
+
+void Assembler::NEG(GPR rd, GPR rs) noexcept {
+    SUB(rd, x0, rs);
+}
+
+void Assembler::NOP() noexcept {
+    ADDI(x0, x0, 0);
+}
+
+void Assembler::NOT(GPR rd, GPR rs) noexcept {
+    XORI(rd, rs, UINT32_MAX);
+}
+
+void Assembler::OR(GPR rd, GPR lhs, GPR rhs) noexcept {
+    EmitRType(m_buffer, 0b0000000, rhs, lhs, 0b110, rd, 0b0110011);
+}
+
+void Assembler::ORI(GPR rd, GPR rs, uint32_t imm) noexcept {
+    EmitIType(m_buffer, imm, rs, 0b110, rd, 0b0010011);
+}
+
+void Assembler::PAUSE() noexcept {
+    m_buffer.Emit32(0x0100000F);
+}
+
+void Assembler::RET() noexcept {
+    JALR(x0, 0, x1);
+}
+
+void Assembler::SB(GPR rs2, int32_t imm, GPR rs1) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(imm));
+    EmitSType(m_buffer, static_cast<uint32_t>(imm), rs2, rs1, 0b000, 0b0100011);
+}
+
+void Assembler::SEQZ(GPR rd, GPR rs) noexcept {
+    SLTIU(rd, rs, 1);
+}
+
+void Assembler::SGTZ(GPR rd, GPR rs) noexcept {
+    SLT(rd, x0, rs);
+}
+
+void Assembler::SH(GPR rs2, int32_t imm, GPR rs1) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(imm));
+    EmitSType(m_buffer, static_cast<uint32_t>(imm), rs2, rs1, 0b001, 0b0100011);
+}
+
+void Assembler::SLL(GPR rd, GPR lhs, GPR rhs) noexcept {
+    EmitRType(m_buffer, 0b0000000, rhs, lhs, 0b001, rd, 0b0110011);
+}
+
+void Assembler::SLLI(GPR rd, GPR rs, uint32_t shift) noexcept {
+    BISCUIT_ASSERT(shift <= 31);
+    EmitIType(m_buffer, shift & 0x1F, rs, 0b001, rd, 0b0010011);
+}
+
+void Assembler::SLT(GPR rd, GPR lhs, GPR rhs) noexcept {
+    EmitRType(m_buffer, 0b0000000, rhs, lhs, 0b010, rd, 0b0110011);
+}
+
+void Assembler::SLTI(GPR rd, GPR rs, int32_t imm) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(imm));
+    EmitIType(m_buffer, static_cast<uint32_t>(imm), rs, 0b010, rd, 0b0010011);
+}
+
+void Assembler::SLTIU(GPR rd, GPR rs, int32_t imm) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(imm));
+    EmitIType(m_buffer, static_cast<uint32_t>(imm), rs, 0b011, rd, 0b0010011);
+}
+
+void Assembler::SLTU(GPR rd, GPR lhs, GPR rhs) noexcept {
+    EmitRType(m_buffer, 0b0000000, rhs, lhs, 0b011, rd, 0b0110011);
+}
+
+void Assembler::SLTZ(GPR rd, GPR rs) noexcept {
+    SLT(rd, rs, x0);
+}
+
+void Assembler::SNEZ(GPR rd, GPR rs) noexcept {
+    SLTU(rd, x0, rs);
+}
+
+void
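+// Worked example for the LI() expansion above (value chosen for
+// illustration): LI(a0, 0xDEADBEEF) splits into upper = 0xDEADB and
+// lower = 0xEEF. Bit 11 of the lower part is set, so ADDI would sign-extend
+// 0xEEF to -273; the upper part is therefore incremented to 0xDEADC:
+//
+//   LUI  a0, 0xDEADC   // a0 = 0xDEADC000
+//   ADDI a0, a0, -273  // a0 = 0xDEADC000 - 0x111 = 0xDEADBEEF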
Assembler::SRA(GPR rd, GPR lhs, GPR rhs) noexcept { + EmitRType(m_buffer, 0b0100000, rhs, lhs, 0b101, rd, 0b0110011); +} + +void Assembler::SRAI(GPR rd, GPR rs, uint32_t shift) noexcept { + BISCUIT_ASSERT(shift <= 31); + EmitIType(m_buffer, (0b0100000 << 5) | (shift & 0x1F), rs, 0b101, rd, 0b0010011); +} + +void Assembler::SRL(GPR rd, GPR lhs, GPR rhs) noexcept { + EmitRType(m_buffer, 0b0000000, rhs, lhs, 0b101, rd, 0b0110011); +} + +void Assembler::SRLI(GPR rd, GPR rs, uint32_t shift) noexcept { + BISCUIT_ASSERT(shift <= 31); + EmitIType(m_buffer, shift & 0x1F, rs, 0b101, rd, 0b0010011); +} + +void Assembler::SUB(GPR rd, GPR lhs, GPR rhs) noexcept { + EmitRType(m_buffer, 0b0100000, rhs, lhs, 0b000, rd, 0b0110011); +} + +void Assembler::SW(GPR rs2, int32_t imm, GPR rs1) noexcept { + BISCUIT_ASSERT(IsValidSigned12BitImm(imm)); + EmitSType(m_buffer, static_cast(imm), rs2, rs1, 0b010, 0b0100011); +} + +void Assembler::XOR(GPR rd, GPR lhs, GPR rhs) noexcept { + EmitRType(m_buffer, 0b0000000, rhs, lhs, 0b100, rd, 0b0110011); +} + +void Assembler::XORI(GPR rd, GPR rs, uint32_t imm) noexcept { + EmitIType(m_buffer, imm, rs, 0b100, rd, 0b0010011); +} + +// RV64I Instructions + +void Assembler::ADDIW(GPR rd, GPR rs, int32_t imm) noexcept { + EmitIType(m_buffer, static_cast(imm), rs, 0b000, rd, 0b0011011); +} + +void Assembler::ADDW(GPR rd, GPR lhs, GPR rhs) noexcept { + EmitRType(m_buffer, 0b0000000, rhs, lhs, 0b000, rd, 0b0111011); +} + +void Assembler::LD(GPR rd, int32_t imm, GPR rs) noexcept { + BISCUIT_ASSERT(IsValidSigned12BitImm(imm)); + EmitIType(m_buffer, static_cast(imm), rs, 0b011, rd, 0b0000011); +} + +void Assembler::LWU(GPR rd, int32_t imm, GPR rs) noexcept { + BISCUIT_ASSERT(IsValidSigned12BitImm(imm)); + EmitIType(m_buffer, static_cast(imm), rs, 0b110, rd, 0b0000011); +} + +void Assembler::SD(GPR rs2, int32_t imm, GPR rs1) noexcept { + BISCUIT_ASSERT(IsValidSigned12BitImm(imm)); + EmitSType(m_buffer, static_cast(imm), rs2, rs1, 0b011, 0b0100011); +} + +void Assembler::SRAI64(GPR rd, GPR rs, uint32_t shift) noexcept { + BISCUIT_ASSERT(shift <= 63); + EmitIType(m_buffer, (0b0100000 << 5) | (shift & 0x3F), rs, 0b101, rd, 0b0010011); +} +void Assembler::SLLI64(GPR rd, GPR rs, uint32_t shift) noexcept { + BISCUIT_ASSERT(shift <= 63); + EmitIType(m_buffer, shift & 0x3F, rs, 0b001, rd, 0b0010011); +} +void Assembler::SRLI64(GPR rd, GPR rs, uint32_t shift) noexcept { + BISCUIT_ASSERT(shift <= 63); + EmitIType(m_buffer, shift & 0x3F, rs, 0b101, rd, 0b0010011); +} + +void Assembler::SLLIW(GPR rd, GPR rs, uint32_t shift) noexcept { + BISCUIT_ASSERT(shift <= 31); + EmitIType(m_buffer, shift & 0x1F, rs, 0b001, rd, 0b0011011); +} +void Assembler::SRAIW(GPR rd, GPR rs, uint32_t shift) noexcept { + BISCUIT_ASSERT(shift <= 31); + EmitIType(m_buffer, (0b0100000 << 5) | (shift & 0x1F), rs, 0b101, rd, 0b0011011); +} +void Assembler::SRLIW(GPR rd, GPR rs, uint32_t shift) noexcept { + BISCUIT_ASSERT(shift <= 31); + EmitIType(m_buffer, shift & 0x1F, rs, 0b101, rd, 0b0011011); +} + +void Assembler::SLLW(GPR rd, GPR lhs, GPR rhs) noexcept { + EmitRType(m_buffer, 0b0000000, rhs, lhs, 0b001, rd, 0b0111011); +} +void Assembler::SRAW(GPR rd, GPR lhs, GPR rhs) noexcept { + EmitRType(m_buffer, 0b0100000, rhs, lhs, 0b101, rd, 0b0111011); +} +void Assembler::SRLW(GPR rd, GPR lhs, GPR rhs) noexcept { + EmitRType(m_buffer, 0b0000000, rhs, lhs, 0b101, rd, 0b0111011); +} + +void Assembler::SUBW(GPR rd, GPR lhs, GPR rhs) noexcept { + EmitRType(m_buffer, 0b0100000, rhs, lhs, 0b000, rd, 0b0111011); +} + +// Zicsr Extension 
Instructions + +void Assembler::CSRRC(GPR rd, CSR csr, GPR rs) noexcept { + EmitIType(m_buffer, static_cast(csr), rs, 0b011, rd, 0b1110011); +} +void Assembler::CSRRCI(GPR rd, CSR csr, uint32_t imm) noexcept { + BISCUIT_ASSERT(imm <= 0x1F); + EmitIType(m_buffer, static_cast(csr), GPR{imm & 0x1F}, 0b111, rd, 0b1110011); +} +void Assembler::CSRRS(GPR rd, CSR csr, GPR rs) noexcept { + EmitIType(m_buffer, static_cast(csr), rs, 0b010, rd, 0b1110011); +} +void Assembler::CSRRSI(GPR rd, CSR csr, uint32_t imm) noexcept { + BISCUIT_ASSERT(imm <= 0x1F); + EmitIType(m_buffer, static_cast(csr), GPR{imm & 0x1F}, 0b110, rd, 0b1110011); +} +void Assembler::CSRRW(GPR rd, CSR csr, GPR rs) noexcept { + EmitIType(m_buffer, static_cast(csr), rs, 0b001, rd, 0b1110011); +} +void Assembler::CSRRWI(GPR rd, CSR csr, uint32_t imm) noexcept { + BISCUIT_ASSERT(imm <= 0x1F); + EmitIType(m_buffer, static_cast(csr), GPR{imm & 0x1F}, 0b101, rd, 0b1110011); +} + +void Assembler::CSRR(GPR rd, CSR csr) noexcept { + CSRRS(rd, csr, x0); +} +void Assembler::CSWR(CSR csr, GPR rs) noexcept { + CSRRW(x0, csr, rs); +} + +void Assembler::CSRS(CSR csr, GPR rs) noexcept { + CSRRS(x0, csr, rs); +} +void Assembler::CSRC(CSR csr, GPR rs) noexcept { + CSRRC(x0, csr, rs); +} + +void Assembler::CSRCI(CSR csr, uint32_t imm) noexcept { + CSRRCI(x0, csr, imm); +} +void Assembler::CSRSI(CSR csr, uint32_t imm) noexcept { + CSRRSI(x0, csr, imm); +} +void Assembler::CSRWI(CSR csr, uint32_t imm) noexcept { + CSRRWI(x0, csr, imm); +} + +void Assembler::FRCSR(GPR rd) noexcept { + CSRRS(rd, CSR::FCSR, x0); +} +void Assembler::FSCSR(GPR rd, GPR rs) noexcept { + CSRRW(rd, CSR::FCSR, rs); +} +void Assembler::FSCSR(GPR rs) noexcept { + CSRRW(x0, CSR::FCSR, rs); +} + +void Assembler::FRRM(GPR rd) noexcept { + CSRRS(rd, CSR::FRM, x0); +} +void Assembler::FSRM(GPR rd, GPR rs) noexcept { + CSRRW(rd, CSR::FRM, rs); +} +void Assembler::FSRM(GPR rs) noexcept { + CSRRW(x0, CSR::FRM, rs); +} + +void Assembler::FSRMI(GPR rd, uint32_t imm) noexcept { + CSRRWI(rd, CSR::FRM, imm); +} +void Assembler::FSRMI(uint32_t imm) noexcept { + CSRRWI(x0, CSR::FRM, imm); +} + +void Assembler::FRFLAGS(GPR rd) noexcept { + CSRRS(rd, CSR::FFlags, x0); +} +void Assembler::FSFLAGS(GPR rd, GPR rs) noexcept { + CSRRW(rd, CSR::FFlags, rs); +} +void Assembler::FSFLAGS(GPR rs) noexcept { + CSRRW(x0, CSR::FFlags, rs); +} + +void Assembler::FSFLAGSI(GPR rd, uint32_t imm) noexcept { + CSRRWI(rd, CSR::FFlags, imm); +} +void Assembler::FSFLAGSI(uint32_t imm) noexcept { + CSRRWI(x0, CSR::FFlags, imm); +} + +void Assembler::RDCYCLE(GPR rd) noexcept { + CSRRS(rd, CSR::Cycle, x0); +} +void Assembler::RDCYCLEH(GPR rd) noexcept { + CSRRS(rd, CSR::CycleH, x0); +} + +void Assembler::RDINSTRET(GPR rd) noexcept { + CSRRS(rd, CSR::InstRet, x0); +} +void Assembler::RDINSTRETH(GPR rd) noexcept { + CSRRS(rd, CSR::InstRetH, x0); +} + +void Assembler::RDTIME(GPR rd) noexcept { + CSRRS(rd, CSR::Time, x0); +} +void Assembler::RDTIMEH(GPR rd) noexcept { + CSRRS(rd, CSR::TimeH, x0); +} + +// Zihintntl Extension Instructions + +void Assembler::C_NTL_ALL() noexcept { + C_ADD(x0, x5); +} +void Assembler::C_NTL_S1() noexcept { + C_ADD(x0, x4); +} +void Assembler::C_NTL_P1() noexcept { + C_ADD(x0, x2); +} +void Assembler::C_NTL_PALL() noexcept { + C_ADD(x0, x3); +} +void Assembler::NTL_ALL() noexcept { + ADD(x0, x0, x5); +} +void Assembler::NTL_S1() noexcept { + ADD(x0, x0, x4); +} +void Assembler::NTL_P1() noexcept { + ADD(x0, x0, x2); +} +void Assembler::NTL_PALL() noexcept { + ADD(x0, x0, x3); +} + +// RV32M 
Extension Instructions + +void Assembler::DIV(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b100, rd, 0b0110011); +} +void Assembler::DIVU(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b101, rd, 0b0110011); +} +void Assembler::MUL(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b000, rd, 0b0110011); +} +void Assembler::MULH(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b001, rd, 0b0110011); +} +void Assembler::MULHSU(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b010, rd, 0b0110011); +} +void Assembler::MULHU(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b011, rd, 0b0110011); +} +void Assembler::REM(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b110, rd, 0b0110011); +} +void Assembler::REMU(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b111, rd, 0b0110011); +} + +// RV64M Extension Instructions + +void Assembler::DIVW(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b100, rd, 0b0111011); +} +void Assembler::DIVUW(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b101, rd, 0b0111011); +} +void Assembler::MULW(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b000, rd, 0b0111011); +} +void Assembler::REMW(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b110, rd, 0b0111011); +} +void Assembler::REMUW(GPR rd, GPR rs1, GPR rs2) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, 0b111, rd, 0b0111011); +} + +// RV32A Extension Instructions + +void Assembler::AMOADD_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b00000, ordering, rs2, rs1, 0b010, rd, 0b0101111); +} +void Assembler::AMOAND_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b01100, ordering, rs2, rs1, 0b010, rd, 0b0101111); +} +void Assembler::AMOMAX_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b10100, ordering, rs2, rs1, 0b010, rd, 0b0101111); +} +void Assembler::AMOMAXU_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b11100, ordering, rs2, rs1, 0b010, rd, 0b0101111); +} +void Assembler::AMOMIN_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b10000, ordering, rs2, rs1, 0b010, rd, 0b0101111); +} +void Assembler::AMOMINU_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b11000, ordering, rs2, rs1, 0b010, rd, 0b0101111); +} +void Assembler::AMOOR_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b01000, ordering, rs2, rs1, 0b010, rd, 0b0101111); +} +void Assembler::AMOSWAP_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b00001, ordering, rs2, rs1, 0b010, rd, 0b0101111); +} +void Assembler::AMOXOR_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b00100, ordering, rs2, rs1, 0b010, rd, 0b0101111); +} +void Assembler::LR_W(Ordering ordering, GPR rd, GPR rs) noexcept { + EmitAtomic(m_buffer, 0b00010, ordering, x0, rs, 0b010, rd, 0b0101111); +} +void Assembler::SC_W(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b00011, ordering, rs2, rs1, 0b010, rd, 0b0101111); +} + +// RV64A Extension Instructions + +void 
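+// The LR/SC pair above composes into the usual compare-and-swap retry loop.
+// A minimal sketch (assuming an Assembler instance `as` and an
+// acquire-release Ordering::AQRL enumerator; registers are arbitrary:
+// a0 = address, a1 = expected, a2 = desired):
+//
+//   Label retry, done;
+//   as.Bind(&retry);
+//   as.LR_W(Ordering::AQRL, t0, a0);      // t0 = *a0, with reservation
+//   as.BNE(t0, a1, &done);                // value differs: bail out
+//   as.SC_W(Ordering::AQRL, t1, a2, a0);  // try *a0 = a2; t1 = 0 on success
+//   as.BNEZ(t1, &retry);                  // reservation lost: retry
+//   as.Bind(&done);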
Assembler::AMOADD_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b00000, ordering, rs2, rs1, 0b011, rd, 0b0101111); +} +void Assembler::AMOAND_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b01100, ordering, rs2, rs1, 0b011, rd, 0b0101111); +} +void Assembler::AMOMAX_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b10100, ordering, rs2, rs1, 0b011, rd, 0b0101111); +} +void Assembler::AMOMAXU_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b11100, ordering, rs2, rs1, 0b011, rd, 0b0101111); +} +void Assembler::AMOMIN_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b10000, ordering, rs2, rs1, 0b011, rd, 0b0101111); +} +void Assembler::AMOMINU_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b11000, ordering, rs2, rs1, 0b011, rd, 0b0101111); +} +void Assembler::AMOOR_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b01000, ordering, rs2, rs1, 0b011, rd, 0b0101111); +} +void Assembler::AMOSWAP_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b00001, ordering, rs2, rs1, 0b011, rd, 0b0101111); +} +void Assembler::AMOXOR_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b00100, ordering, rs2, rs1, 0b011, rd, 0b0101111); +} +void Assembler::LR_D(Ordering ordering, GPR rd, GPR rs) noexcept { + EmitAtomic(m_buffer, 0b00010, ordering, x0, rs, 0b011, rd, 0b0101111); +} +void Assembler::SC_D(Ordering ordering, GPR rd, GPR rs2, GPR rs1) noexcept { + EmitAtomic(m_buffer, 0b00011, ordering, rs2, rs1, 0b011, rd, 0b0101111); +} + +// RV32F Extension Instructions + +void Assembler::FADD_S(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0000000, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FCLASS_S(GPR rd, FPR rs1) noexcept { + EmitRType(m_buffer, 0b1110000, f0, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FCVT_S_W(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101000, f0, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_S_WU(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101000, f1, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_W_S(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1100000, f0, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_WU_S(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1100000, f1, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FDIV_S(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0001100, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FEQ_S(GPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b1010000, rs2, rs1, 0b010, rd, 0b1010011); +} +void Assembler::FLE_S(GPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b1010000, rs2, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FLT_S(GPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b1010000, rs2, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FLW(FPR rd, int32_t offset, GPR rs) noexcept { + BISCUIT_ASSERT(IsValidSigned12BitImm(offset)); + EmitIType(m_buffer, static_cast(offset), rs, 0b010, rd, 0b0000111); +} +void Assembler::FMADD_S(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b00, rs2, rs1, rmode, rd, 0b1000011); +} +void Assembler::FMAX_S(FPR rd, FPR rs1, FPR rs2) 
noexcept { + EmitRType(m_buffer, 0b0010100, rs2, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FMIN_S(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010100, rs2, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FMSUB_S(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b00, rs2, rs1, rmode, rd, 0b1000111); +} +void Assembler::FMUL_S(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0001000, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FMV_W_X(FPR rd, GPR rs1) noexcept { + EmitRType(m_buffer, 0b1111000, f0, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FMV_X_W(GPR rd, FPR rs1) noexcept { + EmitRType(m_buffer, 0b1110000, f0, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FNMADD_S(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b00, rs2, rs1, rmode, rd, 0b1001111); +} +void Assembler::FNMSUB_S(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b00, rs2, rs1, rmode, rd, 0b1001011); +} +void Assembler::FSGNJ_S(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010000, rs2, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FSGNJN_S(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010000, rs2, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FSGNJX_S(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010000, rs2, rs1, 0b010, rd, 0b1010011); +} +void Assembler::FSQRT_S(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0101100, f0, rs1, rmode, rd, 0b1010011); +} +void Assembler::FSUB_S(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0000100, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FSW(FPR rs2, int32_t offset, GPR rs1) noexcept { + BISCUIT_ASSERT(IsValidSigned12BitImm(offset)); + EmitSType(m_buffer, static_cast(offset), rs2, rs1, 0b010, 0b0100111); +} + +void Assembler::FABS_S(FPR rd, FPR rs) noexcept { + FSGNJX_S(rd, rs, rs); +} +void Assembler::FMV_S(FPR rd, FPR rs) noexcept { + FSGNJ_S(rd, rs, rs); +} +void Assembler::FNEG_S(FPR rd, FPR rs) noexcept { + FSGNJN_S(rd, rs, rs); +} + +// RV64F Extension Instructions + +void Assembler::FCVT_L_S(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1100000, f2, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_LU_S(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1100000, f3, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_S_L(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101000, f2, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_S_LU(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101000, f3, rs1, static_cast(rmode), rd, 0b1010011); +} + +// RV32D Extension Instructions + +void Assembler::FADD_D(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0000001, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FCLASS_D(GPR rd, FPR rs1) noexcept { + EmitRType(m_buffer, 0b1110001, f0, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FCVT_D_W(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101001, f0, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_D_WU(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101001, f1, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_W_D(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1100001, f0, rs1, static_cast(rmode), rd, 0b1010011); +} +void 
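+// Note: the f0/f1/f2/f3 arguments passed to EmitRType() in the FCVT_*
+// emitters are not real source operands. FCVT encodes its conversion
+// selector in the rs2 register field (0 = W, 1 = WU, 2 = L, 3 = LU), so the
+// FPR constant is used purely to populate those bits.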
Assembler::FCVT_WU_D(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1100001, f1, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_D_S(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100001, f0, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_S_D(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100000, f1, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FDIV_D(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0001101, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FEQ_D(GPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b1010001, rs2, rs1, 0b010, rd, 0b1010011); +} +void Assembler::FLE_D(GPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b1010001, rs2, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FLT_D(GPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b1010001, rs2, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FLD(FPR rd, int32_t offset, GPR rs) noexcept { + BISCUIT_ASSERT(IsValidSigned12BitImm(offset)); + EmitIType(m_buffer, static_cast(offset), rs, 0b011, rd, 0b0000111); +} +void Assembler::FMADD_D(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b01, rs2, rs1, rmode, rd, 0b1000011); +} +void Assembler::FMAX_D(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010101, rs2, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FMIN_D(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010101, rs2, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FMSUB_D(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b01, rs2, rs1, rmode, rd, 0b1000111); +} +void Assembler::FMUL_D(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0001001, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FNMADD_D(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b01, rs2, rs1, rmode, rd, 0b1001111); +} +void Assembler::FNMSUB_D(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b01, rs2, rs1, rmode, rd, 0b1001011); +} +void Assembler::FSGNJ_D(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010001, rs2, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FSGNJN_D(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010001, rs2, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FSGNJX_D(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010001, rs2, rs1, 0b010, rd, 0b1010011); +} +void Assembler::FSQRT_D(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0101101, f0, rs1, rmode, rd, 0b1010011); +} +void Assembler::FSUB_D(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0000101, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FSD(FPR rs2, int32_t offset, GPR rs1) noexcept { + BISCUIT_ASSERT(IsValidSigned12BitImm(offset)); + EmitSType(m_buffer, static_cast(offset), rs2, rs1, 0b011, 0b0100111); +} + +void Assembler::FABS_D(FPR rd, FPR rs) noexcept { + FSGNJX_D(rd, rs, rs); +} +void Assembler::FMV_D(FPR rd, FPR rs) noexcept { + FSGNJ_D(rd, rs, rs); +} +void Assembler::FNEG_D(FPR rd, FPR rs) noexcept { + FSGNJN_D(rd, rs, rs); +} + +// RV64D Extension Instructions + +void Assembler::FCVT_L_D(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1100001, f2, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_LU_D(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 
0b1100001, f3, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_D_L(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101001, f2, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_D_LU(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101001, f3, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FMV_D_X(FPR rd, GPR rs1) noexcept { + EmitRType(m_buffer, 0b1111001, f0, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FMV_X_D(GPR rd, FPR rs1) noexcept { + EmitRType(m_buffer, 0b1110001, f0, rs1, 0b000, rd, 0b1010011); +} + +// RV32Q Extension Instructions + +void Assembler::FADD_Q(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0000011, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FCLASS_Q(GPR rd, FPR rs1) noexcept { + EmitRType(m_buffer, 0b1110011, f0, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FCVT_Q_W(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101011, f0, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_Q_WU(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101011, f1, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_W_Q(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1100011, f0, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_WU_Q(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1100011, f1, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_Q_D(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100011, f1, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_D_Q(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100001, f3, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_Q_S(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100011, f0, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_S_Q(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100000, f3, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FDIV_Q(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0001111, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FEQ_Q(GPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b1010011, rs2, rs1, 0b010, rd, 0b1010011); +} +void Assembler::FLE_Q(GPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b1010011, rs2, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FLT_Q(GPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b1010011, rs2, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FLQ(FPR rd, int32_t offset, GPR rs) noexcept { + BISCUIT_ASSERT(IsValidSigned12BitImm(offset)); + EmitIType(m_buffer, static_cast(offset), rs, 0b100, rd, 0b0000111); +} +void Assembler::FMADD_Q(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b11, rs2, rs1, rmode, rd, 0b1000011); +} +void Assembler::FMAX_Q(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010111, rs2, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FMIN_Q(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010111, rs2, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FMSUB_Q(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b11, rs2, rs1, rmode, rd, 0b1000111); +} +void Assembler::FMUL_Q(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0001011, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FNMADD_Q(FPR rd, FPR 
rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b11, rs2, rs1, rmode, rd, 0b1001111); +} +void Assembler::FNMSUB_Q(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept { + EmitR4Type(m_buffer, rs3, 0b11, rs2, rs1, rmode, rd, 0b1001011); +} +void Assembler::FSGNJ_Q(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010011, rs2, rs1, 0b000, rd, 0b1010011); +} +void Assembler::FSGNJN_Q(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010011, rs2, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FSGNJX_Q(FPR rd, FPR rs1, FPR rs2) noexcept { + EmitRType(m_buffer, 0b0010011, rs2, rs1, 0b010, rd, 0b1010011); +} +void Assembler::FSQRT_Q(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0101111, f0, rs1, rmode, rd, 0b1010011); +} +void Assembler::FSUB_Q(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0000111, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FSQ(FPR rs2, int32_t offset, GPR rs1) noexcept { + BISCUIT_ASSERT(IsValidSigned12BitImm(offset)); + EmitSType(m_buffer, static_cast(offset), rs2, rs1, 0b100, 0b0100111); +} + +void Assembler::FABS_Q(FPR rd, FPR rs) noexcept { + FSGNJX_Q(rd, rs, rs); +} +void Assembler::FMV_Q(FPR rd, FPR rs) noexcept { + FSGNJ_Q(rd, rs, rs); +} +void Assembler::FNEG_Q(FPR rd, FPR rs) noexcept { + FSGNJN_Q(rd, rs, rs); +} + +// RV64Q Extension Instructions + +void Assembler::FCVT_L_Q(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1100011, f2, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_LU_Q(GPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1100011, f3, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_Q_L(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101011, f2, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_Q_LU(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101011, f3, rs1, static_cast(rmode), rd, 0b1010011); +} + +// RV32Zfh Extension Instructions + +void Assembler::FADD_H(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0000010, rs2, rs1, rmode, rd, 0b1010011); +} +void Assembler::FCLASS_H(GPR rd, FPR rs1) noexcept { + EmitRType(m_buffer, 0b1110010, f0, rs1, 0b001, rd, 0b1010011); +} +void Assembler::FCVT_D_H(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100001, f2, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_H_D(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100010, f1, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_H_Q(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100010, f3, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_H_S(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100010, f0, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_H_W(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101010, f0, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_H_WU(FPR rd, GPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b1101010, f1, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_Q_H(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100011, f2, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_S_H(FPR rd, FPR rs1, RMode rmode) noexcept { + EmitRType(m_buffer, 0b0100000, f2, rs1, static_cast(rmode), rd, 0b1010011); +} +void Assembler::FCVT_W_H(GPR rd, FPR rs1, RMode rmode) noexcept { 
+// RV32Zfh Extension Instructions
+
+void Assembler::FADD_H(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b0000010, rs2, rs1, rmode, rd, 0b1010011);
+}
+void Assembler::FCLASS_H(GPR rd, FPR rs1) noexcept {
+    EmitRType(m_buffer, 0b1110010, f0, rs1, 0b001, rd, 0b1010011);
+}
+void Assembler::FCVT_D_H(FPR rd, FPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b0100001, f2, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_H_D(FPR rd, FPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b0100010, f1, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_H_Q(FPR rd, FPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b0100010, f3, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_H_S(FPR rd, FPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b0100010, f0, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_H_W(FPR rd, GPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b1101010, f0, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_H_WU(FPR rd, GPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b1101010, f1, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_Q_H(FPR rd, FPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b0100011, f2, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_S_H(FPR rd, FPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b0100000, f2, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_W_H(GPR rd, FPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b1100010, f0, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_WU_H(GPR rd, FPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b1100010, f1, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FDIV_H(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b0001110, rs2, rs1, rmode, rd, 0b1010011);
+}
+void Assembler::FEQ_H(GPR rd, FPR rs1, FPR rs2) noexcept {
+    EmitRType(m_buffer, 0b1010010, rs2, rs1, 0b010, rd, 0b1010011);
+}
+void Assembler::FLE_H(GPR rd, FPR rs1, FPR rs2) noexcept {
+    EmitRType(m_buffer, 0b1010010, rs2, rs1, 0b000, rd, 0b1010011);
+}
+void Assembler::FLH(FPR rd, int32_t offset, GPR rs) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(offset));
+    EmitIType(m_buffer, static_cast<uint32_t>(offset), rs, 0b001, rd, 0b0000111);
+}
+void Assembler::FLT_H(GPR rd, FPR rs1, FPR rs2) noexcept {
+    EmitRType(m_buffer, 0b1010010, rs2, rs1, 0b001, rd, 0b1010011);
+}
+void Assembler::FMADD_H(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept {
+    EmitR4Type(m_buffer, rs3, 0b10, rs2, rs1, rmode, rd, 0b1000011);
+}
+void Assembler::FMAX_H(FPR rd, FPR rs1, FPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010110, rs2, rs1, 0b001, rd, 0b1010011);
+}
+void Assembler::FMIN_H(FPR rd, FPR rs1, FPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010110, rs2, rs1, 0b000, rd, 0b1010011);
+}
+void Assembler::FMSUB_H(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept {
+    EmitR4Type(m_buffer, rs3, 0b10, rs2, rs1, rmode, rd, 0b1000111);
+}
+void Assembler::FMUL_H(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b0001010, rs2, rs1, rmode, rd, 0b1010011);
+}
+void Assembler::FMV_H_X(FPR rd, GPR rs1) noexcept {
+    EmitRType(m_buffer, 0b1111010, f0, rs1, 0b000, rd, 0b1010011);
+}
+void Assembler::FMV_X_H(GPR rd, FPR rs1) noexcept {
+    EmitRType(m_buffer, 0b1110010, f0, rs1, 0b000, rd, 0b1010011);
+}
+void Assembler::FNMADD_H(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept {
+    EmitR4Type(m_buffer, rs3, 0b10, rs2, rs1, rmode, rd, 0b1001111);
+}
+void Assembler::FNMSUB_H(FPR rd, FPR rs1, FPR rs2, FPR rs3, RMode rmode) noexcept {
+    EmitR4Type(m_buffer, rs3, 0b10, rs2, rs1, rmode, rd, 0b1001011);
+}
+void Assembler::FSGNJ_H(FPR rd, FPR rs1, FPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010010, rs2, rs1, 0b000, rd, 0b1010011);
+}
+void Assembler::FSGNJN_H(FPR rd, FPR rs1, FPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010010, rs2, rs1, 0b001, rd, 0b1010011);
+}
+void Assembler::FSGNJX_H(FPR rd, FPR rs1, FPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010010, rs2, rs1, 0b010, rd, 0b1010011);
+}
+void Assembler::FSH(FPR rs2, int32_t offset, GPR rs1) noexcept {
+    BISCUIT_ASSERT(IsValidSigned12BitImm(offset));
+    EmitSType(m_buffer, static_cast<uint32_t>(offset), rs2, rs1, 0b001, 0b0100111);
+}
+void Assembler::FSQRT_H(FPR rd, FPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b0101110, f0, rs1, rmode, rd, 0b1010011);
+}
+void Assembler::FSUB_H(FPR rd, FPR rs1, FPR rs2, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b0000110, rs2, rs1, rmode, rd, 0b1010011);
+}
+
+// RV64Zfh Extension Instructions
+
+void Assembler::FCVT_L_H(GPR rd, FPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b1100010, f2, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_LU_H(GPR rd, FPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b1100010, f3, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_H_L(FPR rd, GPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b1101010, f2, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+void Assembler::FCVT_H_LU(FPR rd, GPR rs1, RMode rmode) noexcept {
+    EmitRType(m_buffer, 0b1101010, f3, rs1, static_cast<uint32_t>(rmode), rd, 0b1010011);
+}
+
+// RVB Extension Instructions
+
+void Assembler::ADDUW(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0000100, rs2, rs1, 0b000, rd, 0b0111011);
+}
+
+void Assembler::ANDN(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0100000, rs2, rs1, 0b111, rd, 0b0110011);
+}
+
+void Assembler::BCLR(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0100100, rs2, rs1, 0b001, rd, 0b0110011);
+}
+
+void Assembler::BCLRI(GPR rd, GPR rs, uint32_t bit) noexcept {
+    BISCUIT_ASSERT(bit <= 63);
+    const auto imm = (0b010010U << 6) | bit;
+    EmitIType(m_buffer, imm, rs, 0b001, rd, 0b0010011);
+}
+
+void Assembler::BEXT(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0100100, rs2, rs1, 0b101, rd, 0b0110011);
+}
+
+void Assembler::BEXTI(GPR rd, GPR rs, uint32_t bit) noexcept {
+    BISCUIT_ASSERT(bit <= 63);
+    const auto imm = (0b010010U << 6) | bit;
+    EmitIType(m_buffer, imm, rs, 0b101, rd, 0b0010011);
+}
+
+void Assembler::BINV(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0110100, rs2, rs1, 0b001, rd, 0b0110011);
+}
+
+void Assembler::BINVI(GPR rd, GPR rs, uint32_t bit) noexcept {
+    BISCUIT_ASSERT(bit <= 63);
+    const auto imm = (0b011010U << 6) | bit;
+    EmitIType(m_buffer, imm, rs, 0b001, rd, 0b0010011);
+}
+
+void Assembler::CLMUL(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0000101, rs2, rs1, 0b001, rd, 0b0110011);
+}
+
+void Assembler::CLMULH(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0000101, rs2, rs1, 0b011, rd, 0b0110011);
+}
+
+void Assembler::CLMULR(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0000101, rs2, rs1, 0b010, rd, 0b0110011);
+}
+
+void Assembler::CLZ(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b011000000000, rs, 0b001, rd, 0b0010011);
+}
+
+void Assembler::CLZW(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b011000000000, rs, 0b001, rd, 0b0011011);
+}
+
+void Assembler::CPOP(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b011000000010, rs, 0b001, rd, 0b0010011);
+}
+
+void Assembler::CPOPW(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b011000000010, rs, 0b001, rd, 0b0011011);
+}
+
+void Assembler::CTZ(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b011000000001, rs, 0b001, rd, 0b0010011);
+}
+
+void Assembler::CTZW(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b011000000001, rs, 0b001, rd, 0b0011011);
+}
+
+void Assembler::MAX(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0000101, rs2, rs1, 0b110, rd, 0b0110011);
+}
+
+void Assembler::MAXU(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0000101, rs2, rs1, 0b111, rd, 0b0110011);
+}
+
+void Assembler::MIN(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0000101, rs2, rs1, 0b100, rd, 0b0110011);
+}
+
+void Assembler::MINU(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0000101, rs2, rs1, 0b101, rd, 0b0110011);
+}
+
+void Assembler::ORCB(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b001010000111, rs, 0b101, rd, 0b0010011);
+}
+
+void Assembler::ORN(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0100000, rs2, rs1, 0b110, rd, 0b0110011);
+}
+
+void Assembler::PACK(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0000100, rs2, rs1, 0b100, rd, 0b0110011);
+}
+
+void Assembler::PACKH(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0000100, rs2, rs1, 0b111, rd, 0b0110011);
+}
+
+void Assembler::PACKW(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0000100, rs2, rs1, 0b100, rd, 0b0111011);
+}
+
+void Assembler::REV8_32(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b011010011000, rs, 0b101, rd, 0b0010011);
+}
+
+void Assembler::REV8_64(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b011010111000, rs, 0b101, rd, 0b0010011);
+}
+
+void Assembler::REV_B(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b011010000111, rs, 0b101, rd, 0b0010011);
+}
+
+void Assembler::ROL(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0110000, rs2, rs1, 0b001, rd, 0b0110011);
+}
+
+void Assembler::ROLW(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0110000, rs2, rs1, 0b001, rd, 0b0111011);
+}
+
+void Assembler::ROR(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0110000, rs2, rs1, 0b101, rd, 0b0110011);
+}
+
+void Assembler::RORI(GPR rd, GPR rs, uint32_t rotate_amount) noexcept {
+    BISCUIT_ASSERT(rotate_amount <= 63);
+    const auto imm = (0b011000U << 6) | rotate_amount;
+    EmitIType(m_buffer, imm, rs, 0b101, rd, 0b0010011);
+}
+
+void Assembler::RORIW(GPR rd, GPR rs, uint32_t rotate_amount) noexcept {
+    BISCUIT_ASSERT(rotate_amount <= 63);
+    const auto imm = (0b011000U << 6) | rotate_amount;
+    EmitIType(m_buffer, imm, rs, 0b101, rd, 0b0011011);
+}
+
+void Assembler::RORW(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0110000, rs2, rs1, 0b101, rd, 0b0111011);
+}
+
+void Assembler::SEXTB(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b011000000100, rs, 0b001, rd, 0b0010011);
+}
+
+void Assembler::SEXTH(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b011000000101, rs, 0b001, rd, 0b0010011);
+}
+
+void Assembler::SH1ADD(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010000, rs2, rs1, 0b010, rd, 0b0110011);
+}
+
+void Assembler::SH1ADDUW(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010000, rs2, rs1, 0b010, rd, 0b0111011);
+}
+
+void Assembler::SH2ADD(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010000, rs2, rs1, 0b100, rd, 0b0110011);
+}
+
+void Assembler::SH2ADDUW(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010000, rs2, rs1, 0b100, rd, 0b0111011);
+}
+
+void Assembler::SH3ADD(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010000, rs2, rs1, 0b110, rd, 0b0110011);
+}
+
+void Assembler::SH3ADDUW(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010000, rs2, rs1, 0b110, rd, 0b0111011);
+}
+
+void Assembler::SLLIUW(GPR rd, GPR rs, uint32_t shift_amount) noexcept {
+    BISCUIT_ASSERT(shift_amount <= 63);
+    const auto imm = (0b000010U << 6) | shift_amount;
+    EmitIType(m_buffer, imm, rs, 0b001, rd, 0b0011011);
+}
+
+void Assembler::UNZIP(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b000010011111, rs, 0b101, rd, 0b0010011);
+}
+
+void Assembler::XNOR(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0100000, rs2, rs1, 0b100, rd, 0b0110011);
+}
+
+void Assembler::XPERMB(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010100, rs2, rs1, 0b100, rd, 0b0110011);
+}
+
+void Assembler::XPERMN(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010100, rs2, rs1, 0b010, rd, 0b0110011);
+}
+
+void Assembler::ZEXTH_32(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b000010000000, rs, 0b100, rd, 0b0110011);
+}
+
+void Assembler::ZEXTH_64(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b000010000000, rs, 0b100, rd, 0b0111011);
+}
+
+void Assembler::ZEXTW(GPR rd, GPR rs) noexcept {
+    ADDUW(rd, rs, x0);
+}
+
+void Assembler::ZIP(GPR rd, GPR rs) noexcept {
+    EmitIType(m_buffer, 0b000010011110, rs, 0b001, rd, 0b0010011);
+}
+
+void Assembler::BSET(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010100, rs2, rs1, 0b001, rd, 0b0110011);
+}
+
+void Assembler::BSETI(GPR rd, GPR rs, uint32_t bit) noexcept {
+    BISCUIT_ASSERT(bit <= 63);
+    const auto imm = (0b001010U << 6) | bit;
+    EmitIType(m_buffer, imm, rs, 0b001, rd, 0b0010011);
+}
+
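+// Note: the single-bit and shift/rotate immediate forms above (BCLRI, BEXTI,
+// BINVI, BSETI, RORI, SLLIUW) build their I-type immediate as
+// (funct6 << 6) | shamt: the funct6 opcode bits live in imm[11:6] and the
+// bit index or shift amount in imm[5:0], which is what allows 6-bit shift
+// amounts on RV64.
+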
+// RVC Extension Instructions
+
+void Assembler::C_ADD(GPR rd, GPR rs) noexcept {
+    BISCUIT_ASSERT(rs != x0);
+    m_buffer.Emit16(0x9002 | (rd.Index() << 7) | (rs.Index() << 2));
+}
+
+void Assembler::C_ADDI(GPR rd, int32_t imm) noexcept {
+    BISCUIT_ASSERT(imm != 0);
+    BISCUIT_ASSERT(IsValidSigned6BitImm(imm));
+    EmitCompressedImmediate(m_buffer, 0b000, static_cast<uint32_t>(imm), rd, 0b01);
+}
+
+void Assembler::C_ADDIW(GPR rd, int32_t imm) noexcept {
+    BISCUIT_ASSERT(IsValidSigned6BitImm(imm));
+    EmitCompressedImmediate(m_buffer, 0b001, static_cast<uint32_t>(imm), rd, 0b01);
+}
+
+void Assembler::C_ADDI4SPN(GPR rd, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(imm != 0);
+    BISCUIT_ASSERT(imm <= 1020);
+    BISCUIT_ASSERT(imm % 4 == 0);
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x030) << 2) |
+                         ((imm & 0x3C0) >> 4) |
+                         ((imm & 0x004) >> 1) |
+                         ((imm & 0x008) >> 3);
+    // clang-format on
+
+    EmitCompressedWideImmediate(m_buffer, 0b000, new_imm, rd, 0b00);
+}
+
+void Assembler::C_ADDW(GPR rd, GPR rs) noexcept {
+    EmitCompressedRegArith(m_buffer, 0b100111, rd, 0b01, rs, 0b01);
+}
+
+void Assembler::C_ADDI16SP(int32_t imm) noexcept {
+    BISCUIT_ASSERT(imm != 0);
+    BISCUIT_ASSERT(imm >= -512 && imm <= 496);
+    BISCUIT_ASSERT(imm % 16 == 0);
+
+    // clang-format off
+    const auto uimm = static_cast<uint32_t>(imm);
+    const auto new_imm = ((uimm & 0x020) >> 3) |
+                         ((uimm & 0x180) >> 4) |
+                         ((uimm & 0x040) >> 1) |
+                         ((uimm & 0x010) << 2) |
+                         ((uimm & 0x200) << 3);
+    // clang-format on
+
+    m_buffer.Emit16(0x6000U | new_imm | (x2.Index() << 7) | 0b01U);
+}
+
+void Assembler::C_AND(GPR rd, GPR rs) noexcept {
+    EmitCompressedRegArith(m_buffer, 0b100011, rd, 0b11, rs, 0b01);
+}
+
+void Assembler::C_ANDI(GPR rd, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(IsValid3BitCompressedReg(rd));
+
+    constexpr auto base = 0x8801U;
+    const auto shift_enc = ((imm & 0b11111) << 2) | ((imm & 0b100000) << 7);
+    const auto reg = CompressedRegTo3BitEncoding(rd);
+
+    m_buffer.Emit16(base | shift_enc | (reg << 7));
+}
+
+void Assembler::C_BEQZ(GPR rs, int32_t offset) noexcept {
+    EmitCompressedBranch(m_buffer, 0b110, offset, rs, 0b01);
+}
+
+void Assembler::C_BEQZ(GPR rs, Label* label) noexcept {
+    const auto address = LinkAndGetOffset(label);
+    C_BEQZ(rs, static_cast<int32_t>(address));
+}
+
+void Assembler::C_BNEZ(GPR rs, int32_t offset) noexcept {
+    EmitCompressedBranch(m_buffer, 0b111, offset, rs, 0b01);
+}
+
+void Assembler::C_BNEZ(GPR rs, Label* label) noexcept {
+    const auto address = LinkAndGetOffset(label);
+    C_BNEZ(rs, static_cast<int32_t>(address));
+}
+
+void Assembler::C_EBREAK() noexcept {
+    m_buffer.Emit16(0x9002);
+}
+
+void Assembler::C_FLD(FPR rd, uint32_t imm, GPR rs) noexcept {
+    BISCUIT_ASSERT(imm <= 248);
+    BISCUIT_ASSERT(imm % 8 == 0);
+
+    EmitCompressedLoad(m_buffer, 0b001, imm, rs, rd, 0b00);
+}
+
+void Assembler::C_FLDSP(FPR rd, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(imm <= 504);
+    BISCUIT_ASSERT(imm % 8 == 0);
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x018) << 2) |
+                         ((imm & 0x1C0) >> 4) |
+                         ((imm & 0x020) << 7);
+    // clang-format on
+
+    m_buffer.Emit16(0x2002U | (rd.Index() << 7) | new_imm);
+}
+
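+// Note: the new_imm bit shuffles in the compressed loads/stores above and
+// below are not arbitrary; they rearrange a plain byte offset into the
+// scattered immediate bit positions that the RVC CI/CSS/CL/CS instruction
+// formats require.
+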
+void Assembler::C_FLW(FPR rd, uint32_t imm, GPR rs) noexcept {
+    BISCUIT_ASSERT(imm <= 124);
+    BISCUIT_ASSERT(imm % 4 == 0);
+
+    imm &= 0x7C;
+    const auto new_imm = ((imm & 0b0100) << 5) | (imm & 0x78);
+    EmitCompressedLoad(m_buffer, 0b011, new_imm, rs, rd, 0b00);
+}
+
+void Assembler::C_FLWSP(FPR rd, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(imm <= 252);
+    BISCUIT_ASSERT(imm % 4 == 0);
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x020) << 7) |
+                         ((imm & 0x0C0) >> 4) |
+                         ((imm & 0x01C) << 2);
+    // clang-format on
+
+    m_buffer.Emit16(0x6002U | (rd.Index() << 7) | new_imm);
+}
+
+void Assembler::C_FSD(FPR rs2, uint32_t imm, GPR rs1) noexcept {
+    BISCUIT_ASSERT(imm <= 248);
+    BISCUIT_ASSERT(imm % 8 == 0);
+
+    EmitCompressedStore(m_buffer, 0b101, imm, rs1, rs2, 0b00);
+}
+
+void Assembler::C_FSDSP(FPR rs, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(imm <= 504);
+    BISCUIT_ASSERT(imm % 8 == 0);
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x038) << 7) |
+                         ((imm & 0x1C0) << 1);
+    // clang-format on
+
+    m_buffer.Emit16(0xA002U | (rs.Index() << 2) | new_imm);
+}
+
+void Assembler::C_J(Label* label) noexcept {
+    const auto address = LinkAndGetOffset(label);
+    C_J(static_cast<int32_t>(address));
+}
+
+void Assembler::C_J(int32_t offset) noexcept {
+    EmitCompressedJump(m_buffer, 0b101, offset, 0b01);
+}
+
+void Assembler::C_JAL(Label* label) noexcept {
+    const auto address = LinkAndGetOffset(label);
+    C_JAL(static_cast<int32_t>(address));
+}
+
+void Assembler::C_JAL(int32_t offset) noexcept {
+    EmitCompressedJump(m_buffer, 0b001, offset, 0b01);
+}
+
+void Assembler::C_FSW(FPR rs2, uint32_t imm, GPR rs1) noexcept {
+    imm &= 0x7C;
+    const auto new_imm = ((imm & 0b0100) << 5) | (imm & 0x78);
+    EmitCompressedStore(m_buffer, 0b111, new_imm, rs1, rs2, 0b00);
+}
+
+void Assembler::C_FSWSP(FPR rs, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(imm <= 252);
+    BISCUIT_ASSERT(imm % 4 == 0);
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x0C0) << 1) |
+                         ((imm & 0x03C) << 7);
+    // clang-format on
+
+    m_buffer.Emit16(0xE002U | (rs.Index() << 2) | new_imm);
+}
+
+void Assembler::C_JALR(GPR rs) noexcept {
+    BISCUIT_ASSERT(rs != x0);
+    m_buffer.Emit16(0x9002 | (rs.Index() << 7));
+}
+
+void Assembler::C_JR(GPR rs) noexcept {
+    BISCUIT_ASSERT(rs != x0);
+    m_buffer.Emit16(0x8002 | (rs.Index() << 7));
+}
+
+void Assembler::C_LD(GPR rd, uint32_t imm, GPR rs) noexcept {
+    BISCUIT_ASSERT(imm <= 248);
+    BISCUIT_ASSERT(imm % 8 == 0);
+
+    EmitCompressedLoad(m_buffer, 0b011, imm, rs, rd, 0b00);
+}
+
+void Assembler::C_LDSP(GPR rd, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(rd != x0);
+    BISCUIT_ASSERT(imm <= 504);
+    BISCUIT_ASSERT(imm % 8 == 0);
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x018) << 2) |
+                         ((imm & 0x1C0) >> 4) |
+                         ((imm & 0x020) << 7);
+    // clang-format on
+
+    m_buffer.Emit16(0x6002U | (rd.Index() << 7) | new_imm);
+}
+
+void Assembler::C_LI(GPR rd, int32_t imm) noexcept {
+    BISCUIT_ASSERT(IsValidSigned6BitImm(imm));
+    EmitCompressedImmediate(m_buffer, 0b010, static_cast<uint32_t>(imm), rd, 0b01);
+}
+
+void Assembler::C_LQ(GPR rd, uint32_t imm, GPR rs) noexcept {
+    BISCUIT_ASSERT(imm <= 496);
+    BISCUIT_ASSERT(imm % 16 == 0);
+
+    imm &= 0x1F0;
+    const auto new_imm = ((imm & 0x100) >> 5) | (imm & 0xF0);
+    EmitCompressedLoad(m_buffer, 0b001, new_imm, rs, rd, 0b00);
+}
+
+void Assembler::C_LQSP(GPR rd, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(rd != x0);
+    BISCUIT_ASSERT(imm <= 1008);
+    BISCUIT_ASSERT(imm % 16 == 0);
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x020) << 7) |
+                         ((imm & 0x010) << 2) |
+                         ((imm & 0x3C0) >> 4);
+    // clang-format on
+
+    m_buffer.Emit16(0x2002U | (rd.Index() << 7) | new_imm);
+}
+
+void Assembler::C_LUI(GPR rd, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(imm != 0);
+    BISCUIT_ASSERT(rd != x0 && rd != x2);
+
+    const auto new_imm = (imm & 0x3F000) >> 12;
+    EmitCompressedImmediate(m_buffer, 0b011, new_imm, rd, 0b01);
+}
+
+void Assembler::C_LW(GPR rd, uint32_t imm, GPR rs) noexcept {
+    BISCUIT_ASSERT(imm <= 124);
+    BISCUIT_ASSERT(imm % 4 == 0);
+
+    imm &= 0x7C;
+    const auto new_imm = ((imm & 0b0100) << 5) | (imm & 0x78);
+    EmitCompressedLoad(m_buffer, 0b010, new_imm, rs, rd, 0b00);
+}
+
+void Assembler::C_LWSP(GPR rd, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(rd != x0);
+    BISCUIT_ASSERT(imm <= 252);
+    BISCUIT_ASSERT(imm % 4 == 0);
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x020) << 7) |
+                         ((imm & 0x0C0) >> 4) |
+                         ((imm & 0x01C) << 2);
+    // clang-format on
+
+    m_buffer.Emit16(0x4002U | (rd.Index() << 7) | new_imm);
+}
+
+void Assembler::C_MV(GPR rd, GPR rs) noexcept {
+    BISCUIT_ASSERT(rd != x0);
+    BISCUIT_ASSERT(rs != x0);
+    m_buffer.Emit16(0x8002 | (rd.Index() << 7) | (rs.Index() << 2));
+}
+
+void Assembler::C_NOP() noexcept {
+    m_buffer.Emit16(1);
+}
+
+void Assembler::C_OR(GPR rd, GPR rs) noexcept {
+    EmitCompressedRegArith(m_buffer, 0b100011, rd, 0b10, rs, 0b01);
+}
+
+void Assembler::C_SD(GPR rs2, uint32_t imm, GPR rs1) noexcept {
+    BISCUIT_ASSERT(imm <= 248);
+    BISCUIT_ASSERT(imm % 8 == 0);
+
+    EmitCompressedLoad(m_buffer, 0b111, imm, rs1, rs2, 0b00);
+}
+
+void Assembler::C_SDSP(GPR rs, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(imm <= 504);
+    BISCUIT_ASSERT(imm % 8 == 0);
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x038) << 7) |
+                         ((imm & 0x1C0) << 1);
+    // clang-format on
+
+    m_buffer.Emit16(0xE002U | (rs.Index() << 2) | new_imm);
+}
+
+void Assembler::C_SLLI(GPR rd, uint32_t shift) noexcept {
+    BISCUIT_ASSERT(rd != x0);
+    BISCUIT_ASSERT(IsValidCompressedShiftAmount(shift));
+
+    // RV128C encodes a 64-bit shift with an encoding of 0.
+    if (shift == 64) {
+        shift = 0;
+    }
+
+    const auto shift_enc = ((shift & 0b11111) << 2) | ((shift & 0b100000) << 7);
+    m_buffer.Emit16(0x0002U | shift_enc | (rd.Index() << 7));
+}
+
+void Assembler::C_SQ(GPR rs2, uint32_t imm, GPR rs1) noexcept {
+    BISCUIT_ASSERT(imm <= 496);
+    BISCUIT_ASSERT(imm % 16 == 0);
+
+    imm &= 0x1F0;
+    const auto new_imm = ((imm & 0x100) >> 5) | (imm & 0xF0);
+    EmitCompressedStore(m_buffer, 0b101, new_imm, rs1, rs2, 0b00);
+}
+
+void Assembler::C_SQSP(GPR rs, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(imm <= 1008);
+    BISCUIT_ASSERT(imm % 16 == 0);
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x3C0) << 1) |
+                         ((imm & 0x030) << 7);
+    // clang-format on
+
+    m_buffer.Emit16(0xA002U | (rs.Index() << 2) | new_imm);
+}
+
+void Assembler::C_SRAI(GPR rd, uint32_t shift) noexcept {
+    BISCUIT_ASSERT(IsValid3BitCompressedReg(rd));
+    BISCUIT_ASSERT(IsValidCompressedShiftAmount(shift));
+
+    // RV128C encodes a 64-bit shift with an encoding of 0.
+    if (shift == 64) {
+        shift = 0;
+    }
+
+    constexpr auto base = 0x8401U;
+    const auto shift_enc = ((shift & 0b11111) << 2) | ((shift & 0b100000) << 7);
+    const auto reg = CompressedRegTo3BitEncoding(rd);
+
+    m_buffer.Emit16(base | shift_enc | (reg << 7));
+}
+
+void Assembler::C_SRLI(GPR rd, uint32_t shift) noexcept {
+    BISCUIT_ASSERT(IsValid3BitCompressedReg(rd));
+    BISCUIT_ASSERT(IsValidCompressedShiftAmount(shift));
+
+    // RV128C encodes a 64-bit shift with an encoding of 0.
+    if (shift == 64) {
+        shift = 0;
+    }
+
+    constexpr auto base = 0x8001U;
+    const auto shift_enc = ((shift & 0b11111) << 2) | ((shift & 0b100000) << 7);
+    const auto reg = CompressedRegTo3BitEncoding(rd);
+
+    m_buffer.Emit16(base | shift_enc | (reg << 7));
+}
+
+void Assembler::C_SUB(GPR rd, GPR rs) noexcept {
+    EmitCompressedRegArith(m_buffer, 0b100011, rd, 0b00, rs, 0b01);
+}
+
+void Assembler::C_SUBW(GPR rd, GPR rs) noexcept {
+    EmitCompressedRegArith(m_buffer, 0b100111, rd, 0b00, rs, 0b01);
+}
+
+void Assembler::C_SW(GPR rs2, uint32_t imm, GPR rs1) noexcept {
+    BISCUIT_ASSERT(imm <= 124);
+    BISCUIT_ASSERT(imm % 4 == 0);
+
+    imm &= 0x7C;
+    const auto new_imm = ((imm & 0b0100) << 5) | (imm & 0x78);
+    EmitCompressedStore(m_buffer, 0b110, new_imm, rs1, rs2, 0b00);
+}
+
+void Assembler::C_SWSP(GPR rs, uint32_t imm) noexcept {
+    BISCUIT_ASSERT(imm <= 252);
+    BISCUIT_ASSERT(imm % 4 == 0);
+
+    // clang-format off
+    const auto new_imm = ((imm & 0x0C0) << 1) |
+                         ((imm & 0x03C) << 7);
+    // clang-format on
+
+    m_buffer.Emit16(0xC002U | (rs.Index() << 2) | new_imm);
+}
+
+void Assembler::C_UNDEF() noexcept {
+    m_buffer.Emit16(0);
+}
+
+void Assembler::C_XOR(GPR rd, GPR rs) noexcept {
+    EmitCompressedRegArith(m_buffer, 0b100011, rd, 0b01, rs, 0b01);
+}
+
+// Cache Management Operation Extension Instructions
+
+void Assembler::CBO_CLEAN(GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0000000, x1, rs, 0b010, x0, 0b0001111);
+}
+
+void Assembler::CBO_FLUSH(GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0000000, x2, rs, 0b010, x0, 0b0001111);
+}
+
+void Assembler::CBO_INVAL(GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0000000, x0, rs, 0b010, x0, 0b0001111);
+}
+
+void Assembler::CBO_ZERO(GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0000000, x4, rs, 0b010, x0, 0b0001111);
+}
+
+void Assembler::PREFETCH_I(GPR rs, int32_t offset) noexcept {
+    // Offset must be able to fit in a 12-bit signed immediate and be
+    // cleanly divisible by 32 since the bottom 5 bits are encoded as zero.
+    BISCUIT_ASSERT(IsValidSigned12BitImm(offset));
+    BISCUIT_ASSERT(offset % 32 == 0);
+    EmitIType(m_buffer, static_cast<uint32_t>(offset), rs, 0b110, x0, 0b0010011);
+}
+
+void Assembler::PREFETCH_R(GPR rs, int32_t offset) noexcept {
+    // Offset must be able to fit in a 12-bit signed immediate and be
+    // cleanly divisible by 32 since the bottom 5 bits are encoded as zero.
+    BISCUIT_ASSERT(IsValidSigned12BitImm(offset));
+    BISCUIT_ASSERT(offset % 32 == 0);
+    EmitIType(m_buffer, static_cast<uint32_t>(offset) | 0b01, rs, 0b110, x0, 0b0010011);
+}
+
+void Assembler::PREFETCH_W(GPR rs, int32_t offset) noexcept {
+    // Offset must be able to fit in a 12-bit signed immediate and be
+    // cleanly divisible by 32 since the bottom 5 bits are encoded as zero.
+    BISCUIT_ASSERT(IsValidSigned12BitImm(offset));
+    BISCUIT_ASSERT(offset % 32 == 0);
+    EmitIType(m_buffer, static_cast<uint32_t>(offset) | 0b11, rs, 0b110, x0, 0b0010011);
+}
+
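+// Note: in the CBO_* helpers above, the x0/x1/x2/x4 register in the rs2 slot
+// is a selector for the cache-block operation (inval/clean/flush/zero), and
+// the low bits OR'd into the PREFETCH_* immediates (0b00/0b01/0b11) select
+// prefetch.i, prefetch.r, and prefetch.w respectively; none of them are real
+// register operands.
+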
+// Privileged Instructions
+
+void Assembler::HFENCE_GVMA(GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0110001, rs2, rs1, 0b000, x0, 0b1110011);
+}
+
+void Assembler::HFENCE_VVMA(GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010001, rs2, rs1, 0b000, x0, 0b1110011);
+}
+
+void Assembler::HINVAL_GVMA(GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0110011, rs2, rs1, 0b000, x0, 0b1110011);
+}
+
+void Assembler::HINVAL_VVMA(GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0010011, rs2, rs1, 0b000, x0, 0b1110011);
+}
+
+void Assembler::HLV_B(GPR rd, GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0110000, x0, rs, 0b100, rd, 0b1110011);
+}
+
+void Assembler::HLV_BU(GPR rd, GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0110000, x1, rs, 0b100, rd, 0b1110011);
+}
+
+void Assembler::HLV_D(GPR rd, GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0110110, x0, rs, 0b100, rd, 0b1110011);
+}
+
+void Assembler::HLV_H(GPR rd, GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0110010, x0, rs, 0b100, rd, 0b1110011);
+}
+
+void Assembler::HLV_HU(GPR rd, GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0110010, x1, rs, 0b100, rd, 0b1110011);
+}
+
+void Assembler::HLV_W(GPR rd, GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0110100, x0, rs, 0b100, rd, 0b1110011);
+}
+
+void Assembler::HLV_WU(GPR rd, GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0110100, x1, rs, 0b100, rd, 0b1110011);
+}
+
+void Assembler::HLVX_HU(GPR rd, GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0110010, x3, rs, 0b100, rd, 0b1110011);
+}
+
+void Assembler::HLVX_WU(GPR rd, GPR rs) noexcept {
+    EmitRType(m_buffer, 0b0110100, x3, rs, 0b100, rd, 0b1110011);
+}
+
+void Assembler::HSV_B(GPR rs2, GPR rs1) noexcept {
+    EmitRType(m_buffer, 0b0110001, rs2, rs1, 0b100, x0, 0b1110011);
+}
+
+void Assembler::HSV_D(GPR rs2, GPR rs1) noexcept {
+    EmitRType(m_buffer, 0b0110111, rs2, rs1, 0b100, x0, 0b1110011);
+}
+
+void Assembler::HSV_H(GPR rs2, GPR rs1) noexcept {
+    EmitRType(m_buffer, 0b0110011, rs2, rs1, 0b100, x0, 0b1110011);
+}
+
+void Assembler::HSV_W(GPR rs2, GPR rs1) noexcept {
+    EmitRType(m_buffer, 0b0110101, rs2, rs1, 0b100, x0, 0b1110011);
+}
+
+void Assembler::MRET() noexcept {
+    m_buffer.Emit32(0x30200073);
+}
+
+void Assembler::SFENCE_INVAL_IR() noexcept {
+    m_buffer.Emit32(0x18100073U);
+}
+
+void Assembler::SFENCE_VMA(GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0001001, rs2, rs1, 0b000, x0, 0b1110011);
+}
+
+void Assembler::SFENCE_W_INVAL() noexcept {
+    m_buffer.Emit32(0x18000073U);
+}
+
+void Assembler::SINVAL_VMA(GPR rs1, GPR rs2) noexcept {
+    EmitRType(m_buffer, 0b0001011, rs2, rs1, 0b000, x0, 0b1110011);
+}
+
+void Assembler::SRET() noexcept {
+    m_buffer.Emit32(0x10200073);
+}
+
+void Assembler::URET() noexcept {
+    m_buffer.Emit32(0x00200073);
+}
+
+void Assembler::WFI() noexcept {
+    m_buffer.Emit32(0x10500073);
+}
+
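+// Rough usage sketch of the label machinery below (illustrative only; see
+// biscuit/label.hpp for the real interface):
+//
+//     biscuit::Label skip;
+//     as.BEQ(a0, a1, &skip);  // forward reference, emitted with offset 0
+//     as.ADDI(a2, a2, 1);
+//     as.Bind(&skip);         // patches the BEQ above via ResolveLabelOffsets
+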
+void Assembler::BindToOffset(Label* label, Label::LocationOffset offset) {
+    BISCUIT_ASSERT(label != nullptr);
+    BISCUIT_ASSERT(offset >= 0 && offset <= m_buffer.GetCursorOffset());
+
+    label->Bind(offset);
+    ResolveLabelOffsets(label);
+    label->ClearOffsets();
+}
+
+ptrdiff_t Assembler::LinkAndGetOffset(Label* label) {
+    BISCUIT_ASSERT(label != nullptr);
+
+    // If we have a bound label, then it's straightforward to calculate
+    // the offsets.
+    if (label->IsBound()) {
+        const auto cursor_address = m_buffer.GetCursorAddress<uintptr_t>();
+        const auto label_offset = m_buffer.GetOffsetAddress<uintptr_t>(*label->GetLocation());
+        return static_cast<ptrdiff_t>(label_offset - cursor_address);
+    }
+
+    // If we don't have a bound location, we return an offset of zero.
+    // While the emitter will emit a bogus branch instruction initially,
+    // the offset will be patched over once the label has been properly
+    // bound to a location.
+    label->AddOffset(m_buffer.GetCursorOffset());
+    return 0;
+}
+
+void Assembler::ResolveLabelOffsets(Label* label) {
+    // Conditional branch instructions make use of the B-type immediate encoding for offsets.
+    const auto is_b_type = [](uint32_t instruction) {
+        return (instruction & 0x7F) == 0b1100011;
+    };
+    // JAL makes use of the J-type immediate encoding for offsets.
+    const auto is_j_type = [](uint32_t instruction) {
+        return (instruction & 0x7F) == 0b1101111;
+    };
+    // C.BEQZ and C.BNEZ make use of this encoding type.
+    const auto is_cb_type = [](uint32_t instruction) {
+        const auto op = instruction & 0b11;
+        const auto funct3 = instruction & 0xE000;
+        return op == 0b01 && funct3 >= 0xC000;
+    };
+    // C.JAL and C.J make use of this encoding type.
+    const auto is_cj_type = [](uint32_t instruction) {
+        const auto op = instruction & 0b11;
+        const auto funct3 = instruction & 0xE000;
+        return op == 0b01 && (funct3 == 0x2000 || funct3 == 0xA000);
+    };
+    // If we know an instruction is a compressed branch, then it's a 16-bit instruction.
+    // Otherwise it's a regular-sized 32-bit instruction.
+    const auto determine_inst_size = [&](uint32_t instruction) -> size_t {
+        if (is_cj_type(instruction) || is_cb_type(instruction)) {
+            return 2;
+        } else {
+            return 4;
+        }
+    };
+
+    const auto label_location = *label->GetLocation();
+
+    for (const auto offset : label->m_offsets) {
+        const auto address = m_buffer.GetOffsetAddress<uintptr_t>(offset);
+        auto* const ptr = reinterpret_cast<uint8_t*>(address);
+        const auto inst_size = determine_inst_size(uint32_t{*ptr} | (uint32_t{*(ptr + 1)} << 8));
+
+        uint32_t instruction = 0;
+        std::memcpy(&instruction, ptr, inst_size);
+
+        // Given all branch instructions we need to patch have 0 encoded as
+        // their branch offset, we don't need to worry about any masking work.
+        //
+        // It's enough to verify that the immediate is going to be valid
+        // and then OR it into the instruction.
+
+        const auto encoded_offset = label_location - offset;
+
+        if (inst_size == sizeof(uint32_t)) {
+            if (is_b_type(instruction)) {
+                BISCUIT_ASSERT(IsValidBTypeImm(encoded_offset));
+                instruction |= TransformToBTypeImm(static_cast<uint32_t>(encoded_offset));
+            } else if (is_j_type(instruction)) {
+                BISCUIT_ASSERT(IsValidJTypeImm(encoded_offset));
+                instruction |= TransformToJTypeImm(static_cast<uint32_t>(encoded_offset));
+            }
+        } else {
+            if (is_cb_type(instruction)) {
+                BISCUIT_ASSERT(IsValidCBTypeImm(encoded_offset));
+                instruction |= TransformToCBTypeImm(static_cast<uint32_t>(encoded_offset));
+            } else if (is_cj_type(instruction)) {
+                BISCUIT_ASSERT(IsValidCJTypeImm(encoded_offset));
+                instruction |= TransformToCJTypeImm(static_cast<uint32_t>(encoded_offset));
+            }
+        }
+
+        std::memcpy(ptr, &instruction, inst_size);
+    }
+}
+
+} // namespace biscuit
diff --git a/dep/biscuit/src/assembler_crypto.cpp b/dep/biscuit/src/assembler_crypto.cpp
new file mode 100644
index 000000000..35d741597
--- /dev/null
+++ b/dep/biscuit/src/assembler_crypto.cpp
@@ -0,0 +1,149 @@
+#include <biscuit/assembler.hpp>
+#include <biscuit/assert.hpp>
+
+namespace biscuit {
+namespace {
+void EmitAES32Instruction(CodeBuffer& buffer, uint32_t op, GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept {
+    BISCUIT_ASSERT(bs <= 0b11);
+    buffer.Emit32(op | (bs << 30) | (rs2.Index() << 20) |
+                  (rs1.Index() << 15) | (rd.Index() << 7));
+}
+
+void EmitSM4Instruction(CodeBuffer& buffer, uint32_t op, GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept {
+    // Same behavior, function exists for a better contextual name.
+    EmitAES32Instruction(buffer, op, rd, rs1, rs2, bs);
+}
+
+void EmitAES64Instruction(CodeBuffer& buffer, uint32_t op, GPR rd, GPR rs1, GPR rs2) noexcept {
+    buffer.Emit32(op | (rs2.Index() << 20) | (rs1.Index() << 15) | (rd.Index() << 7));
+}
+
+void EmitSHAInstruction(CodeBuffer& buffer, uint32_t op, GPR rd, GPR rs1, GPR rs2) noexcept {
+    // Same behavior, function exists for a better contextual name.
+    EmitAES64Instruction(buffer, op, rd, rs1, rs2);
+}
+
+void EmitSM3Instruction(CodeBuffer& buffer, uint32_t op, GPR rd, GPR rs) noexcept {
+    // Same behavior, function exists for a better contextual name.
+    EmitAES64Instruction(buffer, op, rd, rs, x0);
+}
+} // Anonymous namespace
+
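+// Note: the 0x...033 / 0x...013 constants passed to these helpers by the
+// functions below are fully-encoded base instructions (opcode, funct3, and
+// funct7 bits) with the register fields left as zero; each helper simply ORs
+// the register indices (and, for AES32/SM4, the bs byte selector into bits
+// 31:30) into place.
+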
+void Assembler::AES32DSI(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept {
+    EmitAES32Instruction(m_buffer, 0x2A000033, rd, rs1, rs2, bs);
+}
+
+void Assembler::AES32DSMI(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept {
+    EmitAES32Instruction(m_buffer, 0x2E000033, rd, rs1, rs2, bs);
+}
+
+void Assembler::AES32ESI(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept {
+    EmitAES32Instruction(m_buffer, 0x22000033, rd, rs1, rs2, bs);
+}
+
+void Assembler::AES32ESMI(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept {
+    EmitAES32Instruction(m_buffer, 0x26000033, rd, rs1, rs2, bs);
+}
+
+void Assembler::AES64DS(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitAES64Instruction(m_buffer, 0x3A000033, rd, rs1, rs2);
+}
+
+void Assembler::AES64DSM(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitAES64Instruction(m_buffer, 0x3E000033, rd, rs1, rs2);
+}
+
+void Assembler::AES64ES(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitAES64Instruction(m_buffer, 0x32000033, rd, rs1, rs2);
+}
+
+void Assembler::AES64ESM(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitAES64Instruction(m_buffer, 0x36000033, rd, rs1, rs2);
+}
+
+void Assembler::AES64IM(GPR rd, GPR rs) noexcept {
+    EmitAES64Instruction(m_buffer, 0x30001013, rd, rs, x0);
+}
+
+void Assembler::AES64KS1I(GPR rd, GPR rs, uint32_t rnum) noexcept {
+    // RVK spec states that 0xB to 0xF are reserved.
+    BISCUIT_ASSERT(rnum <= 0xA);
+    EmitAES64Instruction(m_buffer, 0x31001013, rd, rs, GPR{rnum});
+}
+
+void Assembler::AES64KS2(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitAES64Instruction(m_buffer, 0x7E000033, rd, rs1, rs2);
+}
+
+void Assembler::SHA256SIG0(GPR rd, GPR rs) noexcept {
+    EmitSHAInstruction(m_buffer, 0x10201013, rd, rs, x0);
+}
+
+void Assembler::SHA256SIG1(GPR rd, GPR rs) noexcept {
+    EmitSHAInstruction(m_buffer, 0x10301013, rd, rs, x0);
+}
+
+void Assembler::SHA256SUM0(GPR rd, GPR rs) noexcept {
+    EmitSHAInstruction(m_buffer, 0x10001013, rd, rs, x0);
+}
+
+void Assembler::SHA256SUM1(GPR rd, GPR rs) noexcept {
+    EmitSHAInstruction(m_buffer, 0x10101013, rd, rs, x0);
+}
+
+void Assembler::SHA512SIG0(GPR rd, GPR rs) noexcept {
+    EmitSHAInstruction(m_buffer, 0x10601013, rd, rs, x0);
+}
+
+void Assembler::SHA512SIG0H(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitSHAInstruction(m_buffer, 0x5C000033, rd, rs1, rs2);
+}
+
+void Assembler::SHA512SIG0L(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitSHAInstruction(m_buffer, 0x54000033, rd, rs1, rs2);
+}
+
+void Assembler::SHA512SIG1(GPR rd, GPR rs) noexcept {
+    EmitSHAInstruction(m_buffer, 0x10701013, rd, rs, x0);
+}
+
+void Assembler::SHA512SIG1H(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitSHAInstruction(m_buffer, 0x5E000033, rd, rs1, rs2);
+}
+
+void Assembler::SHA512SIG1L(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitSHAInstruction(m_buffer, 0x56000033, rd, rs1, rs2);
+}
+
+void Assembler::SHA512SUM0(GPR rd, GPR rs) noexcept {
+    EmitSHAInstruction(m_buffer, 0x10401013, rd, rs, x0);
+}
+
+void Assembler::SHA512SUM0R(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitSHAInstruction(m_buffer, 0x50000033, rd, rs1, rs2);
+}
+
+void Assembler::SHA512SUM1(GPR rd, GPR rs) noexcept {
+    EmitSHAInstruction(m_buffer, 0x10501013, rd, rs, x0);
+}
+
+void Assembler::SHA512SUM1R(GPR rd, GPR rs1, GPR rs2) noexcept {
+    EmitSHAInstruction(m_buffer, 0x52000033, rd, rs1, rs2);
+}
+
+void Assembler::SM3P0(GPR rd, GPR rs) noexcept {
+    EmitSM3Instruction(m_buffer, 0x10801013, rd, rs);
+}
+
+void Assembler::SM3P1(GPR rd, GPR rs) noexcept {
+    EmitSM3Instruction(m_buffer, 0x10901013, rd, rs);
+}
+
+void Assembler::SM4ED(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept {
+    EmitSM4Instruction(m_buffer, 0x30000033, rd, rs1, rs2, bs);
+}
+
+void Assembler::SM4KS(GPR rd, GPR rs1, GPR rs2, uint32_t bs) noexcept {
+    EmitSM4Instruction(m_buffer, 0x34000033, rd, rs1, rs2, bs);
+}
+} // namespace biscuit
diff --git a/dep/biscuit/src/assembler_vector.cpp b/dep/biscuit/src/assembler_vector.cpp
new file mode 100644
index 000000000..fc9efd3b8
--- /dev/null
+++ b/dep/biscuit/src/assembler_vector.cpp
@@ -0,0 +1,1951 @@
+#include <biscuit/assembler.hpp>
+#include <biscuit/assert.hpp>
+
+namespace biscuit {
+namespace {
+
+enum class AddressingMode : uint32_t {
+    // clang-format off
+    UnitStride       = 0b00,
+    IndexedUnordered = 0b01,
+    Strided          = 0b10,
+    IndexedOrdered   = 0b11,
+    // clang-format on
+};
+
+enum class UnitStrideLoadAddressingMode : uint32_t {
+    // clang-format off
+    Load               = 0b00000,
+    MaskLoad           = 0b01011,
+    LoadFaultOnlyFirst = 0b10000,
+    // clang-format on
+};
+
+enum class UnitStrideStoreAddressingMode : uint32_t {
+    // clang-format off
+    Store     = 0b00000,
+    MaskStore = 0b01011,
+    // clang-format on
+};
+
+enum class WidthEncoding : uint32_t {
+    // clang-format off
+    E8  = 0b000,
+    E16 = 0b101,
+    E32 = 0b110,
+    E64 = 0b111,
+    // clang-format on
+};
+
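+// Note: the vector load/store emitters below pack the fields of the VL*/VS*
+// encodings directly: nf (segment count) into bits 31:29, mew into bit 28,
+// mop (addressing mode) into bits 27:26, vm into bit 25, lumop/sumop or the
+// stride/index register into bits 24:20, and the element width into the
+// funct3 position (bits 14:12).
+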
+void EmitVectorLoadImpl(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
+                        VecMask vm, uint32_t lumop, GPR rs, WidthEncoding width, Vec vd) noexcept {
+    BISCUIT_ASSERT(nf <= 8);
+
+    // Fit to encoding space. Allows for being more explicit about the size in calling functions
+    // (e.g. using 8 for 8 elements instead of 7).
+    if (nf != 0) {
+        nf -= 1;
+    }
+
+    // clang-format off
+    const auto value = (nf << 29) |
+                       (static_cast<uint32_t>(mew) << 28) |
+                       (static_cast<uint32_t>(mop) << 26) |
+                       (static_cast<uint32_t>(vm) << 25) |
+                       (lumop << 20) |
+                       (rs.Index() << 15) |
+                       (static_cast<uint32_t>(width) << 12) |
+                       (vd.Index() << 7);
+    // clang-format on
+
+    buffer.Emit32(value | 0b111);
+}
+
+void EmitVectorLoad(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
+                    VecMask vm, UnitStrideLoadAddressingMode lumop, GPR rs,
+                    WidthEncoding width, Vec vd) noexcept {
+    EmitVectorLoadImpl(buffer, nf, mew, mop, vm, static_cast<uint32_t>(lumop), rs, width, vd);
+}
+
+void EmitVectorLoad(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
+                    VecMask vm, GPR rs2, GPR rs1, WidthEncoding width, Vec vd) noexcept {
+    EmitVectorLoadImpl(buffer, nf, mew, mop, vm, rs2.Index(), rs1, width, vd);
+}
+
+void EmitVectorLoad(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
+                    VecMask vm, Vec vs2, GPR rs1, WidthEncoding width, Vec vd) noexcept {
+    EmitVectorLoadImpl(buffer, nf, mew, mop, vm, vs2.Index(), rs1, width, vd);
+}
+
+void EmitVectorLoadWholeReg(CodeBuffer& buffer, uint32_t nf, bool mew, GPR rs,
+                            WidthEncoding width, Vec vd) noexcept {
+    // RISC-V V extension spec (as of 1.0RC) only allows these nf values.
+    BISCUIT_ASSERT(nf == 1 || nf == 2 || nf == 4 || nf == 8);
+
+    EmitVectorLoadImpl(buffer, nf, mew, AddressingMode::UnitStride,
+                       VecMask::No, 0b01000, rs, width, vd);
+}
+
+void EmitVectorStoreImpl(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
+                         VecMask vm, uint32_t sumop, GPR rs, WidthEncoding width, Vec vd) noexcept {
+    BISCUIT_ASSERT(nf <= 8);
+
+    // Fit to encoding space. Allows for being more explicit about the size in calling functions
+    // (e.g. using 8 for 8 elements instead of 7).
+    if (nf != 0) {
+        nf -= 1;
+    }
+
+    // clang-format off
+    const auto value = (nf << 29) |
+                       (static_cast<uint32_t>(mew) << 28) |
+                       (static_cast<uint32_t>(mop) << 26) |
+                       (static_cast<uint32_t>(vm) << 25) |
+                       (sumop << 20) |
+                       (rs.Index() << 15) |
+                       (static_cast<uint32_t>(width) << 12) |
+                       (vd.Index() << 7);
+    // clang-format on
+
+    buffer.Emit32(value | 0b100111);
+}
+
+void EmitVectorStore(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
+                     VecMask vm, UnitStrideStoreAddressingMode lumop, GPR rs,
+                     WidthEncoding width, Vec vs) noexcept {
+    EmitVectorStoreImpl(buffer, nf, mew, mop, vm, static_cast<uint32_t>(lumop), rs, width, vs);
+}
+
+void EmitVectorStore(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
+                     VecMask vm, GPR rs2, GPR rs1, WidthEncoding width, Vec vs3) noexcept {
+    EmitVectorStoreImpl(buffer, nf, mew, mop, vm, rs2.Index(), rs1, width, vs3);
+}
+
+void EmitVectorStore(CodeBuffer& buffer, uint32_t nf, bool mew, AddressingMode mop,
+                     VecMask vm, Vec vs2, GPR rs1, WidthEncoding width, Vec vs3) noexcept {
+    EmitVectorStoreImpl(buffer, nf, mew, mop, vm, vs2.Index(), rs1, width, vs3);
+}
+
+void EmitVectorStoreWholeReg(CodeBuffer& buffer, uint32_t nf, GPR rs, Vec vs) noexcept {
+    // RISC-V V extension spec (as of 1.0RC) only allows these nf values.
+    BISCUIT_ASSERT(nf == 1 || nf == 2 || nf == 4 || nf == 8);
+
+    EmitVectorStoreImpl(buffer, nf, false, AddressingMode::UnitStride, VecMask::No,
+                        0b01000, rs, WidthEncoding::E8, vs);
+}
+
+void EmitVectorOPIVIImpl(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, uint32_t imm5, Vec vd) noexcept {
+    // clang-format off
+    const auto value = (funct6 << 26) |
+                       (static_cast<uint32_t>(vm) << 25) |
+                       (vs2.Index() << 20) |
+                       ((imm5 & 0b11111) << 15) |
+                       (0b011U << 12) |
+                       (vd.Index() << 7);
+    // clang-format on
+
+    buffer.Emit32(value | 0b1010111);
+}
+
+void EmitVectorOPIVI(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, int32_t simm5, Vec vd) noexcept {
+    BISCUIT_ASSERT(simm5 >= -16 && simm5 <= 15);
+    EmitVectorOPIVIImpl(buffer, funct6, vm, vs2, static_cast<uint32_t>(simm5), vd);
+}
+
+void EmitVectorOPIVUI(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, uint32_t uimm5, Vec vd) noexcept {
+    BISCUIT_ASSERT(uimm5 <= 31);
+    EmitVectorOPIVIImpl(buffer, funct6, vm, vs2, uimm5, vd);
+}
+
+void EmitVectorOPIVV(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, Vec vs1, Vec vd) noexcept {
+    // clang-format off
+    const auto value = (funct6 << 26) |
+                       (static_cast<uint32_t>(vm) << 25) |
+                       (vs2.Index() << 20) |
+                       (vs1.Index() << 15) |
+                       (vd.Index() << 7);
+    // clang-format on
+
+    buffer.Emit32(value | 0b1010111);
+}
+
+void EmitVectorOPIVX(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, GPR rs1, Vec vd) noexcept {
+    // clang-format off
+    const auto value = (funct6 << 26) |
+                       (static_cast<uint32_t>(vm) << 25) |
+                       (vs2.Index() << 20) |
+                       (rs1.Index() << 15) |
+                       (0b100U << 12) |
+                       (vd.Index() << 7);
+    // clang-format on
+
+    buffer.Emit32(value | 0b1010111);
+}
+
+void EmitVectorOPMVV(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, Vec vs1, Vec vd) noexcept {
+    // clang-format off
+    const auto value = (funct6 << 26) |
+                       (static_cast<uint32_t>(vm) << 25) |
+                       (vs2.Index() << 20) |
+                       (vs1.Index() << 15) |
+                       (0b010U << 12) |
+                       (vd.Index() << 7);
+    // clang-format on
+
+    buffer.Emit32(value | 0b1010111);
+}
+
+void EmitVectorOPMVX(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, GPR rs1, Vec vd) noexcept {
+    // clang-format off
+    const auto value = (funct6 << 26) |
+                       (static_cast<uint32_t>(vm) << 25) |
+                       (vs2.Index() << 20) |
+                       (rs1.Index() << 15) |
+                       (0b110U << 12) |
+                       (vd.Index() << 7);
+    // clang-format on
+
+    buffer.Emit32(value | 0b1010111);
+}
+
+void EmitVectorOPFVV(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, Vec vs1, Vec vd) noexcept {
+    // clang-format off
+    const auto value = (funct6 << 26) |
+                       (static_cast<uint32_t>(vm) << 25) |
+                       (vs2.Index() << 20) |
+                       (vs1.Index() << 15) |
+                       (0b001U << 12) |
+                       (vd.Index() << 7);
+    // clang-format on
+
+    buffer.Emit32(value | 0b1010111);
+}
+
+void EmitVectorOPFVF(CodeBuffer& buffer, uint32_t funct6, VecMask vm, Vec vs2, FPR rs1, Vec vd) noexcept {
+    // clang-format off
+    const auto value = (funct6 << 26) |
+                       (static_cast<uint32_t>(vm) << 25) |
+                       (vs2.Index() << 20) |
+                       (rs1.Index() << 15) |
+                       (0b101U << 12) |
+                       (vd.Index() << 7);
+    // clang-format on
+
+    buffer.Emit32(value | 0b1010111);
+}
+} // Anonymous namespace
+
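+// Note: in the OPIVV/OPIVX/OPIVI/OPMVV/OPMVX/OPFVV/OPFVF emitters above, the
+// funct3 field (bits 14:12) selects the operand category: 0b000 for
+// vector-vector, 0b011 for vector-immediate, 0b100 for vector-scalar GPR,
+// 0b010/0b110 for the OPM forms, and 0b001/0b101 for the OPF forms, matching
+// the RVV 1.0 instruction formats.
+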
+// Vector Integer Arithmetic Instructions
+
+void Assembler::VAADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b001001, mask, vs2, vs1, vd);
+}
+
+void Assembler::VAADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b001001, mask, vs2, rs1, vd);
+}
+
+void Assembler::VAADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b001000, mask, vs2, vs1, vd);
+}
+
+void Assembler::VAADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b001000, mask, vs2, rs1, vd);
+}
+
+void Assembler::VADC(Vec vd, Vec vs2, Vec vs1) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b010000, VecMask::Yes, vs2, vs1, vd);
+}
+
+void Assembler::VADC(Vec vd, Vec vs2, GPR rs1) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b010000, VecMask::Yes, vs2, rs1, vd);
+}
+
+void Assembler::VADC(Vec vd, Vec vs2, int32_t simm) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b010000, VecMask::Yes, vs2, simm, vd);
+}
+
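+// Note: VADC above (and VMERGE further below) pass VecMask::Yes
+// unconditionally: these encodings require vm = 0, with v0 acting as an
+// implicit carry/select mask operand rather than an optional execution mask.
+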
+void Assembler::VADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b000000, mask, vs2, vs1, vd);
+}
+
+void Assembler::VADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b000000, mask, vs2, rs1, vd);
+}
+
+void Assembler::VADD(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b000000, mask, vs2, simm, vd);
+}
+
+void Assembler::VAND(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b001001, mask, vs2, vs1, vd);
+}
+
+void Assembler::VAND(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b001001, mask, vs2, rs1, vd);
+}
+
+void Assembler::VAND(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b001001, mask, vs2, simm, vd);
+}
+
+void Assembler::VASUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b001011, mask, vs2, vs1, vd);
+}
+
+void Assembler::VASUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b001011, mask, vs2, rs1, vd);
+}
+
+void Assembler::VASUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b001010, mask, vs2, vs1, vd);
+}
+
+void Assembler::VASUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b001010, mask, vs2, rs1, vd);
+}
+
+void Assembler::VCOMPRESS(Vec vd, Vec vs2, Vec vs1) noexcept {
+    // Note: Destination register may not overlap any of the source registers,
+    // as per the RVV spec (as of 1.0RC; see section 16.5)
+    EmitVectorOPMVV(m_buffer, 0b010111, VecMask::No, vs2, vs1, vd);
+}
+
+void Assembler::VDIV(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b100001, mask, vs2, vs1, vd);
+}
+
+void Assembler::VDIV(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b100001, mask, vs2, rs1, vd);
+}
+
+void Assembler::VDIVU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b100000, mask, vs2, vs1, vd);
+}
+
+void Assembler::VDIVU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b100000, mask, vs2, rs1, vd);
+}
+
+void Assembler::VFIRST(GPR rd, Vec vs, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b010000, mask, vs, v17, Vec{rd.Index()});
+}
+
+void Assembler::VID(Vec vd, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b010100, mask, v0, v17, vd);
+}
+
+void Assembler::VIOTA(Vec vd, Vec vs, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v16, vd);
+}
+
+void Assembler::VMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b101101, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMACC(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b101101, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMADC(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b010001, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMADC(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b010001, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMADC(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b010001, mask, vs2, simm, vd);
+}
+
+void Assembler::VMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b101001, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMADD(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b101001, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMAND(Vec vd, Vec vs2, Vec vs1) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b011001, VecMask::No, vs2, vs1, vd);
+}
+
+void Assembler::VMANDNOT(Vec vd, Vec vs2, Vec vs1) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b011000, VecMask::No, vs2, vs1, vd);
+}
+
+void Assembler::VMNAND(Vec vd, Vec vs2, Vec vs1) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b011101, VecMask::No, vs2, vs1, vd);
+}
+
+void Assembler::VMNOR(Vec vd, Vec vs2, Vec vs1) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b011110, VecMask::No, vs2, vs1, vd);
+}
+
+void Assembler::VMOR(Vec vd, Vec vs2, Vec vs1) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b011010, VecMask::No, vs2, vs1, vd);
+}
+
+void Assembler::VMORNOT(Vec vd, Vec vs2, Vec vs1) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b011100, VecMask::No, vs2, vs1, vd);
+}
+
+void Assembler::VMXNOR(Vec vd, Vec vs2, Vec vs1) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b011111, VecMask::No, vs2, vs1, vd);
+}
+
+void Assembler::VMXOR(Vec vd, Vec vs2, Vec vs1) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b011011, VecMask::No, vs2, vs1, vd);
+}
+
+void Assembler::VMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b000111, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMAX(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b000111, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMAXU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b000110, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMAXU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b000110, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMERGE(Vec vd, Vec vs2, Vec vs1) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b010111, VecMask::Yes, vs2, vs1, vd);
+}
+
+void Assembler::VMERGE(Vec vd, Vec vs2, GPR rs1) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b010111, VecMask::Yes, vs2, rs1, vd);
+}
+
+void Assembler::VMERGE(Vec vd, Vec vs2, int32_t simm) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b010111, VecMask::Yes, vs2, simm, vd);
+}
+
+void Assembler::VMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b000101, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMIN(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b000101, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMINU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b000100, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMINU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b000100, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMSBC(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b010011, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMSBC(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b010011, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMSBF(Vec vd, Vec vs, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v1, vd);
+}
+
+void Assembler::VMSIF(Vec vd, Vec vs, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v3, vd);
+}
+
+void Assembler::VMSOF(Vec vd, Vec vs, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b010100, mask, vs, v2, vd);
+}
+
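+// Note: VMSBF/VMSIF/VMSOF above (and VIOTA/VID/VFIRST/VPOPC nearby) encode
+// the sub-operation in the vs1 field: the v1/v2/v3/v16/v17 "registers"
+// passed there are selector values, not readable vector operands.
+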
+void Assembler::VMSEQ(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b011000, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMSEQ(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b011000, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMSEQ(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b011000, mask, vs2, simm, vd);
+}
+
+void Assembler::VMSGT(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b011111, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMSGT(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b011111, mask, vs2, simm, vd);
+}
+
+void Assembler::VMSGTU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b011110, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMSGTU(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b011110, mask, vs2, simm, vd);
+}
+
+void Assembler::VMSLE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b011101, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMSLE(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b011101, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMSLE(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b011101, mask, vs2, simm, vd);
+}
+
+void Assembler::VMSLEU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b011100, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMSLEU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b011100, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMSLEU(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b011100, mask, vs2, simm, vd);
+}
+
+void Assembler::VMSLT(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b011011, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMSLT(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b011011, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMSLTU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b011010, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMSLTU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b011010, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMSNE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b011001, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMSNE(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b011001, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMSNE(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b011001, mask, vs2, simm, vd);
+}
+
+void Assembler::VMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b100101, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b100101, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMULH(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b100111, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMULH(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b100111, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMULHSU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b100110, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMULHSU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b100110, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMULHU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b100100, mask, vs2, vs1, vd);
+}
+
+void Assembler::VMULHU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b100100, mask, vs2, rs1, vd);
+}
+
+void Assembler::VMV(Vec vd, Vec vs1) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b010111, VecMask::No, v0, vs1, vd);
+}
+
+void Assembler::VMV(Vec vd, GPR rs1) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b010111, VecMask::No, v0, rs1, vd);
+}
+
+void Assembler::VMV(Vec vd, int32_t simm) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b010111, VecMask::No, v0, simm, vd);
+}
+
+void Assembler::VMV1R(Vec vd, Vec vs) noexcept {
+    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00000, vd);
+}
+
+void Assembler::VMV2R(Vec vd, Vec vs) noexcept {
+    // Registers must be aligned to the register group size, per the
+    // RVV spec (as of 1.0RC)
+    BISCUIT_ASSERT(vd.Index() % 2 == 0);
+    BISCUIT_ASSERT(vs.Index() % 2 == 0);
+
+    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00001, vd);
+}
+
+void Assembler::VMV4R(Vec vd, Vec vs) noexcept {
+    // Registers must be aligned to the register group size, per the
+    // RVV spec (as of 1.0RC)
+    BISCUIT_ASSERT(vd.Index() % 4 == 0);
+    BISCUIT_ASSERT(vs.Index() % 4 == 0);
+
+    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00011, vd);
+}
+
+void Assembler::VMV8R(Vec vd, Vec vs) noexcept {
+    // Registers must be aligned to the register group size, per the
+    // RVV spec (as of 1.0RC)
+    BISCUIT_ASSERT(vd.Index() % 8 == 0);
+    BISCUIT_ASSERT(vs.Index() % 8 == 0);
+
+    EmitVectorOPIVI(m_buffer, 0b100111, VecMask::No, vs, 0b00111, vd);
+}
+
+void Assembler::VMV_SX(Vec vd, GPR rs) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b010000, VecMask::No, v0, rs, vd);
+}
+
+void Assembler::VMV_XS(GPR rd, Vec vs) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b010000, VecMask::No, vs, v0, Vec{rd.Index()});
+}
+
+void Assembler::VNCLIP(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b101111, mask, vs2, vs1, vd);
+}
+
+void Assembler::VNCLIP(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b101111, mask, vs2, rs1, vd);
+}
+
+void Assembler::VNCLIP(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
+    EmitVectorOPIVUI(m_buffer, 0b101111, mask, vs2, uimm, vd);
+}
+
+void Assembler::VNCLIPU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept {
+    EmitVectorOPIVV(m_buffer, 0b101110, mask, vs2, vs1, vd);
+}
+
+void Assembler::VNCLIPU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept {
+    EmitVectorOPIVX(m_buffer, 0b101110, mask, vs2, rs1, vd);
+}
+
+void Assembler::VNCLIPU(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept {
+    EmitVectorOPIVUI(m_buffer, 0b101110, mask, vs2, uimm, vd);
+}
+
+void Assembler::VNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b101111, mask, vs2, vs1, vd);
+}
+
+void Assembler::VNMSAC(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
+    EmitVectorOPMVX(m_buffer, 0b101111, mask, vs2, rs1, vd);
+}
+
+void Assembler::VNMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept {
+    EmitVectorOPMVV(m_buffer, 0b101011, mask, vs2, vs1, vd);
+}
+
+void Assembler::VNMSUB(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept {
+ EmitVectorOPMVX(m_buffer, 0b101011, mask, vs2, rs1, vd); +} + +void Assembler::VNSRA(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b101101, mask, vs2, vs1, vd); +} + +void Assembler::VNSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b101101, mask, vs2, rs1, vd); +} + +void Assembler::VNSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept { + EmitVectorOPIVUI(m_buffer, 0b101101, mask, vs2, uimm, vd); +} + +void Assembler::VNSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b101100, mask, vs2, vs1, vd); +} + +void Assembler::VNSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b101100, mask, vs2, rs1, vd); +} + +void Assembler::VNSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept { + EmitVectorOPIVUI(m_buffer, 0b101100, mask, vs2, uimm, vd); +} + +void Assembler::VOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b001010, mask, vs2, vs1, vd); +} + +void Assembler::VOR(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b001010, mask, vs2, rs1, vd); +} + +void Assembler::VOR(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept { + EmitVectorOPIVI(m_buffer, 0b001010, mask, vs2, simm, vd); +} + +void Assembler::VPOPC(GPR rd, Vec vs, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b010000, mask, vs, v16, Vec{rd.Index()}); +} + +void Assembler::VREDAND(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b000001, mask, vs2, vs1, vd); +} + +void Assembler::VREDMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b000111, mask, vs2, vs1, vd); +} + +void Assembler::VREDMAXU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b000110, mask, vs2, vs1, vd); +} + +void Assembler::VREDMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b000101, mask, vs2, vs1, vd); +} + +void Assembler::VREDMINU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b000100, mask, vs2, vs1, vd); +} + +void Assembler::VREDOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b000010, mask, vs2, vs1, vd); +} + +void Assembler::VREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b000000, mask, vs2, vs1, vd); +} + +void Assembler::VREDXOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b000011, mask, vs2, vs1, vd); +} + +void Assembler::VREM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b100011, mask, vs2, vs1, vd); +} + +void Assembler::VREM(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b100011, mask, vs2, rs1, vd); +} + +void Assembler::VREMU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b100010, mask, vs2, vs1, vd); +} + +void Assembler::VREMU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b100010, mask, vs2, rs1, vd); +} + +void Assembler::VRGATHER(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b001100, mask, vs2, vs1, vd); +} + +void Assembler::VRGATHER(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b001100, mask, vs2, rs1, vd); +} + +void Assembler::VRGATHER(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept { + EmitVectorOPIVUI(m_buffer, 0b001100, mask, vs2, uimm, 
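+        /* vrgather.vi: vd[i] = (uimm < VLMAX) ? vs2[uimm] : 0 per the RVV spec */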
vd); +} + +void Assembler::VRGATHEREI16(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b001110, mask, vs2, vs1, vd); +} + +void Assembler::VRSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b000011, mask, vs2, rs1, vd); +} + +void Assembler::VRSUB(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept { + EmitVectorOPIVI(m_buffer, 0b000011, mask, vs2, simm, vd); +} + +void Assembler::VSADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b100001, mask, vs2, vs1, vd); +} + +void Assembler::VSADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b100001, mask, vs2, rs1, vd); +} + +void Assembler::VSADD(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept { + EmitVectorOPIVI(m_buffer, 0b100001, mask, vs2, simm, vd); +} + +void Assembler::VSADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b100000, mask, vs2, vs1, vd); +} + +void Assembler::VSADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b100000, mask, vs2, rs1, vd); +} + +void Assembler::VSADDU(Vec vd, Vec vs2, int32_t simm, VecMask mask) noexcept { + EmitVectorOPIVI(m_buffer, 0b100000, mask, vs2, simm, vd); +} + +void Assembler::VSBC(Vec vd, Vec vs2, Vec vs1) noexcept { + EmitVectorOPIVV(m_buffer, 0b010010, VecMask::Yes, vs2, vs1, vd); +} + +void Assembler::VSBC(Vec vd, Vec vs2, GPR rs1) noexcept { + EmitVectorOPIVX(m_buffer, 0b010010, VecMask::Yes, vs2, rs1, vd); +} + +void Assembler::VSEXTVF2(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v7, vd); +} + +void Assembler::VSEXTVF4(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v5, vd); +} + +void Assembler::VSEXTVF8(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v3, vd); +} + +void Assembler::VSLIDE1DOWN(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b001111, mask, vs2, rs1, vd); +} + +void Assembler::VSLIDEDOWN(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b001111, mask, vs2, rs1, vd); +} + +void Assembler::VSLIDEDOWN(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept { + EmitVectorOPIVUI(m_buffer, 0b001111, mask, vs2, uimm, vd); +} + +void Assembler::VSLIDE1UP(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b001110, mask, vs2, rs1, vd); +} + +void Assembler::VSLIDEUP(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b001110, mask, vs2, rs1, vd); +} + +void Assembler::VSLIDEUP(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept { + EmitVectorOPIVUI(m_buffer, 0b001110, mask, vs2, uimm, vd); +} + +void Assembler::VSLL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b100101, mask, vs2, vs1, vd); +} + +void Assembler::VSLL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b100101, mask, vs2, rs1, vd); +} + +void Assembler::VSLL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept { + EmitVectorOPIVUI(m_buffer, 0b100101, mask, vs2, uimm, vd); +} + +void Assembler::VSMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b100111, mask, vs2, vs1, vd); +} + +void Assembler::VSMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b100111, mask, vs2, rs1, vd); +} + +void Assembler::VSRA(Vec vd, Vec vs2, Vec vs1, VecMask 
mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b101001, mask, vs2, vs1, vd); +} + +void Assembler::VSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b101001, mask, vs2, rs1, vd); +} + +void Assembler::VSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept { + EmitVectorOPIVUI(m_buffer, 0b101001, mask, vs2, uimm, vd); +} + +void Assembler::VSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b101000, mask, vs2, vs1, vd); +} + +void Assembler::VSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b101000, mask, vs2, rs1, vd); +} + +void Assembler::VSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept { + EmitVectorOPIVUI(m_buffer, 0b101000, mask, vs2, uimm, vd); +} + +void Assembler::VSSRA(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b101011, mask, vs2, vs1, vd); +} + +void Assembler::VSSRA(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b101011, mask, vs2, rs1, vd); +} + +void Assembler::VSSRA(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept { + EmitVectorOPIVUI(m_buffer, 0b101011, mask, vs2, uimm, vd); +} + +void Assembler::VSSRL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b101010, mask, vs2, vs1, vd); +} + +void Assembler::VSSRL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b101010, mask, vs2, rs1, vd); +} + +void Assembler::VSSRL(Vec vd, Vec vs2, uint32_t uimm, VecMask mask) noexcept { + EmitVectorOPIVUI(m_buffer, 0b101010, mask, vs2, uimm, vd); +} + +void Assembler::VSSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b100011, mask, vs2, vs1, vd); +} + +void Assembler::VSSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b100011, mask, vs2, rs1, vd); +} + +void Assembler::VSSUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b100010, mask, vs2, vs1, vd); +} + +void Assembler::VSSUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b100010, mask, vs2, rs1, vd); +} + +void Assembler::VSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b000010, mask, vs2, vs1, vd); +} + +void Assembler::VSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b000010, mask, vs2, rs1, vd); +} + +void Assembler::VWADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b110001, mask, vs2, vs1, vd); +} + +void Assembler::VWADD(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b110001, mask, vs2, rs1, vd); +} + +void Assembler::VWADDW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b110101, mask, vs2, vs1, vd); +} + +void Assembler::VWADDW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b110101, mask, vs2, rs1, vd); +} + +void Assembler::VWADDU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b110000, mask, vs2, vs1, vd); +} + +void Assembler::VWADDU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b110000, mask, vs2, rs1, vd); +} + +void Assembler::VWADDUW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b110100, mask, vs2, vs1, vd); +} + +void Assembler::VWADDUW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b110100, mask, vs2, 
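+        /* vwaddu.wx: the .w form, in which vs2 is already the wide 2*SEW operand */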
rs1, vd); +} + +void Assembler::VWMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b111101, mask, vs2, vs1, vd); +} + +void Assembler::VWMACC(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b111101, mask, vs2, rs1, vd); +} + +void Assembler::VWMACCSU(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b111111, mask, vs2, vs1, vd); +} + +void Assembler::VWMACCSU(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b111111, mask, vs2, rs1, vd); +} + +void Assembler::VWMACCU(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b111100, mask, vs2, vs1, vd); +} + +void Assembler::VWMACCU(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b111100, mask, vs2, rs1, vd); +} + +void Assembler::VWMACCUS(Vec vd, GPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b111110, mask, vs2, rs1, vd); +} + +void Assembler::VWMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b111011, mask, vs2, vs1, vd); +} + +void Assembler::VWMUL(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b111011, mask, vs2, rs1, vd); +} + +void Assembler::VWMULSU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b111010, mask, vs2, vs1, vd); +} + +void Assembler::VWMULSU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b111010, mask, vs2, rs1, vd); +} + +void Assembler::VWMULU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b111000, mask, vs2, vs1, vd); +} + +void Assembler::VWMULU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b111000, mask, vs2, rs1, vd); +} + +void Assembler::VWREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b110001, mask, vs2, vs1, vd); +} + +void Assembler::VWREDSUMU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b110000, mask, vs2, vs1, vd); +} + +void Assembler::VWSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b110011, mask, vs2, vs1, vd); +} + +void Assembler::VWSUB(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b110011, mask, vs2, rs1, vd); +} + +void Assembler::VWSUBW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b110111, mask, vs2, vs1, vd); +} + +void Assembler::VWSUBW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b110111, mask, vs2, rs1, vd); +} + +void Assembler::VWSUBU(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b110010, mask, vs2, vs1, vd); +} + +void Assembler::VWSUBU(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b110010, mask, vs2, rs1, vd); +} + +void Assembler::VWSUBUW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b110110, mask, vs2, vs1, vd); +} + +void Assembler::VWSUBUW(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPMVX(m_buffer, 0b110110, mask, vs2, rs1, vd); +} + +void Assembler::VXOR(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPIVV(m_buffer, 0b001011, mask, vs2, vs1, vd); +} + +void Assembler::VXOR(Vec vd, Vec vs2, GPR rs1, VecMask mask) noexcept { + EmitVectorOPIVX(m_buffer, 0b001011, mask, vs2, rs1, vd); +} + +void Assembler::VXOR(Vec vd, Vec vs2, int32_t simm, 
VecMask mask) noexcept { + EmitVectorOPIVI(m_buffer, 0b001011, mask, vs2, simm, vd); +} + +void Assembler::VZEXTVF2(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v6, vd); +} + +void Assembler::VZEXTVF4(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v4, vd); +} + +void Assembler::VZEXTVF8(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPMVV(m_buffer, 0b010010, mask, vs, v2, vd); +} + +// Vector Floating-Point Instructions + +void Assembler::VFADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b000000, mask, vs2, vs1, vd); +} + +void Assembler::VFADD(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b000000, mask, vs2, rs1, vd); +} + +void Assembler::VFCLASS(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v16, vd); +} + +void Assembler::VFCVT_F_X(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v3, vd); +} + +void Assembler::VFCVT_F_XU(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v2, vd); +} + +void Assembler::VFCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v7, vd); +} + +void Assembler::VFCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v6, vd); +} + +void Assembler::VFCVT_X_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v1, vd); +} + +void Assembler::VFCVT_XU_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v0, vd); +} + +void Assembler::VFNCVT_F_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v20, vd); +} + +void Assembler::VFNCVT_F_X(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v19, vd); +} + +void Assembler::VFNCVT_F_XU(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v18, vd); +} + +void Assembler::VFNCVT_ROD_F_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v21, vd); +} + +void Assembler::VFNCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v23, vd); +} + +void Assembler::VFNCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v22, vd); +} + +void Assembler::VFNCVT_X_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v17, vd); +} + +void Assembler::VFNCVT_XU_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v16, vd); +} + +void Assembler::VFWCVT_F_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v12, vd); +} + +void Assembler::VFWCVT_F_X(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v11, vd); +} + +void Assembler::VFWCVT_F_XU(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v10, vd); +} + +void Assembler::VFWCVT_RTZ_X_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v15, vd); +} + +void Assembler::VFWCVT_RTZ_XU_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v14, vd); +} + +void Assembler::VFWCVT_X_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 
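+        /* funct6 0b010010 selects the VFUNARY0 group, so the vs1 field below is
+           a sub-opcode rather than a register; v9 encodes vfwcvt.x.f.v */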
0b010010, mask, vs, v9, vd); +} + +void Assembler::VFWCVT_XU_F(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010010, mask, vs, v8, vd); +} + +void Assembler::VFDIV(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b100000, mask, vs2, vs1, vd); +} + +void Assembler::VFDIV(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b100000, mask, vs2, rs1, vd); +} + +void Assembler::VFRDIV(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b100001, mask, vs2, rs1, vd); +} + +void Assembler::VFREDMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b000111, mask, vs2, vs1, vd); +} + +void Assembler::VFREDMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b000101, mask, vs2, vs1, vd); +} + +void Assembler::VFREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b000001, mask, vs2, vs1, vd); +} + +void Assembler::VFREDOSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b000011, mask, vs2, vs1, vd); +} + +void Assembler::VFMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b101100, mask, vs2, vs1, vd); +} + +void Assembler::VFMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b101100, mask, vs2, rs1, vd); +} + +void Assembler::VFMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b101000, mask, vs2, vs1, vd); +} + +void Assembler::VFMADD(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b101000, mask, vs2, rs1, vd); +} + +void Assembler::VFMAX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b000110, mask, vs2, vs1, vd); +} + +void Assembler::VFMAX(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b000110, mask, vs2, rs1, vd); +} + +void Assembler::VFMERGE(Vec vd, Vec vs2, FPR rs1) noexcept { + EmitVectorOPFVF(m_buffer, 0b010111, VecMask::Yes, vs2, rs1, vd); +} + +void Assembler::VFMIN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b000100, mask, vs2, vs1, vd); +} + +void Assembler::VFMIN(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b000100, mask, vs2, rs1, vd); +} + +void Assembler::VFMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b101110, mask, vs2, vs1, vd); +} + +void Assembler::VFMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b101110, mask, vs2, rs1, vd); +} + +void Assembler::VFMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b101010, mask, vs2, vs1, vd); +} + +void Assembler::VFMSUB(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b101010, mask, vs2, rs1, vd); +} + +void Assembler::VFMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b100100, mask, vs2, vs1, vd); +} + +void Assembler::VFMUL(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b100100, mask, vs2, rs1, vd); +} + +void Assembler::VFMV(Vec vd, FPR rs) noexcept { + EmitVectorOPFVF(m_buffer, 0b010111, VecMask::No, v0, rs, vd); +} + +void Assembler::VFMV_FS(FPR rd, Vec vs) noexcept { + EmitVectorOPFVV(m_buffer, 0b010000, VecMask::No, vs, v0, Vec{rd.Index()}); +} + +void Assembler::VFMV_SF(Vec vd, FPR rs) noexcept { + EmitVectorOPFVF(m_buffer, 
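+        /* vfmv.s.f: funct6 0b010000 with vs2 = v0 moves the scalar FPR into
+           element 0 of vd */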
0b010000, VecMask::No, v0, rs, vd); +} + +void Assembler::VFNMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b101101, mask, vs2, vs1, vd); +} + +void Assembler::VFNMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b101101, mask, vs2, rs1, vd); +} + +void Assembler::VFNMADD(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b101001, mask, vs2, vs1, vd); +} + +void Assembler::VFNMADD(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b101001, mask, vs2, rs1, vd); +} + +void Assembler::VFNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b101111, mask, vs2, vs1, vd); +} + +void Assembler::VFNMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b101111, mask, vs2, rs1, vd); +} + +void Assembler::VFNMSUB(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b101011, mask, vs2, vs1, vd); +} + +void Assembler::VFNMSUB(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b101011, mask, vs2, rs1, vd); +} + +void Assembler::VFREC7(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v5, vd); +} + +void Assembler::VFSGNJ(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b001000, mask, vs2, vs1, vd); +} + +void Assembler::VFSGNJ(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b001000, mask, vs2, rs1, vd); +} + +void Assembler::VFSGNJN(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b001001, mask, vs2, vs1, vd); +} + +void Assembler::VFSGNJN(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b001001, mask, vs2, rs1, vd); +} + +void Assembler::VFSGNJX(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b001010, mask, vs2, vs1, vd); +} + +void Assembler::VFSGNJX(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b001010, mask, vs2, rs1, vd); +} + +void Assembler::VFSQRT(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v0, vd); +} + +void Assembler::VFRSQRT7(Vec vd, Vec vs, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b010011, mask, vs, v4, vd); +} + +void Assembler::VFSLIDE1DOWN(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b001111, mask, vs2, rs1, vd); +} + +void Assembler::VFSLIDE1UP(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b001110, mask, vs2, rs1, vd); +} + +void Assembler::VFSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b000010, mask, vs2, vs1, vd); +} + +void Assembler::VFSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b000010, mask, vs2, rs1, vd); +} + +void Assembler::VFRSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b100111, mask, vs2, rs1, vd); +} + +void Assembler::VFWADD(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b110000, mask, vs2, vs1, vd); +} + +void Assembler::VFWADD(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b110000, mask, vs2, rs1, vd); +} + +void Assembler::VFWADDW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b110100, mask, vs2, vs1, vd); +} + +void Assembler::VFWADDW(Vec vd, Vec vs2, FPR rs1, 
VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b110100, mask, vs2, rs1, vd); +} + +void Assembler::VFWMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b111100, mask, vs2, vs1, vd); +} + +void Assembler::VFWMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b111100, mask, vs2, rs1, vd); +} + +void Assembler::VFWMUL(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b111000, mask, vs2, vs1, vd); +} + +void Assembler::VFWMUL(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b111000, mask, vs2, rs1, vd); +} + +void Assembler::VFWNMACC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b111101, mask, vs2, vs1, vd); +} + +void Assembler::VFWNMACC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b111101, mask, vs2, rs1, vd); +} + +void Assembler::VFWNMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b111111, mask, vs2, vs1, vd); +} + +void Assembler::VFWNMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b111111, mask, vs2, rs1, vd); +} + +void Assembler::VFWREDSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b110001, mask, vs2, vs1, vd); +} + +void Assembler::VFWREDOSUM(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b110011, mask, vs2, vs1, vd); +} + +void Assembler::VFWMSAC(Vec vd, Vec vs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b111110, mask, vs2, vs1, vd); +} + +void Assembler::VFWMSAC(Vec vd, FPR rs1, Vec vs2, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b111110, mask, vs2, rs1, vd); +} + +void Assembler::VFWSUB(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b110010, mask, vs2, vs1, vd); +} + +void Assembler::VFWSUB(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b110010, mask, vs2, rs1, vd); +} + +void Assembler::VFWSUBW(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b110110, mask, vs2, vs1, vd); +} + +void Assembler::VFWSUBW(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b110110, mask, vs2, rs1, vd); +} + +void Assembler::VMFEQ(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b011000, mask, vs2, vs1, vd); +} + +void Assembler::VMFEQ(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b011000, mask, vs2, rs1, vd); +} + +void Assembler::VMFGE(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b011111, mask, vs2, rs1, vd); +} + +void Assembler::VMFGT(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b011101, mask, vs2, rs1, vd); +} + +void Assembler::VMFLE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b011001, mask, vs2, vs1, vd); +} + +void Assembler::VMFLE(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b011001, mask, vs2, rs1, vd); +} + +void Assembler::VMFLT(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b011011, mask, vs2, vs1, vd); +} + +void Assembler::VMFLT(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b011011, mask, vs2, rs1, vd); +} + +void Assembler::VMFNE(Vec vd, Vec vs2, Vec vs1, VecMask mask) noexcept { + EmitVectorOPFVV(m_buffer, 0b011100, 
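+        /* vmfne, like all vector compares, writes one mask bit per element */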
mask, vs2, vs1, vd); +} + +void Assembler::VMFNE(Vec vd, Vec vs2, FPR rs1, VecMask mask) noexcept { + EmitVectorOPFVF(m_buffer, 0b011100, mask, vs2, rs1, vd); +} + +// Vector Load/Store Instructions + +void Assembler::VLE8(Vec vd, GPR rs, VecMask mask) noexcept { + VLSEGE8(1, vd, rs, mask); +} + +void Assembler::VLE16(Vec vd, GPR rs, VecMask mask) noexcept { + VLSEGE16(1, vd, rs, mask); +} + +void Assembler::VLE32(Vec vd, GPR rs, VecMask mask) noexcept { + VLSEGE32(1, vd, rs, mask); +} + +void Assembler::VLE64(Vec vd, GPR rs, VecMask mask) noexcept { + VLSEGE64(1, vd, rs, mask); +} + +void Assembler::VLM(Vec vd, GPR rs) noexcept { + EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, VecMask::No, + UnitStrideLoadAddressingMode::MaskLoad, rs, WidthEncoding::E8, vd); +} + +void Assembler::VLSE8(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept { + VLSSEGE8(1, vd, rs1, rs2, mask); +} + +void Assembler::VLSE16(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept { + VLSSEGE16(1, vd, rs1, rs2, mask); +} + +void Assembler::VLSE32(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept { + VLSSEGE32(1, vd, rs1, rs2, mask); +} + +void Assembler::VLSE64(Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept { + VLSSEGE64(1, vd, rs1, rs2, mask); +} + +void Assembler::VLOXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VLOXSEGEI8(1, vd, rs, vs, mask); +} + +void Assembler::VLOXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VLOXSEGEI16(1, vd, rs, vs, mask); +} + +void Assembler::VLOXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VLOXSEGEI32(1, vd, rs, vs, mask); +} + +void Assembler::VLOXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VLOXSEGEI64(1, vd, rs, vs, mask); +} + +void Assembler::VLUXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VLUXSEGEI8(1, vd, rs, vs, mask); +} + +void Assembler::VLUXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VLUXSEGEI16(1, vd, rs, vs, mask); +} + +void Assembler::VLUXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VLUXSEGEI32(1, vd, rs, vs, mask); +} + +void Assembler::VLUXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VLUXSEGEI64(1, vd, rs, vs, mask); +} + +void Assembler::VLE8FF(Vec vd, GPR rs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask, + UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E8, vd); +} + +void Assembler::VLE16FF(Vec vd, GPR rs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask, + UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E16, vd); +} + +void Assembler::VLE32FF(Vec vd, GPR rs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask, + UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E32, vd); +} + +void Assembler::VLE64FF(Vec vd, GPR rs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, 0b000, false, AddressingMode::UnitStride, mask, + UnitStrideLoadAddressingMode::LoadFaultOnlyFirst, rs, WidthEncoding::E64, vd); +} + +void Assembler::VLSEGE8(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask, + UnitStrideLoadAddressingMode::Load, rs, WidthEncoding::E8, vd); +} + +void Assembler::VLSEGE16(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask, + UnitStrideLoadAddressingMode::Load, rs, 
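+                   /* for vector loads, the effective element width (EEW) comes
+                      from this width field rather than from vtype's SEW */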
WidthEncoding::E16, vd); +} + +void Assembler::VLSEGE32(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask, + UnitStrideLoadAddressingMode::Load, rs, WidthEncoding::E32, vd); +} + +void Assembler::VLSEGE64(uint32_t num_segments, Vec vd, GPR rs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::UnitStride, mask, + UnitStrideLoadAddressingMode::Load, rs, WidthEncoding::E64, vd); +} + +void Assembler::VLSSEGE8(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask, + rs2, rs1, WidthEncoding::E8, vd); +} + +void Assembler::VLSSEGE16(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask, + rs2, rs1, WidthEncoding::E16, vd); +} + +void Assembler::VLSSEGE32(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask, + rs2, rs1, WidthEncoding::E32, vd); +} + +void Assembler::VLSSEGE64(uint32_t num_segments, Vec vd, GPR rs1, GPR rs2, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::Strided, mask, + rs2, rs1, WidthEncoding::E64, vd); +} + +void Assembler::VLOXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask, + vs, rs, WidthEncoding::E8, vd); +} + +void Assembler::VLOXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask, + vs, rs, WidthEncoding::E16, vd); +} + +void Assembler::VLOXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask, + vs, rs, WidthEncoding::E32, vd); +} + +void Assembler::VLOXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask, + vs, rs, WidthEncoding::E64, vd); +} + +void Assembler::VLUXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask, + vs, rs, WidthEncoding::E8, vd); +} + +void Assembler::VLUXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask, + vs, rs, WidthEncoding::E16, vd); +} + +void Assembler::VLUXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask, + vs, rs, WidthEncoding::E32, vd); +} + +void Assembler::VLUXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorLoad(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask, + vs, rs, WidthEncoding::E64, vd); +} + +void Assembler::VLRE8(uint32_t num_registers, Vec vd, GPR rs) noexcept { + BISCUIT_ASSERT(vd.Index() % num_registers == 0); + EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E8, vd); +} + +void Assembler::VL1RE8(Vec vd, GPR rs) noexcept { + VLRE8(1, vd, rs); +} + +void Assembler::VL2RE8(Vec vd, GPR rs) 
noexcept { + VLRE8(2, vd, rs); +} + +void Assembler::VL4RE8(Vec vd, GPR rs) noexcept { + VLRE8(4, vd, rs); +} + +void Assembler::VL8RE8(Vec vd, GPR rs) noexcept { + VLRE8(8, vd, rs); +} + +void Assembler::VLRE16(uint32_t num_registers, Vec vd, GPR rs) noexcept { + BISCUIT_ASSERT(vd.Index() % num_registers == 0); + EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E16, vd); +} + +void Assembler::VL1RE16(Vec vd, GPR rs) noexcept { + VLRE16(1, vd, rs); +} + +void Assembler::VL2RE16(Vec vd, GPR rs) noexcept { + VLRE16(2, vd, rs); +} + +void Assembler::VL4RE16(Vec vd, GPR rs) noexcept { + VLRE16(4, vd, rs); +} + +void Assembler::VL8RE16(Vec vd, GPR rs) noexcept { + VLRE16(8, vd, rs); +} + +void Assembler::VLRE32(uint32_t num_registers, Vec vd, GPR rs) noexcept { + BISCUIT_ASSERT(vd.Index() % num_registers == 0); + EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E32, vd); +} + +void Assembler::VL1RE32(Vec vd, GPR rs) noexcept { + VLRE32(1, vd, rs); +} + +void Assembler::VL2RE32(Vec vd, GPR rs) noexcept { + VLRE32(2, vd, rs); +} + +void Assembler::VL4RE32(Vec vd, GPR rs) noexcept { + VLRE32(4, vd, rs); +} + +void Assembler::VL8RE32(Vec vd, GPR rs) noexcept { + VLRE32(8, vd, rs); +} + +void Assembler::VLRE64(uint32_t num_registers, Vec vd, GPR rs) noexcept { + BISCUIT_ASSERT(vd.Index() % num_registers == 0); + EmitVectorLoadWholeReg(m_buffer, num_registers, false, rs, WidthEncoding::E64, vd); +} + +void Assembler::VL1RE64(Vec vd, GPR rs) noexcept { + VLRE64(1, vd, rs); +} + +void Assembler::VL2RE64(Vec vd, GPR rs) noexcept { + VLRE64(2, vd, rs); +} + +void Assembler::VL4RE64(Vec vd, GPR rs) noexcept { + VLRE64(4, vd, rs); +} + +void Assembler::VL8RE64(Vec vd, GPR rs) noexcept { + VLRE64(8, vd, rs); +} + +void Assembler::VSE8(Vec vs, GPR rs, VecMask mask) noexcept { + VSSEGE8(1, vs, rs, mask); +} + +void Assembler::VSE16(Vec vs, GPR rs, VecMask mask) noexcept { + VSSEGE16(1, vs, rs, mask); +} + +void Assembler::VSE32(Vec vs, GPR rs, VecMask mask) noexcept { + VSSEGE32(1, vs, rs, mask); +} + +void Assembler::VSE64(Vec vs, GPR rs, VecMask mask) noexcept { + VSSEGE64(1, vs, rs, mask); +} + +void Assembler::VSM(Vec vs, GPR rs) noexcept { + EmitVectorStore(m_buffer, 0b000, false, AddressingMode::UnitStride, VecMask::No, + UnitStrideStoreAddressingMode::MaskStore, rs, WidthEncoding::E8, vs); +} + +void Assembler::VSSE8(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept { + VSSSEGE8(1, vs, rs1, rs2, mask); +} + +void Assembler::VSSE16(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept { + VSSSEGE16(1, vs, rs1, rs2, mask); +} + +void Assembler::VSSE32(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept { + VSSSEGE32(1, vs, rs1, rs2, mask); +} + +void Assembler::VSSE64(Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept { + VSSSEGE64(1, vs, rs1, rs2, mask); +} + +void Assembler::VSOXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VSOXSEGEI8(1, vd, rs, vs, mask); +} + +void Assembler::VSOXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VSOXSEGEI16(1, vd, rs, vs, mask); +} + +void Assembler::VSOXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VSOXSEGEI32(1, vd, rs, vs, mask); +} + +void Assembler::VSOXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VSOXSEGEI64(1, vd, rs, vs, mask); +} + +void Assembler::VSUXEI8(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VSUXSEGEI8(1, vd, rs, vs, mask); +} + +void Assembler::VSUXEI16(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VSUXSEGEI16(1, vd, rs, vs, mask); +} + +void 
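+/* VSUX* are the unordered indexed stores; unlike the ordered VSOX* forms, the
+   per-element accesses may be performed in any order */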
Assembler::VSUXEI32(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VSUXSEGEI32(1, vd, rs, vs, mask); +} + +void Assembler::VSUXEI64(Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + VSUXSEGEI64(1, vd, rs, vs, mask); +} + +void Assembler::VSSEGE8(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask, + UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E8, vs); +} + +void Assembler::VSSEGE16(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask, + UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E16, vs); +} + +void Assembler::VSSEGE32(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask, + UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E32, vs); +} + +void Assembler::VSSEGE64(uint32_t num_segments, Vec vs, GPR rs, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::UnitStride, mask, + UnitStrideStoreAddressingMode::Store, rs, WidthEncoding::E64, vs); +} + +void Assembler::VSSSEGE8(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask, + rs2, rs1, WidthEncoding::E8, vs); +} + +void Assembler::VSSSEGE16(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask, + rs2, rs1, WidthEncoding::E16, vs); +} + +void Assembler::VSSSEGE32(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask, + rs2, rs1, WidthEncoding::E32, vs); +} + +void Assembler::VSSSEGE64(uint32_t num_segments, Vec vs, GPR rs1, GPR rs2, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::Strided, mask, + rs2, rs1, WidthEncoding::E64, vs); +} + +void Assembler::VSOXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask, + vs, rs, WidthEncoding::E8, vd); +} + +void Assembler::VSOXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask, + vs, rs, WidthEncoding::E16, vd); +} + +void Assembler::VSOXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask, + vs, rs, WidthEncoding::E32, vd); +} + +void Assembler::VSOXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedOrdered, mask, + vs, rs, WidthEncoding::E64, vd); +} + +void Assembler::VSUXSEGEI8(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask, + vs, rs, WidthEncoding::E8, vd); +} + +void Assembler::VSUXSEGEI16(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept { + EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask, + vs, rs, WidthEncoding::E16, vd); +} + +void Assembler::VSUXSEGEI32(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask 
mask) noexcept {
+    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
+                    vs, rs, WidthEncoding::E32, vd);
+}
+
+void Assembler::VSUXSEGEI64(uint32_t num_segments, Vec vd, GPR rs, Vec vs, VecMask mask) noexcept {
+    EmitVectorStore(m_buffer, num_segments, false, AddressingMode::IndexedUnordered, mask,
+                    vs, rs, WidthEncoding::E64, vd);
+}
+
+void Assembler::VSR(uint32_t num_registers, Vec vs, GPR rs) noexcept {
+    EmitVectorStoreWholeReg(m_buffer, num_registers, rs, vs);
+}
+
+void Assembler::VS1R(Vec vs, GPR rs) noexcept {
+    VSR(1, vs, rs);
+}
+
+void Assembler::VS2R(Vec vs, GPR rs) noexcept {
+    BISCUIT_ASSERT(vs.Index() % 2 == 0);
+    VSR(2, vs, rs);
+}
+
+void Assembler::VS4R(Vec vs, GPR rs) noexcept {
+    BISCUIT_ASSERT(vs.Index() % 4 == 0);
+    VSR(4, vs, rs);
+}
+
+void Assembler::VS8R(Vec vs, GPR rs) noexcept {
+    BISCUIT_ASSERT(vs.Index() % 8 == 0);
+    VSR(8, vs, rs);
+}
+
+void Assembler::VSETIVLI(GPR rd, uint32_t imm, SEW sew, LMUL lmul, VTA vta, VMA vma) noexcept {
+    // Immediate must be able to fit in 5 bits.
+    BISCUIT_ASSERT(imm <= 31);
+
+    // clang-format off
+    const auto zimm = static_cast<uint32_t>(lmul) |
+                      (static_cast<uint32_t>(sew) << 3) |
+                      (static_cast<uint32_t>(vta) << 6) |
+                      (static_cast<uint32_t>(vma) << 7);
+    // clang-format on
+
+    m_buffer.Emit32(0xC0007057U | (zimm << 20) | (imm << 15) | (rd.Index() << 7));
+}
+
+void Assembler::VSETVL(GPR rd, GPR rs1, GPR rs2) noexcept {
+    m_buffer.Emit32(0x80007057U | (rs2.Index() << 20) | (rs1.Index() << 15) | (rd.Index() << 7));
+}
+
+void Assembler::VSETVLI(GPR rd, GPR rs, SEW sew, LMUL lmul, VTA vta, VMA vma) noexcept {
+    // clang-format off
+    const auto zimm = static_cast<uint32_t>(lmul) |
+                      (static_cast<uint32_t>(sew) << 3) |
+                      (static_cast<uint32_t>(vta) << 6) |
+                      (static_cast<uint32_t>(vma) << 7);
+    // clang-format on
+
+    m_buffer.Emit32(0x00007057U | (zimm << 20) | (rs.Index() << 15) | (rd.Index() << 7));
+}
+
+} // namespace biscuit
diff --git a/dep/biscuit/src/code_buffer.cpp b/dep/biscuit/src/code_buffer.cpp
new file mode 100644
index 000000000..386be3757
--- /dev/null
+++ b/dep/biscuit/src/code_buffer.cpp
@@ -0,0 +1,111 @@
+#include <biscuit/assert.hpp>
+#include <biscuit/code_buffer.hpp>
+
+#include <cstring>
+#include <utility>
+
+#ifdef BISCUIT_CODE_BUFFER_MMAP
+#include <sys/mman.h>
+#endif
+
+namespace biscuit {
+
+CodeBuffer::CodeBuffer(size_t capacity)
+    : m_capacity{capacity}, m_is_managed{true} {
+    if (capacity == 0) {
+        return;
+    }
+
+#ifdef BISCUIT_CODE_BUFFER_MMAP
+    m_buffer = static_cast<uint8_t*>(mmap(nullptr, capacity,
+                                          PROT_READ | PROT_WRITE,
+                                          MAP_PRIVATE | MAP_ANONYMOUS,
+                                          -1, 0));
+    BISCUIT_ASSERT(m_buffer != nullptr);
+#else
+    m_buffer = new uint8_t[capacity]();
+#endif
+
+    m_cursor = m_buffer;
+}
+
+CodeBuffer::CodeBuffer(uint8_t* buffer, size_t capacity)
+    : m_buffer{buffer}, m_cursor{buffer}, m_capacity{capacity} {
+    BISCUIT_ASSERT(buffer != nullptr);
+}
+
+CodeBuffer::CodeBuffer(CodeBuffer&& other) noexcept
+    : m_buffer{std::exchange(other.m_buffer, nullptr)}
+    , m_cursor{std::exchange(other.m_cursor, nullptr)}
+    , m_capacity{std::exchange(other.m_capacity, size_t{0})}
+    , m_is_managed{std::exchange(other.m_is_managed, false)} {}
+
+CodeBuffer& CodeBuffer::operator=(CodeBuffer&& other) noexcept {
+    if (this == &other) {
+        return *this;
+    }
+
+    std::swap(m_buffer, other.m_buffer);
+    std::swap(m_cursor, other.m_cursor);
+    std::swap(m_capacity, other.m_capacity);
+    std::swap(m_is_managed, other.m_is_managed);
+    return *this;
+}
+
+CodeBuffer::~CodeBuffer() noexcept {
+    if (!m_is_managed) {
+        return;
+    }
+
+#ifdef BISCUIT_CODE_BUFFER_MMAP
+    munmap(m_buffer, m_capacity);
+#else
+    delete[] m_buffer;
+#endif
+}
+
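+/* Grow is only valid for buffers that the CodeBuffer allocated itself (it
+   asserts IsManaged() below) and preserves the cursor offset across the
+   reallocation. */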
+void CodeBuffer::Grow(size_t new_capacity) {
+    BISCUIT_ASSERT(IsManaged());
+
+    // No-op, just return.
+    if (new_capacity <= m_capacity) {
+        return;
+    }
+
+    const auto cursor_offset = GetCursorOffset();
+
+#ifdef BISCUIT_CODE_BUFFER_MMAP
+    auto* new_buffer = static_cast<uint8_t*>(mremap(m_buffer, m_capacity, new_capacity, MREMAP_MAYMOVE));
+    BISCUIT_ASSERT(new_buffer != nullptr);
+#else
+    auto* new_buffer = new uint8_t[new_capacity]();
+    std::memcpy(new_buffer, m_buffer, m_capacity);
+    delete[] m_buffer;
+#endif
+
+    m_buffer = new_buffer;
+    m_capacity = new_capacity;
+    m_cursor = m_buffer + cursor_offset;
+}
+
+void CodeBuffer::SetExecutable() {
+#ifdef BISCUIT_CODE_BUFFER_MMAP
+    const auto result = mprotect(m_buffer, m_capacity, PROT_READ | PROT_EXEC);
+    BISCUIT_ASSERT(result == 0);
+#else
+    // Unimplemented/Unnecessary for new
+    BISCUIT_ASSERT(false);
+#endif
+}
+
+void CodeBuffer::SetWritable() {
+#ifdef BISCUIT_CODE_BUFFER_MMAP
+    const auto result = mprotect(m_buffer, m_capacity, PROT_READ | PROT_WRITE);
+    BISCUIT_ASSERT(result == 0);
+#else
+    // Unimplemented/Unnecessary for new
+    BISCUIT_ASSERT(false);
+#endif
+}
+
+} // namespace biscuit
diff --git a/dep/biscuit/src/cpuinfo.cpp b/dep/biscuit/src/cpuinfo.cpp
new file mode 100644
index 000000000..b09446038
--- /dev/null
+++ b/dep/biscuit/src/cpuinfo.cpp
@@ -0,0 +1,39 @@
+// Copyright (c), 2022, KNS Group LLC (YADRO)
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+
+#include <biscuit/cpuinfo.hpp>
+
+namespace biscuit {
+
+bool CPUInfo::Has(RISCVExtension extension) const {
+#if defined(__linux__) && defined(__riscv)
+    const static uint64_t features = getauxval(AT_HWCAP) & (
+        COMPAT_HWCAP_ISA_I |
+        COMPAT_HWCAP_ISA_M |
+        COMPAT_HWCAP_ISA_A |
+        COMPAT_HWCAP_ISA_F |
+        COMPAT_HWCAP_ISA_D |
+        COMPAT_HWCAP_ISA_C |
+        COMPAT_HWCAP_ISA_V
+    );
+#else
+    const static uint64_t features = 0;
+#endif
+
+    return (features & static_cast<uint64_t>(extension)) != 0;
+}
+
+uint32_t CPUInfo::GetVlenb() const {
+    if(Has(RISCVExtension::V)) {
+        static CSRReader<CSR::VLenb> csrReader;
+        const static auto getVLEN = csrReader.GetCode<uint32_t (*)()>();
+        return getVLEN();
+    }
+
+    return 0;
+}
+
+} // namespace biscuit
diff --git a/dep/riscv-disas/CMakeLists.txt b/dep/riscv-disas/CMakeLists.txt
new file mode 100644
index 000000000..e91dbf456
--- /dev/null
+++ b/dep/riscv-disas/CMakeLists.txt
@@ -0,0 +1,7 @@
+add_library(riscv-disas
+  include/riscv-disas.h
+  src/riscv-disas.c
+)
+
+target_include_directories(riscv-disas PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
+
diff --git a/dep/riscv-disas/README.md b/dep/riscv-disas/README.md
new file mode 100644
index 000000000..9633113c6
--- /dev/null
+++ b/dep/riscv-disas/README.md
@@ -0,0 +1,10 @@
+# RISC-V Disassembler
+
+RISC-V Disassembler with support for RV32/RV64/RV128 IMAFDC
+
+## Build instructions
+
+```
+cmake -B build
+cmake --build build
+```
diff --git a/dep/riscv-disas/include/riscv-disas.h b/dep/riscv-disas/include/riscv-disas.h
new file mode 100644
index 000000000..e9dd0706c
--- /dev/null
+++ b/dep/riscv-disas/include/riscv-disas.h
@@ -0,0 +1,520 @@
+/*
+ * RISC-V Disassembler
+ *
+ * Copyright (c) 2016-2017 Michael Clark <michaeljclark@mac.com>
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef RISCV_DISASSEMBLER_H
+#define RISCV_DISASSEMBLER_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* types */
+
+typedef uint64_t rv_inst;
+typedef uint16_t rv_opcode;
+
+/* enums */
+
+typedef enum {
+    rv32,
+    rv64,
+    rv128
+} rv_isa;
+
+typedef enum {
+    rv_rm_rne = 0,
+    rv_rm_rtz = 1,
+    rv_rm_rdn = 2,
+    rv_rm_rup = 3,
+    rv_rm_rmm = 4,
+    rv_rm_dyn = 7,
+} rv_rm;
+
+typedef enum {
+    rv_fence_i = 8,
+    rv_fence_o = 4,
+    rv_fence_r = 2,
+    rv_fence_w = 1,
+} rv_fence;
+
+typedef enum {
+    rv_ireg_zero,
+    rv_ireg_ra,
+    rv_ireg_sp,
+    rv_ireg_gp,
+    rv_ireg_tp,
+    rv_ireg_t0,
+    rv_ireg_t1,
+    rv_ireg_t2,
+    rv_ireg_s0,
+    rv_ireg_s1,
+    rv_ireg_a0,
+    rv_ireg_a1,
+    rv_ireg_a2,
+    rv_ireg_a3,
+    rv_ireg_a4,
+    rv_ireg_a5,
+    rv_ireg_a6,
+    rv_ireg_a7,
+    rv_ireg_s2,
+    rv_ireg_s3,
+    rv_ireg_s4,
+    rv_ireg_s5,
+    rv_ireg_s6,
+    rv_ireg_s7,
+    rv_ireg_s8,
+    rv_ireg_s9,
+    rv_ireg_s10,
+    rv_ireg_s11,
+    rv_ireg_t3,
+    rv_ireg_t4,
+    rv_ireg_t5,
+    rv_ireg_t6,
+} rv_ireg;
+
+typedef enum {
+    rvc_end,
+    rvc_rd_eq_ra,
+    rvc_rd_eq_x0,
+    rvc_rs1_eq_x0,
+    rvc_rs2_eq_x0,
+    rvc_rs2_eq_rs1,
+    rvc_rs1_eq_ra,
+    rvc_imm_eq_zero,
+    rvc_imm_eq_n1,
+    rvc_imm_eq_p1,
+    rvc_csr_eq_0x001,
+    rvc_csr_eq_0x002,
+    rvc_csr_eq_0x003,
+    rvc_csr_eq_0xc00,
+    rvc_csr_eq_0xc01,
+    rvc_csr_eq_0xc02,
+    rvc_csr_eq_0xc80,
+    rvc_csr_eq_0xc81,
+    rvc_csr_eq_0xc82,
+} rvc_constraint;
+
+typedef enum {
+    rv_codec_illegal,
+    rv_codec_none,
+    rv_codec_u,
+    rv_codec_uj,
+    rv_codec_i,
+    rv_codec_i_sh5,
+    rv_codec_i_sh6,
+    rv_codec_i_sh7,
+    rv_codec_i_csr,
+    rv_codec_s,
+    rv_codec_sb,
+    rv_codec_r,
+    rv_codec_r_m,
+    rv_codec_r4_m,
+    rv_codec_r_a,
+    rv_codec_r_l,
+    rv_codec_r_f,
+    rv_codec_cb,
+    rv_codec_cb_imm,
+    rv_codec_cb_sh5,
+    rv_codec_cb_sh6,
+    rv_codec_ci,
+    rv_codec_ci_sh5,
+    rv_codec_ci_sh6,
+    rv_codec_ci_16sp,
+    rv_codec_ci_lwsp,
+    rv_codec_ci_ldsp,
+    rv_codec_ci_lqsp,
+    rv_codec_ci_li,
+    rv_codec_ci_lui,
+    rv_codec_ci_none,
+    rv_codec_ciw_4spn,
+    rv_codec_cj,
+    rv_codec_cj_jal,
+    rv_codec_cl_lw,
+    rv_codec_cl_ld,
+    rv_codec_cl_lq,
+    rv_codec_cr,
+    rv_codec_cr_mv,
+    rv_codec_cr_jalr,
+    rv_codec_cr_jr,
+    rv_codec_cs,
+    rv_codec_cs_sw,
+    rv_codec_cs_sd,
+    rv_codec_cs_sq,
+    rv_codec_css_swsp,
+    rv_codec_css_sdsp,
+    rv_codec_css_sqsp,
+} rv_codec;
+
+typedef enum {
+    rv_op_illegal,
+    rv_op_lui,
+    rv_op_auipc,
+    rv_op_jal,
+    rv_op_jalr,
+    rv_op_beq,
+    rv_op_bne,
+    rv_op_blt,
+    rv_op_bge,
+    rv_op_bltu,
+    rv_op_bgeu,
+    rv_op_lb,
+    rv_op_lh,
+    rv_op_lw,
+
rv_op_lbu, + rv_op_lhu, + rv_op_sb, + rv_op_sh, + rv_op_sw, + rv_op_addi, + rv_op_slti, + rv_op_sltiu, + rv_op_xori, + rv_op_ori, + rv_op_andi, + rv_op_slli, + rv_op_srli, + rv_op_srai, + rv_op_add, + rv_op_sub, + rv_op_sll, + rv_op_slt, + rv_op_sltu, + rv_op_xor, + rv_op_srl, + rv_op_sra, + rv_op_or, + rv_op_and, + rv_op_fence, + rv_op_fence_i, + rv_op_lwu, + rv_op_ld, + rv_op_sd, + rv_op_addiw, + rv_op_slliw, + rv_op_srliw, + rv_op_sraiw, + rv_op_addw, + rv_op_subw, + rv_op_sllw, + rv_op_srlw, + rv_op_sraw, + rv_op_ldu, + rv_op_lq, + rv_op_sq, + rv_op_addid, + rv_op_sllid, + rv_op_srlid, + rv_op_sraid, + rv_op_addd, + rv_op_subd, + rv_op_slld, + rv_op_srld, + rv_op_srad, + rv_op_mul, + rv_op_mulh, + rv_op_mulhsu, + rv_op_mulhu, + rv_op_div, + rv_op_divu, + rv_op_rem, + rv_op_remu, + rv_op_mulw, + rv_op_divw, + rv_op_divuw, + rv_op_remw, + rv_op_remuw, + rv_op_muld, + rv_op_divd, + rv_op_divud, + rv_op_remd, + rv_op_remud, + rv_op_lr_w, + rv_op_sc_w, + rv_op_amoswap_w, + rv_op_amoadd_w, + rv_op_amoxor_w, + rv_op_amoor_w, + rv_op_amoand_w, + rv_op_amomin_w, + rv_op_amomax_w, + rv_op_amominu_w, + rv_op_amomaxu_w, + rv_op_lr_d, + rv_op_sc_d, + rv_op_amoswap_d, + rv_op_amoadd_d, + rv_op_amoxor_d, + rv_op_amoor_d, + rv_op_amoand_d, + rv_op_amomin_d, + rv_op_amomax_d, + rv_op_amominu_d, + rv_op_amomaxu_d, + rv_op_lr_q, + rv_op_sc_q, + rv_op_amoswap_q, + rv_op_amoadd_q, + rv_op_amoxor_q, + rv_op_amoor_q, + rv_op_amoand_q, + rv_op_amomin_q, + rv_op_amomax_q, + rv_op_amominu_q, + rv_op_amomaxu_q, + rv_op_ecall, + rv_op_ebreak, + rv_op_uret, + rv_op_sret, + rv_op_hret, + rv_op_mret, + rv_op_dret, + rv_op_sfence_vm, + rv_op_sfence_vma, + rv_op_wfi, + rv_op_csrrw, + rv_op_csrrs, + rv_op_csrrc, + rv_op_csrrwi, + rv_op_csrrsi, + rv_op_csrrci, + rv_op_flw, + rv_op_fsw, + rv_op_fmadd_s, + rv_op_fmsub_s, + rv_op_fnmsub_s, + rv_op_fnmadd_s, + rv_op_fadd_s, + rv_op_fsub_s, + rv_op_fmul_s, + rv_op_fdiv_s, + rv_op_fsgnj_s, + rv_op_fsgnjn_s, + rv_op_fsgnjx_s, + rv_op_fmin_s, + rv_op_fmax_s, + rv_op_fsqrt_s, + rv_op_fle_s, + rv_op_flt_s, + rv_op_feq_s, + rv_op_fcvt_w_s, + rv_op_fcvt_wu_s, + rv_op_fcvt_s_w, + rv_op_fcvt_s_wu, + rv_op_fmv_x_s, + rv_op_fclass_s, + rv_op_fmv_s_x, + rv_op_fcvt_l_s, + rv_op_fcvt_lu_s, + rv_op_fcvt_s_l, + rv_op_fcvt_s_lu, + rv_op_fld, + rv_op_fsd, + rv_op_fmadd_d, + rv_op_fmsub_d, + rv_op_fnmsub_d, + rv_op_fnmadd_d, + rv_op_fadd_d, + rv_op_fsub_d, + rv_op_fmul_d, + rv_op_fdiv_d, + rv_op_fsgnj_d, + rv_op_fsgnjn_d, + rv_op_fsgnjx_d, + rv_op_fmin_d, + rv_op_fmax_d, + rv_op_fcvt_s_d, + rv_op_fcvt_d_s, + rv_op_fsqrt_d, + rv_op_fle_d, + rv_op_flt_d, + rv_op_feq_d, + rv_op_fcvt_w_d, + rv_op_fcvt_wu_d, + rv_op_fcvt_d_w, + rv_op_fcvt_d_wu, + rv_op_fclass_d, + rv_op_fcvt_l_d, + rv_op_fcvt_lu_d, + rv_op_fmv_x_d, + rv_op_fcvt_d_l, + rv_op_fcvt_d_lu, + rv_op_fmv_d_x, + rv_op_flq, + rv_op_fsq, + rv_op_fmadd_q, + rv_op_fmsub_q, + rv_op_fnmsub_q, + rv_op_fnmadd_q, + rv_op_fadd_q, + rv_op_fsub_q, + rv_op_fmul_q, + rv_op_fdiv_q, + rv_op_fsgnj_q, + rv_op_fsgnjn_q, + rv_op_fsgnjx_q, + rv_op_fmin_q, + rv_op_fmax_q, + rv_op_fcvt_s_q, + rv_op_fcvt_q_s, + rv_op_fcvt_d_q, + rv_op_fcvt_q_d, + rv_op_fsqrt_q, + rv_op_fle_q, + rv_op_flt_q, + rv_op_feq_q, + rv_op_fcvt_w_q, + rv_op_fcvt_wu_q, + rv_op_fcvt_q_w, + rv_op_fcvt_q_wu, + rv_op_fclass_q, + rv_op_fcvt_l_q, + rv_op_fcvt_lu_q, + rv_op_fcvt_q_l, + rv_op_fcvt_q_lu, + rv_op_fmv_x_q, + rv_op_fmv_q_x, + rv_op_c_addi4spn, + rv_op_c_fld, + rv_op_c_lw, + rv_op_c_flw, + rv_op_c_fsd, + rv_op_c_sw, + rv_op_c_fsw, + rv_op_c_nop, + rv_op_c_addi, + rv_op_c_jal, + 
rv_op_c_li, + rv_op_c_addi16sp, + rv_op_c_lui, + rv_op_c_srli, + rv_op_c_srai, + rv_op_c_andi, + rv_op_c_sub, + rv_op_c_xor, + rv_op_c_or, + rv_op_c_and, + rv_op_c_subw, + rv_op_c_addw, + rv_op_c_j, + rv_op_c_beqz, + rv_op_c_bnez, + rv_op_c_slli, + rv_op_c_fldsp, + rv_op_c_lwsp, + rv_op_c_flwsp, + rv_op_c_jr, + rv_op_c_mv, + rv_op_c_ebreak, + rv_op_c_jalr, + rv_op_c_add, + rv_op_c_fsdsp, + rv_op_c_swsp, + rv_op_c_fswsp, + rv_op_c_ld, + rv_op_c_sd, + rv_op_c_addiw, + rv_op_c_ldsp, + rv_op_c_sdsp, + rv_op_c_lq, + rv_op_c_sq, + rv_op_c_lqsp, + rv_op_c_sqsp, + rv_op_nop, + rv_op_mv, + rv_op_not, + rv_op_neg, + rv_op_negw, + rv_op_sext_w, + rv_op_seqz, + rv_op_snez, + rv_op_sltz, + rv_op_sgtz, + rv_op_fmv_s, + rv_op_fabs_s, + rv_op_fneg_s, + rv_op_fmv_d, + rv_op_fabs_d, + rv_op_fneg_d, + rv_op_fmv_q, + rv_op_fabs_q, + rv_op_fneg_q, + rv_op_beqz, + rv_op_bnez, + rv_op_blez, + rv_op_bgez, + rv_op_bltz, + rv_op_bgtz, + rv_op_ble, + rv_op_bleu, + rv_op_bgt, + rv_op_bgtu, + rv_op_j, + rv_op_ret, + rv_op_jr, + rv_op_rdcycle, + rv_op_rdtime, + rv_op_rdinstret, + rv_op_rdcycleh, + rv_op_rdtimeh, + rv_op_rdinstreth, + rv_op_frcsr, + rv_op_frrm, + rv_op_frflags, + rv_op_fscsr, + rv_op_fsrm, + rv_op_fsflags, + rv_op_fsrmi, + rv_op_fsflagsi, +} rv_op;
+
+/* structures */
+
+typedef struct {
+    uint64_t pc;
+    uint64_t inst;
+    int32_t imm;
+    uint16_t op;
+    uint8_t codec;
+    uint8_t rd;
+    uint8_t rs1;
+    uint8_t rs2;
+    uint8_t rs3;
+    uint8_t rm;
+    uint8_t pred;
+    uint8_t succ;
+    uint8_t aq;
+    uint8_t rl;
+} rv_decode;
+
+/* functions */
+
+size_t inst_length(rv_inst inst);
+void inst_fetch(const uint8_t *data, rv_inst *instp, size_t *length);
+void disasm_inst(char *buf, size_t buflen, rv_isa isa, uint64_t pc, rv_inst inst);
+
+#endif
diff --git a/dep/riscv-disas/source.txt b/dep/riscv-disas/source.txt
new file mode 100644
index 000000000..5dd42c601
--- /dev/null
+++ b/dep/riscv-disas/source.txt
@@ -0,0 +1 @@
+https://github.com/michaeljclark/riscv-disassembler
\ No newline at end of file
diff --git a/dep/riscv-disas/src/riscv-disas.c b/dep/riscv-disas/src/riscv-disas.c
new file mode 100644
index 000000000..09c393c87
--- /dev/null
+++ b/dep/riscv-disas/src/riscv-disas.c
@@ -0,0 +1,2276 @@
+/*
+ * RISC-V Disassembler
+ *
+ * Copyright (c) 2016-2017 Michael Clark <michaeljclark@mac.com>
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */ + +#include "riscv-disas.h" + +typedef struct { + const int op; + const rvc_constraint *constraints; +} rv_comp_data; + +enum { + rvcd_imm_nz = 0x1, + rvcd_imm_nz_hint = 0x2 +}; + +typedef struct { + const char * const name; + const rv_codec codec; + const char * const format; + const rv_comp_data *pseudo; + const short decomp_rv32; + const short decomp_rv64; + const short decomp_rv128; + const short decomp_data; +} rv_opcode_data; + +/* register names */ + +static const char rv_ireg_name_sym[32][5] = { + "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", + "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", + "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", + "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", +}; + +static const char rv_freg_name_sym[32][5] = { + "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", + "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", + "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", + "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11", +}; + +/* instruction formats */ + +static const char rv_fmt_none[] = "O\t"; +static const char rv_fmt_rs1[] = "O\t1"; +static const char rv_fmt_offset[] = "O\to"; +static const char rv_fmt_pred_succ[] = "O\tp,s"; +static const char rv_fmt_rs1_rs2[] = "O\t1,2"; +static const char rv_fmt_rd_imm[] = "O\t0,i"; +static const char rv_fmt_rd_offset[] = "O\t0,o"; +static const char rv_fmt_rd_rs1_rs2[] = "O\t0,1,2"; +static const char rv_fmt_frd_rs1[] = "O\t3,1"; +static const char rv_fmt_rd_frs1[] = "O\t0,4"; +static const char rv_fmt_rd_frs1_frs2[] = "O\t0,4,5"; +static const char rv_fmt_frd_frs1_frs2[] = "O\t3,4,5"; +static const char rv_fmt_rm_frd_frs1[] = "O\tr,3,4"; +static const char rv_fmt_rm_frd_rs1[] = "O\tr,3,1"; +static const char rv_fmt_rm_rd_frs1[] = "O\tr,0,4"; +static const char rv_fmt_rm_frd_frs1_frs2[] = "O\tr,3,4,5"; +static const char rv_fmt_rm_frd_frs1_frs2_frs3[] = "O\tr,3,4,5,6"; +static const char rv_fmt_rd_rs1_imm[] = "O\t0,1,i"; +static const char rv_fmt_rd_rs1_offset[] = "O\t0,1,i"; +static const char rv_fmt_rd_offset_rs1[] = "O\t0,i(1)"; +static const char rv_fmt_frd_offset_rs1[] = "O\t3,i(1)"; +static const char rv_fmt_rd_csr_rs1[] = "O\t0,c,1"; +static const char rv_fmt_rd_csr_zimm[] = "O\t0,c,7"; +static const char rv_fmt_rs2_offset_rs1[] = "O\t2,i(1)"; +static const char rv_fmt_frs2_offset_rs1[] = "O\t5,i(1)"; +static const char rv_fmt_rs1_rs2_offset[] = "O\t1,2,o"; +static const char rv_fmt_rs2_rs1_offset[] = "O\t2,1,o"; +static const char rv_fmt_aqrl_rd_rs2_rs1[] = "OAR\t0,2,(1)"; +static const char rv_fmt_aqrl_rd_rs1[] = "OAR\t0,(1)"; +static const char rv_fmt_rd[] = "O\t0"; +static const char rv_fmt_rd_zimm[] = "O\t0,7"; +static const char rv_fmt_rd_rs1[] = "O\t0,1"; +static const char rv_fmt_rd_rs2[] = "O\t0,2"; +static const char rv_fmt_rs1_offset[] = "O\t1,o"; +static const char rv_fmt_rs2_offset[] = "O\t2,o"; + +/* pseudo-instruction constraints */ + +static const rvc_constraint rvcc_last[] = { rvc_end }; +static const rvc_constraint rvcc_imm_eq_zero[] = { rvc_imm_eq_zero, rvc_end }; +static const rvc_constraint rvcc_imm_eq_n1[] = { rvc_imm_eq_n1, rvc_end }; +static const rvc_constraint rvcc_imm_eq_p1[] = { rvc_imm_eq_p1, rvc_end }; +static const rvc_constraint rvcc_rs1_eq_x0[] = { rvc_rs1_eq_x0, rvc_end }; +static const rvc_constraint rvcc_rs2_eq_x0[] = { rvc_rs2_eq_x0, rvc_end }; +static const rvc_constraint rvcc_rs2_eq_rs1[] = { rvc_rs2_eq_rs1, rvc_end }; +static const rvc_constraint rvcc_jal_j[] = { rvc_rd_eq_x0, rvc_end }; +static const rvc_constraint rvcc_jal_jal[] 
= { rvc_rd_eq_ra, rvc_end }; +static const rvc_constraint rvcc_jalr_jr[] = { rvc_rd_eq_x0, rvc_imm_eq_zero, rvc_end }; +static const rvc_constraint rvcc_jalr_jalr[] = { rvc_rd_eq_ra, rvc_imm_eq_zero, rvc_end }; +static const rvc_constraint rvcc_jalr_ret[] = { rvc_rd_eq_x0, rvc_rs1_eq_ra, rvc_end }; +static const rvc_constraint rvcc_addi_nop[] = { rvc_rd_eq_x0, rvc_rs1_eq_x0, rvc_imm_eq_zero, rvc_end }; +static const rvc_constraint rvcc_rdcycle[] = { rvc_rs1_eq_x0, rvc_csr_eq_0xc00, rvc_end }; +static const rvc_constraint rvcc_rdtime[] = { rvc_rs1_eq_x0, rvc_csr_eq_0xc01, rvc_end }; +static const rvc_constraint rvcc_rdinstret[] = { rvc_rs1_eq_x0, rvc_csr_eq_0xc02, rvc_end }; +static const rvc_constraint rvcc_rdcycleh[] = { rvc_rs1_eq_x0, rvc_csr_eq_0xc80, rvc_end }; +static const rvc_constraint rvcc_rdtimeh[] = { rvc_rs1_eq_x0, rvc_csr_eq_0xc81, rvc_end }; +static const rvc_constraint rvcc_rdinstreth[] = { rvc_rs1_eq_x0, rvc_csr_eq_0xc82, rvc_end }; +static const rvc_constraint rvcc_frcsr[] = { rvc_rs1_eq_x0, rvc_csr_eq_0x003, rvc_end }; +static const rvc_constraint rvcc_frrm[] = { rvc_rs1_eq_x0, rvc_csr_eq_0x002, rvc_end }; +static const rvc_constraint rvcc_frflags[] = { rvc_rs1_eq_x0, rvc_csr_eq_0x001, rvc_end }; +static const rvc_constraint rvcc_fscsr[] = { rvc_csr_eq_0x003, rvc_end }; +static const rvc_constraint rvcc_fsrm[] = { rvc_csr_eq_0x002, rvc_end }; +static const rvc_constraint rvcc_fsflags[] = { rvc_csr_eq_0x001, rvc_end }; +static const rvc_constraint rvcc_fsrmi[] = { rvc_csr_eq_0x002, rvc_end }; +static const rvc_constraint rvcc_fsflagsi[] = { rvc_csr_eq_0x001, rvc_end }; + +/* pseudo-instruction metadata */ + +static const rv_comp_data rvcp_jal[] = { + { rv_op_j, rvcc_jal_j }, + { rv_op_jal, rvcc_jal_jal }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_jalr[] = { + { rv_op_ret, rvcc_jalr_ret }, + { rv_op_jr, rvcc_jalr_jr }, + { rv_op_jalr, rvcc_jalr_jalr }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_beq[] = { + { rv_op_beqz, rvcc_rs2_eq_x0 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_bne[] = { + { rv_op_bnez, rvcc_rs2_eq_x0 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_blt[] = { + { rv_op_bltz, rvcc_rs2_eq_x0 }, + { rv_op_bgtz, rvcc_rs1_eq_x0 }, + { rv_op_bgt, rvcc_last }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_bge[] = { + { rv_op_blez, rvcc_rs1_eq_x0 }, + { rv_op_bgez, rvcc_rs2_eq_x0 }, + { rv_op_ble, rvcc_last }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_bltu[] = { + { rv_op_bgtu, rvcc_last }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_bgeu[] = { + { rv_op_bleu, rvcc_last }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_addi[] = { + { rv_op_nop, rvcc_addi_nop }, + { rv_op_mv, rvcc_imm_eq_zero }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_sltiu[] = { + { rv_op_seqz, rvcc_imm_eq_p1 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_xori[] = { + { rv_op_not, rvcc_imm_eq_n1 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_sub[] = { + { rv_op_neg, rvcc_rs1_eq_x0 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_slt[] = { + { rv_op_sltz, rvcc_rs2_eq_x0 }, + { rv_op_sgtz, rvcc_rs1_eq_x0 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_sltu[] = { + { rv_op_snez, rvcc_rs1_eq_x0 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_addiw[] = { + { rv_op_sext_w, rvcc_imm_eq_zero }, + { rv_op_illegal, NULL } +}; + 
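+/*
+ * Illustrative sketch only -- not part of the upstream disassembler.
+ * Each rv_comp_data table above pairs a pseudo-instruction opcode with
+ * a constraint list; a matcher walks the list and accepts the first
+ * entry whose constraints all hold for the decoded instruction. The
+ * function name and the constraint subset below are hypothetical and
+ * kept under "#if 0" so the sketch is never compiled into the library.
+ */
+#if 0
+static int sketch_match_constraints(const rv_decode *dec,
+                                    const rvc_constraint *c)
+{
+    /* rvc_end terminates every constraint list */
+    for (; *c != rvc_end; c++) {
+        switch (*c) {
+        case rvc_rd_eq_x0:    if (dec->rd != 0) return 0; break;
+        case rvc_rs1_eq_x0:   if (dec->rs1 != 0) return 0; break;
+        case rvc_rs2_eq_x0:   if (dec->rs2 != 0) return 0; break;
+        case rvc_rs2_eq_rs1:  if (dec->rs2 != dec->rs1) return 0; break;
+        case rvc_imm_eq_zero: if (dec->imm != 0) return 0; break;
+        default: return 0; /* constraints not handled in this sketch */
+        }
+    }
+    return 1;
+}
+#endif
+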
+static const rv_comp_data rvcp_subw[] = { + { rv_op_negw, rvcc_rs1_eq_x0 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_csrrw[] = { + { rv_op_fscsr, rvcc_fscsr }, + { rv_op_fsrm, rvcc_fsrm }, + { rv_op_fsflags, rvcc_fsflags }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_csrrs[] = { + { rv_op_rdcycle, rvcc_rdcycle }, + { rv_op_rdtime, rvcc_rdtime }, + { rv_op_rdinstret, rvcc_rdinstret }, + { rv_op_rdcycleh, rvcc_rdcycleh }, + { rv_op_rdtimeh, rvcc_rdtimeh }, + { rv_op_rdinstreth, rvcc_rdinstreth }, + { rv_op_frcsr, rvcc_frcsr }, + { rv_op_frrm, rvcc_frrm }, + { rv_op_frflags, rvcc_frflags }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_csrrwi[] = { + { rv_op_fsrmi, rvcc_fsrmi }, + { rv_op_fsflagsi, rvcc_fsflagsi }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_fsgnj_s[] = { + { rv_op_fmv_s, rvcc_rs2_eq_rs1 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_fsgnjn_s[] = { + { rv_op_fneg_s, rvcc_rs2_eq_rs1 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_fsgnjx_s[] = { + { rv_op_fabs_s, rvcc_rs2_eq_rs1 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_fsgnj_d[] = { + { rv_op_fmv_d, rvcc_rs2_eq_rs1 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_fsgnjn_d[] = { + { rv_op_fneg_d, rvcc_rs2_eq_rs1 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_fsgnjx_d[] = { + { rv_op_fabs_d, rvcc_rs2_eq_rs1 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_fsgnj_q[] = { + { rv_op_fmv_q, rvcc_rs2_eq_rs1 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_fsgnjn_q[] = { + { rv_op_fneg_q, rvcc_rs2_eq_rs1 }, + { rv_op_illegal, NULL } +}; + +static const rv_comp_data rvcp_fsgnjx_q[] = { + { rv_op_fabs_q, rvcc_rs2_eq_rs1 }, + { rv_op_illegal, NULL } +}; + +/* instruction metadata */ + +const rv_opcode_data opcode_data[] = { + { "illegal", rv_codec_illegal, rv_fmt_none, NULL, 0, 0, 0 }, + { "lui", rv_codec_u, rv_fmt_rd_imm, NULL, 0, 0, 0 }, + { "auipc", rv_codec_u, rv_fmt_rd_offset, NULL, 0, 0, 0 }, + { "jal", rv_codec_uj, rv_fmt_rd_offset, rvcp_jal, 0, 0, 0 }, + { "jalr", rv_codec_i, rv_fmt_rd_rs1_offset, rvcp_jalr, 0, 0, 0 }, + { "beq", rv_codec_sb, rv_fmt_rs1_rs2_offset, rvcp_beq, 0, 0, 0 }, + { "bne", rv_codec_sb, rv_fmt_rs1_rs2_offset, rvcp_bne, 0, 0, 0 }, + { "blt", rv_codec_sb, rv_fmt_rs1_rs2_offset, rvcp_blt, 0, 0, 0 }, + { "bge", rv_codec_sb, rv_fmt_rs1_rs2_offset, rvcp_bge, 0, 0, 0 }, + { "bltu", rv_codec_sb, rv_fmt_rs1_rs2_offset, rvcp_bltu, 0, 0, 0 }, + { "bgeu", rv_codec_sb, rv_fmt_rs1_rs2_offset, rvcp_bgeu, 0, 0, 0 }, + { "lb", rv_codec_i, rv_fmt_rd_offset_rs1, NULL, 0, 0, 0 }, + { "lh", rv_codec_i, rv_fmt_rd_offset_rs1, NULL, 0, 0, 0 }, + { "lw", rv_codec_i, rv_fmt_rd_offset_rs1, NULL, 0, 0, 0 }, + { "lbu", rv_codec_i, rv_fmt_rd_offset_rs1, NULL, 0, 0, 0 }, + { "lhu", rv_codec_i, rv_fmt_rd_offset_rs1, NULL, 0, 0, 0 }, + { "sb", rv_codec_s, rv_fmt_rs2_offset_rs1, NULL, 0, 0, 0 }, + { "sh", rv_codec_s, rv_fmt_rs2_offset_rs1, NULL, 0, 0, 0 }, + { "sw", rv_codec_s, rv_fmt_rs2_offset_rs1, NULL, 0, 0, 0 }, + { "addi", rv_codec_i, rv_fmt_rd_rs1_imm, rvcp_addi, 0, 0, 0 }, + { "slti", rv_codec_i, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "sltiu", rv_codec_i, rv_fmt_rd_rs1_imm, rvcp_sltiu, 0, 0, 0 }, + { "xori", rv_codec_i, rv_fmt_rd_rs1_imm, rvcp_xori, 0, 0, 0 }, + { "ori", rv_codec_i, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "andi", rv_codec_i, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "slli", rv_codec_i_sh7, 
rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "srli", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "srai", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "add", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sub", rv_codec_r, rv_fmt_rd_rs1_rs2, rvcp_sub, 0, 0, 0 }, + { "sll", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "slt", rv_codec_r, rv_fmt_rd_rs1_rs2, rvcp_slt, 0, 0, 0 }, + { "sltu", rv_codec_r, rv_fmt_rd_rs1_rs2, rvcp_sltu, 0, 0, 0 }, + { "xor", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "srl", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sra", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "or", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "and", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "fence", rv_codec_r_f, rv_fmt_pred_succ, NULL, 0, 0, 0 }, + { "fence.i", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "lwu", rv_codec_i, rv_fmt_rd_offset_rs1, NULL, 0, 0, 0 }, + { "ld", rv_codec_i, rv_fmt_rd_offset_rs1, NULL, 0, 0, 0 }, + { "sd", rv_codec_s, rv_fmt_rs2_offset_rs1, NULL, 0, 0, 0 }, + { "addiw", rv_codec_i, rv_fmt_rd_rs1_imm, rvcp_addiw, 0, 0, 0 }, + { "slliw", rv_codec_i_sh5, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "srliw", rv_codec_i_sh5, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "sraiw", rv_codec_i_sh5, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "addw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "subw", rv_codec_r, rv_fmt_rd_rs1_rs2, rvcp_subw, 0, 0, 0 }, + { "sllw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "srlw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sraw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "ldu", rv_codec_i, rv_fmt_rd_offset_rs1, NULL, 0, 0, 0 }, + { "lq", rv_codec_i, rv_fmt_rd_offset_rs1, NULL, 0, 0, 0 }, + { "sq", rv_codec_s, rv_fmt_rs2_offset_rs1, NULL, 0, 0, 0 }, + { "addid", rv_codec_i, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "sllid", rv_codec_i_sh6, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "srlid", rv_codec_i_sh6, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "sraid", rv_codec_i_sh6, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "addd", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "subd", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "slld", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "srld", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "srad", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "mul", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "mulh", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "mulhsu", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "mulhu", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "div", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "divu", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "rem", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "remu", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "mulw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "divw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "divuw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "remw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "remuw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "muld", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "divd", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "divud", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "remd", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "remud", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "lr.w", rv_codec_r_l, rv_fmt_aqrl_rd_rs1, NULL, 0, 0, 0 }, + { "sc.w", rv_codec_r_a, 
rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoswap.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoadd.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoxor.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoor.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoand.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amomin.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amomax.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amominu.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amomaxu.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "lr.d", rv_codec_r_l, rv_fmt_aqrl_rd_rs1, NULL, 0, 0, 0 }, + { "sc.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoswap.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoadd.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoxor.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoor.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoand.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amomin.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amomax.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amominu.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amomaxu.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "lr.q", rv_codec_r_l, rv_fmt_aqrl_rd_rs1, NULL, 0, 0, 0 }, + { "sc.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoswap.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoadd.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoxor.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoor.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amoand.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amomin.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amomax.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amominu.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amomaxu.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "ecall", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "ebreak", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "uret", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "sret", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "hret", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "mret", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "dret", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "sfence.vm", rv_codec_r, rv_fmt_rs1, NULL, 0, 0, 0 }, + { "sfence.vma", rv_codec_r, rv_fmt_rs1_rs2, NULL, 0, 0, 0 }, + { "wfi", rv_codec_none, rv_fmt_none, NULL, 0, 0, 0 }, + { "csrrw", rv_codec_i_csr, rv_fmt_rd_csr_rs1, rvcp_csrrw, 0, 0, 0 }, + { "csrrs", rv_codec_i_csr, rv_fmt_rd_csr_rs1, rvcp_csrrs, 0, 0, 0 }, + { "csrrc", rv_codec_i_csr, rv_fmt_rd_csr_rs1, NULL, 0, 0, 0 }, + { "csrrwi", rv_codec_i_csr, rv_fmt_rd_csr_zimm, rvcp_csrrwi, 0, 0, 0 }, + { "csrrsi", rv_codec_i_csr, rv_fmt_rd_csr_zimm, NULL, 0, 0, 0 }, + { "csrrci", rv_codec_i_csr, rv_fmt_rd_csr_zimm, NULL, 0, 0, 0 }, + { "flw", rv_codec_i, rv_fmt_frd_offset_rs1, NULL, 0, 0, 0 }, + { "fsw", rv_codec_s, rv_fmt_frs2_offset_rs1, NULL, 0, 0, 0 }, + { "fmadd.s", rv_codec_r4_m, rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fmsub.s", rv_codec_r4_m, rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fnmsub.s", rv_codec_r4_m, 
rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fnmadd.s", rv_codec_r4_m, rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fadd.s", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fsub.s", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fmul.s", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fdiv.s", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fsgnj.s", rv_codec_r, rv_fmt_frd_frs1_frs2, rvcp_fsgnj_s, 0, 0, 0 }, + { "fsgnjn.s", rv_codec_r, rv_fmt_frd_frs1_frs2, rvcp_fsgnjn_s, 0, 0, 0 }, + { "fsgnjx.s", rv_codec_r, rv_fmt_frd_frs1_frs2, rvcp_fsgnjx_s, 0, 0, 0 }, + { "fmin.s", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fmax.s", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fsqrt.s", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 }, + { "fle.s", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 }, + { "flt.s", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 }, + { "feq.s", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 }, + { "fcvt.w.s", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.wu.s", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.s.w", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fcvt.s.wu", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fmv.x.s", rv_codec_r, rv_fmt_rd_frs1, NULL, 0, 0, 0 }, + { "fclass.s", rv_codec_r, rv_fmt_rd_frs1, NULL, 0, 0, 0 }, + { "fmv.s.x", rv_codec_r, rv_fmt_frd_rs1, NULL, 0, 0, 0 }, + { "fcvt.l.s", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.lu.s", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.s.l", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fcvt.s.lu", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fld", rv_codec_i, rv_fmt_frd_offset_rs1, NULL, 0, 0, 0 }, + { "fsd", rv_codec_s, rv_fmt_frs2_offset_rs1, NULL, 0, 0, 0 }, + { "fmadd.d", rv_codec_r4_m, rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fmsub.d", rv_codec_r4_m, rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fnmsub.d", rv_codec_r4_m, rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fnmadd.d", rv_codec_r4_m, rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fadd.d", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fsub.d", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fmul.d", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fdiv.d", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fsgnj.d", rv_codec_r, rv_fmt_frd_frs1_frs2, rvcp_fsgnj_d, 0, 0, 0 }, + { "fsgnjn.d", rv_codec_r, rv_fmt_frd_frs1_frs2, rvcp_fsgnjn_d, 0, 0, 0 }, + { "fsgnjx.d", rv_codec_r, rv_fmt_frd_frs1_frs2, rvcp_fsgnjx_d, 0, 0, 0 }, + { "fmin.d", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fmax.d", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fcvt.s.d", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 }, + { "fcvt.d.s", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 }, + { "fsqrt.d", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 }, + { "fle.d", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 }, + { "flt.d", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 }, + { "feq.d", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 }, + { "fcvt.w.d", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.wu.d", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.d.w", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fcvt.d.wu", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fclass.d", rv_codec_r, rv_fmt_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.l.d", rv_codec_r_m, 
rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.lu.d", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fmv.x.d", rv_codec_r, rv_fmt_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.d.l", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fcvt.d.lu", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fmv.d.x", rv_codec_r, rv_fmt_frd_rs1, NULL, 0, 0, 0 }, + { "flq", rv_codec_i, rv_fmt_frd_offset_rs1, NULL, 0, 0, 0 }, + { "fsq", rv_codec_s, rv_fmt_frs2_offset_rs1, NULL, 0, 0, 0 }, + { "fmadd.q", rv_codec_r4_m, rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fmsub.q", rv_codec_r4_m, rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fnmsub.q", rv_codec_r4_m, rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fnmadd.q", rv_codec_r4_m, rv_fmt_rm_frd_frs1_frs2_frs3, NULL, 0, 0, 0 }, + { "fadd.q", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fsub.q", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fmul.q", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fdiv.q", rv_codec_r_m, rv_fmt_rm_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fsgnj.q", rv_codec_r, rv_fmt_frd_frs1_frs2, rvcp_fsgnj_q, 0, 0, 0 }, + { "fsgnjn.q", rv_codec_r, rv_fmt_frd_frs1_frs2, rvcp_fsgnjn_q, 0, 0, 0 }, + { "fsgnjx.q", rv_codec_r, rv_fmt_frd_frs1_frs2, rvcp_fsgnjx_q, 0, 0, 0 }, + { "fmin.q", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fmax.q", rv_codec_r, rv_fmt_frd_frs1_frs2, NULL, 0, 0, 0 }, + { "fcvt.s.q", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 }, + { "fcvt.q.s", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 }, + { "fcvt.d.q", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 }, + { "fcvt.q.d", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 }, + { "fsqrt.q", rv_codec_r_m, rv_fmt_rm_frd_frs1, NULL, 0, 0, 0 }, + { "fle.q", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 }, + { "flt.q", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 }, + { "feq.q", rv_codec_r, rv_fmt_rd_frs1_frs2, NULL, 0, 0, 0 }, + { "fcvt.w.q", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.wu.q", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.q.w", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fcvt.q.wu", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fclass.q", rv_codec_r, rv_fmt_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.l.q", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.lu.q", rv_codec_r_m, rv_fmt_rm_rd_frs1, NULL, 0, 0, 0 }, + { "fcvt.q.l", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fcvt.q.lu", rv_codec_r_m, rv_fmt_rm_frd_rs1, NULL, 0, 0, 0 }, + { "fmv.x.q", rv_codec_r, rv_fmt_rd_frs1, NULL, 0, 0, 0 }, + { "fmv.q.x", rv_codec_r, rv_fmt_frd_rs1, NULL, 0, 0, 0 }, + { "c.addi4spn", rv_codec_ciw_4spn, rv_fmt_rd_rs1_imm, NULL, rv_op_addi, rv_op_addi, rv_op_addi, rvcd_imm_nz }, + { "c.fld", rv_codec_cl_ld, rv_fmt_frd_offset_rs1, NULL, rv_op_fld, rv_op_fld, 0 }, + { "c.lw", rv_codec_cl_lw, rv_fmt_rd_offset_rs1, NULL, rv_op_lw, rv_op_lw, rv_op_lw }, + { "c.flw", rv_codec_cl_lw, rv_fmt_frd_offset_rs1, NULL, rv_op_flw, 0, 0 }, + { "c.fsd", rv_codec_cs_sd, rv_fmt_frs2_offset_rs1, NULL, rv_op_fsd, rv_op_fsd, 0 }, + { "c.sw", rv_codec_cs_sw, rv_fmt_rs2_offset_rs1, NULL, rv_op_sw, rv_op_sw, rv_op_sw }, + { "c.fsw", rv_codec_cs_sw, rv_fmt_frs2_offset_rs1, NULL, rv_op_fsw, 0, 0 }, + { "c.nop", rv_codec_ci_none, rv_fmt_none, NULL, rv_op_addi, rv_op_addi, rv_op_addi }, + { "c.addi", rv_codec_ci, rv_fmt_rd_rs1_imm, NULL, rv_op_addi, rv_op_addi, rv_op_addi, rvcd_imm_nz_hint }, + { "c.jal", rv_codec_cj_jal, rv_fmt_rd_offset, NULL, rv_op_jal, 0, 0 }, + { 
"c.li", rv_codec_ci_li, rv_fmt_rd_rs1_imm, NULL, rv_op_addi, rv_op_addi, rv_op_addi }, + { "c.addi16sp", rv_codec_ci_16sp, rv_fmt_rd_rs1_imm, NULL, rv_op_addi, rv_op_addi, rv_op_addi, rvcd_imm_nz }, + { "c.lui", rv_codec_ci_lui, rv_fmt_rd_imm, NULL, rv_op_lui, rv_op_lui, rv_op_lui, rvcd_imm_nz }, + { "c.srli", rv_codec_cb_sh6, rv_fmt_rd_rs1_imm, NULL, rv_op_srli, rv_op_srli, rv_op_srli, rvcd_imm_nz }, + { "c.srai", rv_codec_cb_sh6, rv_fmt_rd_rs1_imm, NULL, rv_op_srai, rv_op_srai, rv_op_srai, rvcd_imm_nz }, + { "c.andi", rv_codec_cb_imm, rv_fmt_rd_rs1_imm, NULL, rv_op_andi, rv_op_andi, rv_op_andi, rvcd_imm_nz }, + { "c.sub", rv_codec_cs, rv_fmt_rd_rs1_rs2, NULL, rv_op_sub, rv_op_sub, rv_op_sub }, + { "c.xor", rv_codec_cs, rv_fmt_rd_rs1_rs2, NULL, rv_op_xor, rv_op_xor, rv_op_xor }, + { "c.or", rv_codec_cs, rv_fmt_rd_rs1_rs2, NULL, rv_op_or, rv_op_or, rv_op_or }, + { "c.and", rv_codec_cs, rv_fmt_rd_rs1_rs2, NULL, rv_op_and, rv_op_and, rv_op_and }, + { "c.subw", rv_codec_cs, rv_fmt_rd_rs1_rs2, NULL, rv_op_subw, rv_op_subw, rv_op_subw }, + { "c.addw", rv_codec_cs, rv_fmt_rd_rs1_rs2, NULL, rv_op_addw, rv_op_addw, rv_op_addw }, + { "c.j", rv_codec_cj, rv_fmt_rd_offset, NULL, rv_op_jal, rv_op_jal, rv_op_jal }, + { "c.beqz", rv_codec_cb, rv_fmt_rs1_rs2_offset, NULL, rv_op_beq, rv_op_beq, rv_op_beq }, + { "c.bnez", rv_codec_cb, rv_fmt_rs1_rs2_offset, NULL, rv_op_bne, rv_op_bne, rv_op_bne }, + { "c.slli", rv_codec_ci_sh6, rv_fmt_rd_rs1_imm, NULL, rv_op_slli, rv_op_slli, rv_op_slli, rvcd_imm_nz }, + { "c.fldsp", rv_codec_ci_ldsp, rv_fmt_frd_offset_rs1, NULL, rv_op_fld, rv_op_fld, rv_op_fld }, + { "c.lwsp", rv_codec_ci_lwsp, rv_fmt_rd_offset_rs1, NULL, rv_op_lw, rv_op_lw, rv_op_lw }, + { "c.flwsp", rv_codec_ci_lwsp, rv_fmt_frd_offset_rs1, NULL, rv_op_flw, 0, 0 }, + { "c.jr", rv_codec_cr_jr, rv_fmt_rd_rs1_offset, NULL, rv_op_jalr, rv_op_jalr, rv_op_jalr }, + { "c.mv", rv_codec_cr_mv, rv_fmt_rd_rs1_rs2, NULL, rv_op_addi, rv_op_addi, rv_op_addi }, + { "c.ebreak", rv_codec_ci_none, rv_fmt_none, NULL, rv_op_ebreak, rv_op_ebreak, rv_op_ebreak }, + { "c.jalr", rv_codec_cr_jalr, rv_fmt_rd_rs1_offset, NULL, rv_op_jalr, rv_op_jalr, rv_op_jalr }, + { "c.add", rv_codec_cr, rv_fmt_rd_rs1_rs2, NULL, rv_op_add, rv_op_add, rv_op_add }, + { "c.fsdsp", rv_codec_css_sdsp, rv_fmt_frs2_offset_rs1, NULL, rv_op_fsd, rv_op_fsd, rv_op_fsd }, + { "c.swsp", rv_codec_css_swsp, rv_fmt_rs2_offset_rs1, NULL, rv_op_sw, rv_op_sw, rv_op_sw }, + { "c.fswsp", rv_codec_css_swsp, rv_fmt_frs2_offset_rs1, NULL, rv_op_fsw, 0, 0 }, + { "c.ld", rv_codec_cl_ld, rv_fmt_rd_offset_rs1, NULL, 0, rv_op_ld, rv_op_ld }, + { "c.sd", rv_codec_cs_sd, rv_fmt_rs2_offset_rs1, NULL, 0, rv_op_sd, rv_op_sd }, + { "c.addiw", rv_codec_ci, rv_fmt_rd_rs1_imm, NULL, 0, rv_op_addiw, rv_op_addiw }, + { "c.ldsp", rv_codec_ci_ldsp, rv_fmt_rd_offset_rs1, NULL, 0, rv_op_ld, rv_op_ld }, + { "c.sdsp", rv_codec_css_sdsp, rv_fmt_rs2_offset_rs1, NULL, 0, rv_op_sd, rv_op_sd }, + { "c.lq", rv_codec_cl_lq, rv_fmt_rd_offset_rs1, NULL, 0, 0, rv_op_lq }, + { "c.sq", rv_codec_cs_sq, rv_fmt_rs2_offset_rs1, NULL, 0, 0, rv_op_sq }, + { "c.lqsp", rv_codec_ci_lqsp, rv_fmt_rd_offset_rs1, NULL, 0, 0, rv_op_lq }, + { "c.sqsp", rv_codec_css_sqsp, rv_fmt_rs2_offset_rs1, NULL, 0, 0, rv_op_sq }, + { "nop", rv_codec_i, rv_fmt_none, NULL, 0, 0, 0 }, + { "mv", rv_codec_i, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "not", rv_codec_i, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "neg", rv_codec_r, rv_fmt_rd_rs2, NULL, 0, 0, 0 }, + { "negw", rv_codec_r, rv_fmt_rd_rs2, NULL, 0, 0, 0 }, + { "sext.w", rv_codec_i, 
rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "seqz", rv_codec_i, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "snez", rv_codec_r, rv_fmt_rd_rs2, NULL, 0, 0, 0 }, + { "sltz", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "sgtz", rv_codec_r, rv_fmt_rd_rs2, NULL, 0, 0, 0 }, + { "fmv.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fabs.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fneg.s", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fmv.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fabs.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fneg.d", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fmv.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fabs.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fneg.q", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "beqz", rv_codec_sb, rv_fmt_rs1_offset, NULL, 0, 0, 0 }, + { "bnez", rv_codec_sb, rv_fmt_rs1_offset, NULL, 0, 0, 0 }, + { "blez", rv_codec_sb, rv_fmt_rs2_offset, NULL, 0, 0, 0 }, + { "bgez", rv_codec_sb, rv_fmt_rs1_offset, NULL, 0, 0, 0 }, + { "bltz", rv_codec_sb, rv_fmt_rs1_offset, NULL, 0, 0, 0 }, + { "bgtz", rv_codec_sb, rv_fmt_rs2_offset, NULL, 0, 0, 0 }, + { "ble", rv_codec_sb, rv_fmt_rs2_rs1_offset, NULL, 0, 0, 0 }, + { "bleu", rv_codec_sb, rv_fmt_rs2_rs1_offset, NULL, 0, 0, 0 }, + { "bgt", rv_codec_sb, rv_fmt_rs2_rs1_offset, NULL, 0, 0, 0 }, + { "bgtu", rv_codec_sb, rv_fmt_rs2_rs1_offset, NULL, 0, 0, 0 }, + { "j", rv_codec_uj, rv_fmt_offset, NULL, 0, 0, 0 }, + { "ret", rv_codec_i, rv_fmt_none, NULL, 0, 0, 0 }, + { "jr", rv_codec_i, rv_fmt_rs1, NULL, 0, 0, 0 }, + { "rdcycle", rv_codec_i_csr, rv_fmt_rd, NULL, 0, 0, 0 }, + { "rdtime", rv_codec_i_csr, rv_fmt_rd, NULL, 0, 0, 0 }, + { "rdinstret", rv_codec_i_csr, rv_fmt_rd, NULL, 0, 0, 0 }, + { "rdcycleh", rv_codec_i_csr, rv_fmt_rd, NULL, 0, 0, 0 }, + { "rdtimeh", rv_codec_i_csr, rv_fmt_rd, NULL, 0, 0, 0 }, + { "rdinstreth", rv_codec_i_csr, rv_fmt_rd, NULL, 0, 0, 0 }, + { "frcsr", rv_codec_i_csr, rv_fmt_rd, NULL, 0, 0, 0 }, + { "frrm", rv_codec_i_csr, rv_fmt_rd, NULL, 0, 0, 0 }, + { "frflags", rv_codec_i_csr, rv_fmt_rd, NULL, 0, 0, 0 }, + { "fscsr", rv_codec_i_csr, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fsrm", rv_codec_i_csr, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fsflags", rv_codec_i_csr, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "fsrmi", rv_codec_i_csr, rv_fmt_rd_zimm, NULL, 0, 0, 0 }, + { "fsflagsi", rv_codec_i_csr, rv_fmt_rd_zimm, NULL, 0, 0, 0 }, +}; + +/* CSR names */ + +static const char *csr_name(int csrno) +{ + switch (csrno) { + case 0x0000: return "ustatus"; + case 0x0001: return "fflags"; + case 0x0002: return "frm"; + case 0x0003: return "fcsr"; + case 0x0004: return "uie"; + case 0x0005: return "utvec"; + case 0x0007: return "utvt"; + case 0x0008: return "vstart"; + case 0x0009: return "vxsat"; + case 0x000a: return "vxrm"; + case 0x000f: return "vcsr"; + case 0x0040: return "uscratch"; + case 0x0041: return "uepc"; + case 0x0042: return "ucause"; + case 0x0043: return "utval"; + case 0x0044: return "uip"; + case 0x0045: return "unxti"; + case 0x0046: return "uintstatus"; + case 0x0048: return "uscratchcsw"; + case 0x0049: return "uscratchcswl"; + case 0x0100: return "sstatus"; + case 0x0102: return "sedeleg"; + case 0x0103: return "sideleg"; + case 0x0104: return "sie"; + case 0x0105: return "stvec"; + case 0x0106: return "scounteren"; + case 0x0107: return "stvt"; + case 0x0140: return "sscratch"; + case 0x0141: return "sepc"; + case 0x0142: return "scause"; + case 0x0143: return "stval"; + case 0x0144: return "sip"; + case 0x0145: return "snxti"; + case 0x0146: return 
"sintstatus"; + case 0x0148: return "sscratchcsw"; + case 0x0149: return "sscratchcswl"; + case 0x0180: return "satp"; + case 0x0200: return "vsstatus"; + case 0x0204: return "vsie"; + case 0x0205: return "vstvec"; + case 0x0240: return "vsscratch"; + case 0x0241: return "vsepc"; + case 0x0242: return "vscause"; + case 0x0243: return "vstval"; + case 0x0244: return "vsip"; + case 0x0280: return "vsatp"; + case 0x0300: return "mstatus"; + case 0x0301: return "misa"; + case 0x0302: return "medeleg"; + case 0x0303: return "mideleg"; + case 0x0304: return "mie"; + case 0x0305: return "mtvec"; + case 0x0306: return "mcounteren"; + case 0x0307: return "mtvt"; + case 0x0310: return "mstatush"; + case 0x0320: return "mcountinhibit"; + case 0x0323: return "mhpmevent3"; + case 0x0324: return "mhpmevent4"; + case 0x0325: return "mhpmevent5"; + case 0x0326: return "mhpmevent6"; + case 0x0327: return "mhpmevent7"; + case 0x0328: return "mhpmevent8"; + case 0x0329: return "mhpmevent9"; + case 0x032a: return "mhpmevent10"; + case 0x032b: return "mhpmevent11"; + case 0x032c: return "mhpmevent12"; + case 0x032d: return "mhpmevent13"; + case 0x032e: return "mhpmevent14"; + case 0x032f: return "mhpmevent15"; + case 0x0330: return "mhpmevent16"; + case 0x0331: return "mhpmevent17"; + case 0x0332: return "mhpmevent18"; + case 0x0333: return "mhpmevent19"; + case 0x0334: return "mhpmevent20"; + case 0x0335: return "mhpmevent21"; + case 0x0336: return "mhpmevent22"; + case 0x0337: return "mhpmevent23"; + case 0x0338: return "mhpmevent24"; + case 0x0339: return "mhpmevent25"; + case 0x033a: return "mhpmevent26"; + case 0x033b: return "mhpmevent27"; + case 0x033c: return "mhpmevent28"; + case 0x033d: return "mhpmevent29"; + case 0x033e: return "mhpmevent30"; + case 0x033f: return "mhpmevent31"; + case 0x0340: return "mscratch"; + case 0x0341: return "mepc"; + case 0x0342: return "mcause"; + case 0x0343: return "mtval"; + case 0x0344: return "mip"; + case 0x0345: return "mnxti"; + case 0x0346: return "mintstatus"; + case 0x0348: return "mscratchcsw"; + case 0x0349: return "mscratchcswl"; + case 0x034a: return "mtinst"; + case 0x034b: return "mtval2"; + case 0x03a0: return "pmpcfg0"; + case 0x03a1: return "pmpcfg1"; + case 0x03a2: return "pmpcfg2"; + case 0x03a3: return "pmpcfg3"; + case 0x03b0: return "pmpaddr0"; + case 0x03b1: return "pmpaddr1"; + case 0x03b2: return "pmpaddr2"; + case 0x03b3: return "pmpaddr3"; + case 0x03b4: return "pmpaddr4"; + case 0x03b5: return "pmpaddr5"; + case 0x03b6: return "pmpaddr6"; + case 0x03b7: return "pmpaddr7"; + case 0x03b8: return "pmpaddr8"; + case 0x03b9: return "pmpaddr9"; + case 0x03ba: return "pmpaddr10"; + case 0x03bb: return "pmpaddr11"; + case 0x03bc: return "pmpaddr12"; + case 0x03bd: return "pmpaddr13"; + case 0x03be: return "pmpaddr14"; + case 0x03bf: return "pmpaddr15"; + case 0x0600: return "hstatus"; + case 0x0602: return "hedeleg"; + case 0x0603: return "hideleg"; + case 0x0604: return "hie"; + case 0x0605: return "htimedelta"; + case 0x0606: return "hcounteren"; + case 0x0607: return "hgeie"; + case 0x0615: return "htimedeltah"; + case 0x0643: return "htval"; + case 0x0644: return "hip"; + case 0x0645: return "hvip"; + case 0x064a: return "htinst"; + case 0x0680: return "hgatp"; + case 0x07a0: return "tselect"; + case 0x07a1: return "tdata1"; + case 0x07a2: return "tdata2"; + case 0x07a3: return "tdata3"; + case 0x07a4: return "tinfo"; + case 0x07a5: return "tcontrol"; + case 0x07a8: return "mcontext"; + case 0x07a9: return "mnoise"; + case 0x07aa: return 
"scontext"; + case 0x07b0: return "dcsr"; + case 0x07b1: return "dpc"; + case 0x07b2: return "dscratch0"; + case 0x07b3: return "dscratch1"; + case 0x0b00: return "mcycle"; + case 0x0b02: return "minstret"; + case 0x0b03: return "mhpmcounter3"; + case 0x0b04: return "mhpmcounter4"; + case 0x0b05: return "mhpmcounter5"; + case 0x0b06: return "mhpmcounter6"; + case 0x0b07: return "mhpmcounter7"; + case 0x0b08: return "mhpmcounter8"; + case 0x0b09: return "mhpmcounter9"; + case 0x0b0a: return "mhpmcounter10"; + case 0x0b0b: return "mhpmcounter11"; + case 0x0b0c: return "mhpmcounter12"; + case 0x0b0d: return "mhpmcounter13"; + case 0x0b0e: return "mhpmcounter14"; + case 0x0b0f: return "mhpmcounter15"; + case 0x0b10: return "mhpmcounter16"; + case 0x0b11: return "mhpmcounter17"; + case 0x0b12: return "mhpmcounter18"; + case 0x0b13: return "mhpmcounter19"; + case 0x0b14: return "mhpmcounter20"; + case 0x0b15: return "mhpmcounter21"; + case 0x0b16: return "mhpmcounter22"; + case 0x0b17: return "mhpmcounter23"; + case 0x0b18: return "mhpmcounter24"; + case 0x0b19: return "mhpmcounter25"; + case 0x0b1a: return "mhpmcounter26"; + case 0x0b1b: return "mhpmcounter27"; + case 0x0b1c: return "mhpmcounter28"; + case 0x0b1d: return "mhpmcounter29"; + case 0x0b1e: return "mhpmcounter30"; + case 0x0b1f: return "mhpmcounter31"; + case 0x0b80: return "mcycleh"; + case 0x0b82: return "minstreth"; + case 0x0b83: return "mhpmcounter3h"; + case 0x0b84: return "mhpmcounter4h"; + case 0x0b85: return "mhpmcounter5h"; + case 0x0b86: return "mhpmcounter6h"; + case 0x0b87: return "mhpmcounter7h"; + case 0x0b88: return "mhpmcounter8h"; + case 0x0b89: return "mhpmcounter9h"; + case 0x0b8a: return "mhpmcounter10h"; + case 0x0b8b: return "mhpmcounter11h"; + case 0x0b8c: return "mhpmcounter12h"; + case 0x0b8d: return "mhpmcounter13h"; + case 0x0b8e: return "mhpmcounter14h"; + case 0x0b8f: return "mhpmcounter15h"; + case 0x0b90: return "mhpmcounter16h"; + case 0x0b91: return "mhpmcounter17h"; + case 0x0b92: return "mhpmcounter18h"; + case 0x0b93: return "mhpmcounter19h"; + case 0x0b94: return "mhpmcounter20h"; + case 0x0b95: return "mhpmcounter21h"; + case 0x0b96: return "mhpmcounter22h"; + case 0x0b97: return "mhpmcounter23h"; + case 0x0b98: return "mhpmcounter24h"; + case 0x0b99: return "mhpmcounter25h"; + case 0x0b9a: return "mhpmcounter26h"; + case 0x0b9b: return "mhpmcounter27h"; + case 0x0b9c: return "mhpmcounter28h"; + case 0x0b9d: return "mhpmcounter29h"; + case 0x0b9e: return "mhpmcounter30h"; + case 0x0b9f: return "mhpmcounter31h"; + case 0x0c00: return "cycle"; + case 0x0c01: return "time"; + case 0x0c02: return "instret"; + case 0x0c03: return "hpmcounter3"; + case 0x0c04: return "hpmcounter4"; + case 0x0c05: return "hpmcounter5"; + case 0x0c06: return "hpmcounter6"; + case 0x0c07: return "hpmcounter7"; + case 0x0c08: return "hpmcounter8"; + case 0x0c09: return "hpmcounter9"; + case 0x0c0a: return "hpmcounter10"; + case 0x0c0b: return "hpmcounter11"; + case 0x0c0c: return "hpmcounter12"; + case 0x0c0d: return "hpmcounter13"; + case 0x0c0e: return "hpmcounter14"; + case 0x0c0f: return "hpmcounter15"; + case 0x0c10: return "hpmcounter16"; + case 0x0c11: return "hpmcounter17"; + case 0x0c12: return "hpmcounter18"; + case 0x0c13: return "hpmcounter19"; + case 0x0c14: return "hpmcounter20"; + case 0x0c15: return "hpmcounter21"; + case 0x0c16: return "hpmcounter22"; + case 0x0c17: return "hpmcounter23"; + case 0x0c18: return "hpmcounter24"; + case 0x0c19: return "hpmcounter25"; + case 0x0c1a: return "hpmcounter26"; + 
case 0x0c1b: return "hpmcounter27"; + case 0x0c1c: return "hpmcounter28"; + case 0x0c1d: return "hpmcounter29"; + case 0x0c1e: return "hpmcounter30"; + case 0x0c1f: return "hpmcounter31"; + case 0x0c20: return "vl"; + case 0x0c21: return "vtype"; + case 0x0c22: return "vlenb"; + case 0x0c80: return "cycleh"; + case 0x0c81: return "timeh"; + case 0x0c82: return "instreth"; + case 0x0c83: return "hpmcounter3h"; + case 0x0c84: return "hpmcounter4h"; + case 0x0c85: return "hpmcounter5h"; + case 0x0c86: return "hpmcounter6h"; + case 0x0c87: return "hpmcounter7h"; + case 0x0c88: return "hpmcounter8h"; + case 0x0c89: return "hpmcounter9h"; + case 0x0c8a: return "hpmcounter10h"; + case 0x0c8b: return "hpmcounter11h"; + case 0x0c8c: return "hpmcounter12h"; + case 0x0c8d: return "hpmcounter13h"; + case 0x0c8e: return "hpmcounter14h"; + case 0x0c8f: return "hpmcounter15h"; + case 0x0c90: return "hpmcounter16h"; + case 0x0c91: return "hpmcounter17h"; + case 0x0c92: return "hpmcounter18h"; + case 0x0c93: return "hpmcounter19h"; + case 0x0c94: return "hpmcounter20h"; + case 0x0c95: return "hpmcounter21h"; + case 0x0c96: return "hpmcounter22h"; + case 0x0c97: return "hpmcounter23h"; + case 0x0c98: return "hpmcounter24h"; + case 0x0c99: return "hpmcounter25h"; + case 0x0c9a: return "hpmcounter26h"; + case 0x0c9b: return "hpmcounter27h"; + case 0x0c9c: return "hpmcounter28h"; + case 0x0c9d: return "hpmcounter29h"; + case 0x0c9e: return "hpmcounter30h"; + case 0x0c9f: return "hpmcounter31h"; + case 0x0e12: return "hgeip"; + case 0x0f11: return "mvendorid"; + case 0x0f12: return "marchid"; + case 0x0f13: return "mimpid"; + case 0x0f14: return "mhartid"; + case 0x0f15: return "mentropy"; + default: return NULL; + } +} + +/* decode opcode */ + +static void decode_inst_opcode(rv_decode *dec, rv_isa isa) +{ + rv_inst inst = dec->inst; + rv_opcode op = rv_op_illegal; + switch (((inst >> 0) & 0b11)) { + case 0: + switch (((inst >> 13) & 0b111)) { + case 0: op = rv_op_c_addi4spn; break; + case 1: op = (isa == rv128) ? rv_op_c_lq : rv_op_c_fld; break; + case 2: op = rv_op_c_lw; break; + case 3: op = (isa == rv32) ? rv_op_c_flw : rv_op_c_ld; break; + case 5: op = (isa == rv128) ? rv_op_c_sq : rv_op_c_fsd; break; + case 6: op = rv_op_c_sw; break; + case 7: op = (isa == rv32) ? rv_op_c_fsw : rv_op_c_sd; break; + } + break; + case 1: + switch (((inst >> 13) & 0b111)) { + case 0: + switch (((inst >> 2) & 0b11111111111)) { + case 0: op = rv_op_c_nop; break; + default: op = rv_op_c_addi; break; + } + break; + case 1: op = (isa == rv32) ? rv_op_c_jal : rv_op_c_addiw; break; + case 2: op = rv_op_c_li; break; + case 3: + switch (((inst >> 7) & 0b11111)) { + case 2: op = rv_op_c_addi16sp; break; + default: op = rv_op_c_lui; break; + } + break; + case 4: + switch (((inst >> 10) & 0b11)) { + case 0: + op = rv_op_c_srli; + break; + case 1: + op = rv_op_c_srai; + break; + case 2: op = rv_op_c_andi; break; + case 3: + switch (((inst >> 10) & 0b100) | ((inst >> 5) & 0b011)) { + case 0: op = rv_op_c_sub; break; + case 1: op = rv_op_c_xor; break; + case 2: op = rv_op_c_or; break; + case 3: op = rv_op_c_and; break; + case 4: op = rv_op_c_subw; break; + case 5: op = rv_op_c_addw; break; + } + break; + } + break; + case 5: op = rv_op_c_j; break; + case 6: op = rv_op_c_beqz; break; + case 7: op = rv_op_c_bnez; break; + } + break; + case 2: + switch (((inst >> 13) & 0b111)) { + case 0: + op = rv_op_c_slli; + break; + case 1: op = (isa == rv128) ? 
rv_op_c_lqsp : rv_op_c_fldsp; break; + case 2: op = rv_op_c_lwsp; break; + case 3: op = (isa == rv32) ? rv_op_c_flwsp : rv_op_c_ldsp; break; + case 4: + switch (((inst >> 12) & 0b1)) { + case 0: + switch (((inst >> 2) & 0b11111)) { + case 0: op = rv_op_c_jr; break; + default: op = rv_op_c_mv; break; + } + break; + case 1: + switch (((inst >> 2) & 0b11111)) { + case 0: + switch (((inst >> 7) & 0b11111)) { + case 0: op = rv_op_c_ebreak; break; + default: op = rv_op_c_jalr; break; + } + break; + default: op = rv_op_c_add; break; + } + break; + } + break; + case 5: op = (isa == rv128) ? rv_op_c_sqsp : rv_op_c_fsdsp; break; + case 6: op = rv_op_c_swsp; break; + case 7: op = (isa == rv32) ? rv_op_c_fswsp : rv_op_c_sdsp; break; + } + break; + case 3: + switch (((inst >> 2) & 0b11111)) { + case 0: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_lb; break; + case 1: op = rv_op_lh; break; + case 2: op = rv_op_lw; break; + case 3: op = rv_op_ld; break; + case 4: op = rv_op_lbu; break; + case 5: op = rv_op_lhu; break; + case 6: op = rv_op_lwu; break; + case 7: op = rv_op_ldu; break; + } + break; + case 1: + switch (((inst >> 12) & 0b111)) { + case 2: op = rv_op_flw; break; + case 3: op = rv_op_fld; break; + case 4: op = rv_op_flq; break; + } + break; + case 3: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_fence; break; + case 1: op = rv_op_fence_i; break; + case 2: op = rv_op_lq; break; + } + break; + case 4: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_addi; break; + case 1: + switch (((inst >> 27) & 0b11111)) { + case 0: op = rv_op_slli; break; + } + break; + case 2: op = rv_op_slti; break; + case 3: op = rv_op_sltiu; break; + case 4: op = rv_op_xori; break; + case 5: + switch (((inst >> 27) & 0b11111)) { + case 0: op = rv_op_srli; break; + case 8: op = rv_op_srai; break; + } + break; + case 6: op = rv_op_ori; break; + case 7: op = rv_op_andi; break; + } + break; + case 5: op = rv_op_auipc; break; + case 6: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_addiw; break; + case 1: + switch (((inst >> 25) & 0b1111111)) { + case 0: op = rv_op_slliw; break; + } + break; + case 5: + switch (((inst >> 25) & 0b1111111)) { + case 0: op = rv_op_srliw; break; + case 32: op = rv_op_sraiw; break; + } + break; + } + break; + case 8: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_sb; break; + case 1: op = rv_op_sh; break; + case 2: op = rv_op_sw; break; + case 3: op = rv_op_sd; break; + case 4: op = rv_op_sq; break; + } + break; + case 9: + switch (((inst >> 12) & 0b111)) { + case 2: op = rv_op_fsw; break; + case 3: op = rv_op_fsd; break; + case 4: op = rv_op_fsq; break; + } + break; + case 11: + switch (((inst >> 24) & 0b11111000) | ((inst >> 12) & 0b00000111)) { + case 2: op = rv_op_amoadd_w; break; + case 3: op = rv_op_amoadd_d; break; + case 4: op = rv_op_amoadd_q; break; + case 10: op = rv_op_amoswap_w; break; + case 11: op = rv_op_amoswap_d; break; + case 12: op = rv_op_amoswap_q; break; + case 18: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_lr_w; break; + } + break; + case 19: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_lr_d; break; + } + break; + case 20: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_lr_q; break; + } + break; + case 26: op = rv_op_sc_w; break; + case 27: op = rv_op_sc_d; break; + case 28: op = rv_op_sc_q; break; + case 34: op = rv_op_amoxor_w; break; + case 35: op = rv_op_amoxor_d; break; + case 36: op = rv_op_amoxor_q; break; + case 66: op = rv_op_amoor_w; break; + case 67: op = rv_op_amoor_d; 
break; + case 68: op = rv_op_amoor_q; break; + case 98: op = rv_op_amoand_w; break; + case 99: op = rv_op_amoand_d; break; + case 100: op = rv_op_amoand_q; break; + case 130: op = rv_op_amomin_w; break; + case 131: op = rv_op_amomin_d; break; + case 132: op = rv_op_amomin_q; break; + case 162: op = rv_op_amomax_w; break; + case 163: op = rv_op_amomax_d; break; + case 164: op = rv_op_amomax_q; break; + case 194: op = rv_op_amominu_w; break; + case 195: op = rv_op_amominu_d; break; + case 196: op = rv_op_amominu_q; break; + case 226: op = rv_op_amomaxu_w; break; + case 227: op = rv_op_amomaxu_d; break; + case 228: op = rv_op_amomaxu_q; break; + } + break; + case 12: + switch (((inst >> 22) & 0b1111111000) | ((inst >> 12) & 0b0000000111)) { + case 0: op = rv_op_add; break; + case 1: op = rv_op_sll; break; + case 2: op = rv_op_slt; break; + case 3: op = rv_op_sltu; break; + case 4: op = rv_op_xor; break; + case 5: op = rv_op_srl; break; + case 6: op = rv_op_or; break; + case 7: op = rv_op_and; break; + case 8: op = rv_op_mul; break; + case 9: op = rv_op_mulh; break; + case 10: op = rv_op_mulhsu; break; + case 11: op = rv_op_mulhu; break; + case 12: op = rv_op_div; break; + case 13: op = rv_op_divu; break; + case 14: op = rv_op_rem; break; + case 15: op = rv_op_remu; break; + case 256: op = rv_op_sub; break; + case 261: op = rv_op_sra; break; + } + break; + case 13: op = rv_op_lui; break; + case 14: + switch (((inst >> 22) & 0b1111111000) | ((inst >> 12) & 0b0000000111)) { + case 0: op = rv_op_addw; break; + case 1: op = rv_op_sllw; break; + case 5: op = rv_op_srlw; break; + case 8: op = rv_op_mulw; break; + case 12: op = rv_op_divw; break; + case 13: op = rv_op_divuw; break; + case 14: op = rv_op_remw; break; + case 15: op = rv_op_remuw; break; + case 256: op = rv_op_subw; break; + case 261: op = rv_op_sraw; break; + } + break; + case 16: + switch (((inst >> 25) & 0b11)) { + case 0: op = rv_op_fmadd_s; break; + case 1: op = rv_op_fmadd_d; break; + case 3: op = rv_op_fmadd_q; break; + } + break; + case 17: + switch (((inst >> 25) & 0b11)) { + case 0: op = rv_op_fmsub_s; break; + case 1: op = rv_op_fmsub_d; break; + case 3: op = rv_op_fmsub_q; break; + } + break; + case 18: + switch (((inst >> 25) & 0b11)) { + case 0: op = rv_op_fnmsub_s; break; + case 1: op = rv_op_fnmsub_d; break; + case 3: op = rv_op_fnmsub_q; break; + } + break; + case 19: + switch (((inst >> 25) & 0b11)) { + case 0: op = rv_op_fnmadd_s; break; + case 1: op = rv_op_fnmadd_d; break; + case 3: op = rv_op_fnmadd_q; break; + } + break; + case 20: + switch (((inst >> 25) & 0b1111111)) { + case 0: op = rv_op_fadd_s; break; + case 1: op = rv_op_fadd_d; break; + case 3: op = rv_op_fadd_q; break; + case 4: op = rv_op_fsub_s; break; + case 5: op = rv_op_fsub_d; break; + case 7: op = rv_op_fsub_q; break; + case 8: op = rv_op_fmul_s; break; + case 9: op = rv_op_fmul_d; break; + case 11: op = rv_op_fmul_q; break; + case 12: op = rv_op_fdiv_s; break; + case 13: op = rv_op_fdiv_d; break; + case 15: op = rv_op_fdiv_q; break; + case 16: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_fsgnj_s; break; + case 1: op = rv_op_fsgnjn_s; break; + case 2: op = rv_op_fsgnjx_s; break; + } + break; + case 17: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_fsgnj_d; break; + case 1: op = rv_op_fsgnjn_d; break; + case 2: op = rv_op_fsgnjx_d; break; + } + break; + case 19: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_fsgnj_q; break; + case 1: op = rv_op_fsgnjn_q; break; + case 2: op = rv_op_fsgnjx_q; break; + } + break; + case 
20: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_fmin_s; break; + case 1: op = rv_op_fmax_s; break; + } + break; + case 21: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_fmin_d; break; + case 1: op = rv_op_fmax_d; break; + } + break; + case 23: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_fmin_q; break; + case 1: op = rv_op_fmax_q; break; + } + break; + case 32: + switch (((inst >> 20) & 0b11111)) { + case 1: op = rv_op_fcvt_s_d; break; + case 3: op = rv_op_fcvt_s_q; break; + } + break; + case 33: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_fcvt_d_s; break; + case 3: op = rv_op_fcvt_d_q; break; + } + break; + case 35: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_fcvt_q_s; break; + case 1: op = rv_op_fcvt_q_d; break; + } + break; + case 44: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_fsqrt_s; break; + } + break; + case 45: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_fsqrt_d; break; + } + break; + case 47: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_fsqrt_q; break; + } + break; + case 80: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_fle_s; break; + case 1: op = rv_op_flt_s; break; + case 2: op = rv_op_feq_s; break; + } + break; + case 81: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_fle_d; break; + case 1: op = rv_op_flt_d; break; + case 2: op = rv_op_feq_d; break; + } + break; + case 83: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_fle_q; break; + case 1: op = rv_op_flt_q; break; + case 2: op = rv_op_feq_q; break; + } + break; + case 96: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_fcvt_w_s; break; + case 1: op = rv_op_fcvt_wu_s; break; + case 2: op = rv_op_fcvt_l_s; break; + case 3: op = rv_op_fcvt_lu_s; break; + } + break; + case 97: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_fcvt_w_d; break; + case 1: op = rv_op_fcvt_wu_d; break; + case 2: op = rv_op_fcvt_l_d; break; + case 3: op = rv_op_fcvt_lu_d; break; + } + break; + case 99: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_fcvt_w_q; break; + case 1: op = rv_op_fcvt_wu_q; break; + case 2: op = rv_op_fcvt_l_q; break; + case 3: op = rv_op_fcvt_lu_q; break; + } + break; + case 104: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_fcvt_s_w; break; + case 1: op = rv_op_fcvt_s_wu; break; + case 2: op = rv_op_fcvt_s_l; break; + case 3: op = rv_op_fcvt_s_lu; break; + } + break; + case 105: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_fcvt_d_w; break; + case 1: op = rv_op_fcvt_d_wu; break; + case 2: op = rv_op_fcvt_d_l; break; + case 3: op = rv_op_fcvt_d_lu; break; + } + break; + case 107: + switch (((inst >> 20) & 0b11111)) { + case 0: op = rv_op_fcvt_q_w; break; + case 1: op = rv_op_fcvt_q_wu; break; + case 2: op = rv_op_fcvt_q_l; break; + case 3: op = rv_op_fcvt_q_lu; break; + } + break; + case 112: + switch (((inst >> 17) & 0b11111000) | ((inst >> 12) & 0b00000111)) { + case 0: op = rv_op_fmv_x_s; break; + case 1: op = rv_op_fclass_s; break; + } + break; + case 113: + switch (((inst >> 17) & 0b11111000) | ((inst >> 12) & 0b00000111)) { + case 0: op = rv_op_fmv_x_d; break; + case 1: op = rv_op_fclass_d; break; + } + break; + case 115: + switch (((inst >> 17) & 0b11111000) | ((inst >> 12) & 0b00000111)) { + case 0: op = rv_op_fmv_x_q; break; + case 1: op = rv_op_fclass_q; break; + } + break; + case 120: + switch (((inst >> 17) & 0b11111000) | ((inst >> 12) & 0b00000111)) { + case 0: op = rv_op_fmv_s_x; break; + } + break; + case 121: + 
switch (((inst >> 17) & 0b11111000) | ((inst >> 12) & 0b00000111)) { + case 0: op = rv_op_fmv_d_x; break; + } + break; + case 123: + switch (((inst >> 17) & 0b11111000) | ((inst >> 12) & 0b00000111)) { + case 0: op = rv_op_fmv_q_x; break; + } + break; + } + break; + case 22: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_addid; break; + case 1: + switch (((inst >> 26) & 0b111111)) { + case 0: op = rv_op_sllid; break; + } + break; + case 5: + switch (((inst >> 26) & 0b111111)) { + case 0: op = rv_op_srlid; break; + case 16: op = rv_op_sraid; break; + } + break; + } + break; + case 24: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_beq; break; + case 1: op = rv_op_bne; break; + case 4: op = rv_op_blt; break; + case 5: op = rv_op_bge; break; + case 6: op = rv_op_bltu; break; + case 7: op = rv_op_bgeu; break; + } + break; + case 25: + switch (((inst >> 12) & 0b111)) { + case 0: op = rv_op_jalr; break; + } + break; + case 27: op = rv_op_jal; break; + case 28: + switch (((inst >> 12) & 0b111)) { + case 0: + switch (((inst >> 20) & 0b111111100000) | ((inst >> 7) & 0b000000011111)) { + case 0: + switch (((inst >> 15) & 0b1111111111)) { + case 0: op = rv_op_ecall; break; + case 32: op = rv_op_ebreak; break; + case 64: op = rv_op_uret; break; + } + break; + case 256: + switch (((inst >> 20) & 0b11111)) { + case 2: + switch (((inst >> 15) & 0b11111)) { + case 0: op = rv_op_sret; break; + } + break; + case 4: op = rv_op_sfence_vm; break; + case 5: + switch (((inst >> 15) & 0b11111)) { + case 0: op = rv_op_wfi; break; + } + break; + } + break; + case 288: op = rv_op_sfence_vma; break; + case 512: + switch (((inst >> 15) & 0b1111111111)) { + case 64: op = rv_op_hret; break; + } + break; + case 768: + switch (((inst >> 15) & 0b1111111111)) { + case 64: op = rv_op_mret; break; + } + break; + case 1952: + switch (((inst >> 15) & 0b1111111111)) { + case 576: op = rv_op_dret; break; + } + break; + } + break; + case 1: op = rv_op_csrrw; break; + case 2: op = rv_op_csrrs; break; + case 3: op = rv_op_csrrc; break; + case 5: op = rv_op_csrrwi; break; + case 6: op = rv_op_csrrsi; break; + case 7: op = rv_op_csrrci; break; + } + break; + case 30: + switch (((inst >> 22) & 0b1111111000) | ((inst >> 12) & 0b0000000111)) { + case 0: op = rv_op_addd; break; + case 1: op = rv_op_slld; break; + case 5: op = rv_op_srld; break; + case 8: op = rv_op_muld; break; + case 12: op = rv_op_divd; break; + case 13: op = rv_op_divud; break; + case 14: op = rv_op_remd; break; + case 15: op = rv_op_remud; break; + case 256: op = rv_op_subd; break; + case 261: op = rv_op_srad; break; + } + break; + } + break; + } + dec->op = op; +} + +/* operand extractors */ + +static uint32_t operand_rd(rv_inst inst) { + return (inst << 52) >> 59; +} + +static uint32_t operand_rs1(rv_inst inst) { + return (inst << 44) >> 59; +} + +static uint32_t operand_rs2(rv_inst inst) { + return (inst << 39) >> 59; +} + +static uint32_t operand_rs3(rv_inst inst) { + return (inst << 32) >> 59; +} + +static uint32_t operand_aq(rv_inst inst) { + return (inst << 37) >> 63; +} + +static uint32_t operand_rl(rv_inst inst) { + return (inst << 38) >> 63; +} + +static uint32_t operand_pred(rv_inst inst) { + return (inst << 36) >> 60; +} + +static uint32_t operand_succ(rv_inst inst) { + return (inst << 40) >> 60; +} + +static uint32_t operand_rm(rv_inst inst) { + return (inst << 49) >> 61; +} + +static uint32_t operand_shamt5(rv_inst inst) { + return (inst << 39) >> 59; +} + +static uint32_t operand_shamt6(rv_inst inst) { + return (inst << 38) >> 58; +} 
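+/*
+ * Note on the extractor idiom used throughout this section: each field
+ * is isolated with a left shift that moves its most significant bit to
+ * bit 63, followed by a right shift that right-aligns it. With the
+ * unsigned rv_inst the result is zero-extended; the extractors for
+ * signed immediates below first cast to int64_t so the arithmetic
+ * right shift sign-extends instead. For example, rd occupies
+ * inst[11:7], so operand_rd() computes (inst << (64 - 12)) >> (64 - 5),
+ * i.e. (inst << 52) >> 59.
+ */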
+ +static uint32_t operand_shamt7(rv_inst inst) { + return (inst << 37) >> 57; +} + +static uint32_t operand_crdq(rv_inst inst) { + return (inst << 59) >> 61; +} + +static uint32_t operand_crs1q(rv_inst inst) { + return (inst << 54) >> 61; +} + +static uint32_t operand_crs1rdq(rv_inst inst) { + return (inst << 54) >> 61; +} + +static uint32_t operand_crs2q(rv_inst inst) { + return (inst << 59) >> 61; +} + +static uint32_t operand_crd(rv_inst inst) { + return (inst << 52) >> 59; +} + +static uint32_t operand_crs1(rv_inst inst) { + return (inst << 52) >> 59; +} + +static uint32_t operand_crs1rd(rv_inst inst) { + return (inst << 52) >> 59; +} + +static uint32_t operand_crs2(rv_inst inst) { + return (inst << 57) >> 59; +} + +static uint32_t operand_cimmsh5(rv_inst inst) { + return (inst << 57) >> 59; +} + +static uint32_t operand_csr12(rv_inst inst) { + return (inst << 32) >> 52; +} + +static int32_t operand_imm12(rv_inst inst) { + return ((int64_t)inst << 32) >> 52; +} + +static int32_t operand_imm20(rv_inst inst) { + return (((int64_t)inst << 32) >> 44) << 12; +} + +static int32_t operand_jimm20(rv_inst inst) { + return (((int64_t)inst << 32) >> 63) << 20 | + ((inst << 33) >> 54) << 1 | + ((inst << 43) >> 63) << 11 | + ((inst << 44) >> 56) << 12; +} + +static int32_t operand_simm12(rv_inst inst) { + return (((int64_t)inst << 32) >> 57) << 5 | + (inst << 52) >> 59; +} + +static int32_t operand_sbimm12(rv_inst inst) { + return (((int64_t)inst << 32) >> 63) << 12 | + ((inst << 33) >> 58) << 5 | + ((inst << 52) >> 60) << 1 | + ((inst << 56) >> 63) << 11; +} + +static uint32_t operand_cimmsh6(rv_inst inst) { + return ((inst << 51) >> 63) << 5 | + (inst << 57) >> 59; +} + +static int32_t operand_cimmi(rv_inst inst) { + return (((int64_t)inst << 51) >> 63) << 5 | + (inst << 57) >> 59; +} + +static int32_t operand_cimmui(rv_inst inst) { + return (((int64_t)inst << 51) >> 63) << 17 | + ((inst << 57) >> 59) << 12; +} + +static uint32_t operand_cimmlwsp(rv_inst inst) { + return ((inst << 51) >> 63) << 5 | + ((inst << 57) >> 61) << 2 | + ((inst << 60) >> 62) << 6; +} + +static uint32_t operand_cimmldsp(rv_inst inst) { + return ((inst << 51) >> 63) << 5 | + ((inst << 57) >> 62) << 3 | + ((inst << 59) >> 61) << 6; +} + +static uint32_t operand_cimmlqsp(rv_inst inst) { + return ((inst << 51) >> 63) << 5 | + ((inst << 57) >> 63) << 4 | + ((inst << 58) >> 60) << 6; +} + +static int32_t operand_cimm16sp(rv_inst inst) { + return (((int64_t)inst << 51) >> 63) << 9 | + ((inst << 57) >> 63) << 4 | + ((inst << 58) >> 63) << 6 | + ((inst << 59) >> 62) << 7 | + ((inst << 61) >> 63) << 5; +} + +static int32_t operand_cimmj(rv_inst inst) { + return (((int64_t)inst << 51) >> 63) << 11 | + ((inst << 52) >> 63) << 4 | + ((inst << 53) >> 62) << 8 | + ((inst << 55) >> 63) << 10 | + ((inst << 56) >> 63) << 6 | + ((inst << 57) >> 63) << 7 | + ((inst << 58) >> 61) << 1 | + ((inst << 61) >> 63) << 5; +} + +static int32_t operand_cimmb(rv_inst inst) { + return (((int64_t)inst << 51) >> 63) << 8 | + ((inst << 52) >> 62) << 3 | + ((inst << 57) >> 62) << 6 | + ((inst << 59) >> 62) << 1 | + ((inst << 61) >> 63) << 5; +} + +static uint32_t operand_cimmswsp(rv_inst inst) { + return ((inst << 51) >> 60) << 2 | + ((inst << 55) >> 62) << 6; +} + +static uint32_t operand_cimmsdsp(rv_inst inst) { + return ((inst << 51) >> 61) << 3 | + ((inst << 54) >> 61) << 6; +} + +static uint32_t operand_cimmsqsp(rv_inst inst) { + return ((inst << 51) >> 62) << 4 | + ((inst << 53) >> 60) << 6; +} + +static uint32_t operand_cimm4spn(rv_inst inst) { + 
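+    /* CIW-format immediate for c.addi4spn: reassembles the scaled,
+       zero-extended immediate scattered as inst[12:11]->imm[5:4],
+       inst[10:7]->imm[9:6], inst[6]->imm[2], inst[5]->imm[3]. */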
+    return ((inst << 51) >> 62) << 4 |
+        ((inst << 53) >> 60) << 6 |
+        ((inst << 57) >> 63) << 2 |
+        ((inst << 58) >> 63) << 3;
+}
+
+static uint32_t operand_cimmw(rv_inst inst) {
+    return ((inst << 51) >> 61) << 3 |
+        ((inst << 57) >> 63) << 2 |
+        ((inst << 58) >> 63) << 6;
+}
+
+static uint32_t operand_cimmd(rv_inst inst) {
+    return ((inst << 51) >> 61) << 3 |
+        ((inst << 57) >> 62) << 6;
+}
+
+static uint32_t operand_cimmq(rv_inst inst) {
+    return ((inst << 51) >> 62) << 4 |
+        ((inst << 53) >> 63) << 8 |
+        ((inst << 57) >> 62) << 6;
+}
+
+/* decode operands */
+
+static void decode_inst_operands(rv_decode *dec)
+{
+    rv_inst inst = dec->inst;
+    dec->codec = opcode_data[dec->op].codec;
+    switch (dec->codec) {
+    case rv_codec_none:
+        dec->rd = dec->rs1 = dec->rs2 = rv_ireg_zero;
+        dec->imm = 0;
+        break;
+    case rv_codec_u:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_imm20(inst);
+        break;
+    case rv_codec_uj:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_jimm20(inst);
+        break;
+    case rv_codec_i:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_imm12(inst);
+        break;
+    case rv_codec_i_sh5:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_shamt5(inst);
+        break;
+    case rv_codec_i_sh6:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_shamt6(inst);
+        break;
+    case rv_codec_i_sh7:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_shamt7(inst);
+        break;
+    case rv_codec_i_csr:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_csr12(inst);
+        break;
+    case rv_codec_s:
+        dec->rd = rv_ireg_zero;
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = operand_rs2(inst);
+        dec->imm = operand_simm12(inst);
+        break;
+    case rv_codec_sb:
+        dec->rd = rv_ireg_zero;
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = operand_rs2(inst);
+        dec->imm = operand_sbimm12(inst);
+        break;
+    case rv_codec_r:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = operand_rs2(inst);
+        dec->imm = 0;
+        break;
+    case rv_codec_r_m:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = operand_rs2(inst);
+        dec->imm = 0;
+        dec->rm = operand_rm(inst);
+        break;
+    case rv_codec_r4_m:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = operand_rs2(inst);
+        dec->rs3 = operand_rs3(inst);
+        dec->imm = 0;
+        dec->rm = operand_rm(inst);
+        break;
+    case rv_codec_r_a:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = operand_rs2(inst);
+        dec->imm = 0;
+        dec->aq = operand_aq(inst);
+        dec->rl = operand_rl(inst);
+        break;
+    case rv_codec_r_l:
+        dec->rd = operand_rd(inst);
+        dec->rs1 = operand_rs1(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = 0;
+        dec->aq = operand_aq(inst);
+        dec->rl = operand_rl(inst);
+        break;
+    case rv_codec_r_f:
+        dec->rd = dec->rs1 = dec->rs2 = rv_ireg_zero;
+        dec->pred = operand_pred(inst);
+        dec->succ = operand_succ(inst);
+        dec->imm = 0;
+        break;
+    case rv_codec_cb:
+        dec->rd = rv_ireg_zero;
+        dec->rs1 = operand_crs1q(inst) + 8;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmb(inst);
+        break;
+    case rv_codec_cb_imm:
+        dec->rd = dec->rs1 = operand_crs1rdq(inst) + 8;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmi(inst);
+        break;
+    case rv_codec_cb_sh5:
+        dec->rd = dec->rs1 = operand_crs1rdq(inst) + 8;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmsh5(inst);
+        break;
+    case rv_codec_cb_sh6:
+        dec->rd = dec->rs1 = operand_crs1rdq(inst) + 8;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmsh6(inst);
+        break;
+    case rv_codec_ci:
+        dec->rd = dec->rs1 = operand_crs1rd(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmi(inst);
+        break;
+    case rv_codec_ci_sh5:
+        dec->rd = dec->rs1 = operand_crs1rd(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmsh5(inst);
+        break;
+    case rv_codec_ci_sh6:
+        dec->rd = dec->rs1 = operand_crs1rd(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmsh6(inst);
+        break;
+    case rv_codec_ci_16sp:
+        dec->rd = rv_ireg_sp;
+        dec->rs1 = rv_ireg_sp;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimm16sp(inst);
+        break;
+    case rv_codec_ci_lwsp:
+        dec->rd = operand_crd(inst);
+        dec->rs1 = rv_ireg_sp;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmlwsp(inst);
+        break;
+    case rv_codec_ci_ldsp:
+        dec->rd = operand_crd(inst);
+        dec->rs1 = rv_ireg_sp;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmldsp(inst);
+        break;
+    case rv_codec_ci_lqsp:
+        dec->rd = operand_crd(inst);
+        dec->rs1 = rv_ireg_sp;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmlqsp(inst);
+        break;
+    case rv_codec_ci_li:
+        dec->rd = operand_crd(inst);
+        dec->rs1 = rv_ireg_zero;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmi(inst);
+        break;
+    case rv_codec_ci_lui:
+        dec->rd = operand_crd(inst);
+        dec->rs1 = rv_ireg_zero;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmui(inst);
+        break;
+    case rv_codec_ci_none:
+        dec->rd = dec->rs1 = dec->rs2 = rv_ireg_zero;
+        dec->imm = 0;
+        break;
+    case rv_codec_ciw_4spn:
+        dec->rd = operand_crdq(inst) + 8;
+        dec->rs1 = rv_ireg_sp;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimm4spn(inst);
+        break;
+    case rv_codec_cj:
+        dec->rd = dec->rs1 = dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmj(inst);
+        break;
+    case rv_codec_cj_jal:
+        dec->rd = rv_ireg_ra;
+        dec->rs1 = dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmj(inst);
+        break;
+    case rv_codec_cl_lw:
+        dec->rd = operand_crdq(inst) + 8;
+        dec->rs1 = operand_crs1q(inst) + 8;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmw(inst);
+        break;
+    case rv_codec_cl_ld:
+        dec->rd = operand_crdq(inst) + 8;
+        dec->rs1 = operand_crs1q(inst) + 8;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmd(inst);
+        break;
+    case rv_codec_cl_lq:
+        dec->rd = operand_crdq(inst) + 8;
+        dec->rs1 = operand_crs1q(inst) + 8;
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = operand_cimmq(inst);
+        break;
+    case rv_codec_cr:
+        dec->rd = dec->rs1 = operand_crs1rd(inst);
+        dec->rs2 = operand_crs2(inst);
+        dec->imm = 0;
+        break;
+    case rv_codec_cr_mv:
+        dec->rd = operand_crd(inst);
+        dec->rs1 = operand_crs2(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = 0;
+        break;
+    case rv_codec_cr_jalr:
+        dec->rd = rv_ireg_ra;
+        dec->rs1 = operand_crs1(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = 0;
+        break;
+    case rv_codec_cr_jr:
+        dec->rd = rv_ireg_zero;
+        dec->rs1 = operand_crs1(inst);
+        dec->rs2 = rv_ireg_zero;
+        dec->imm = 0;
+        break;
+    case rv_codec_cs:
+        dec->rd = dec->rs1 = operand_crs1rdq(inst) + 8;
+        dec->rs2 = operand_crs2q(inst) + 8;
+        dec->imm = 0;
+        break;
+    case rv_codec_cs_sw:
+        dec->rd = rv_ireg_zero;
+        dec->rs1 = operand_crs1q(inst) + 8;
+        dec->rs2 = operand_crs2q(inst) + 8;
+        dec->imm = operand_cimmw(inst);
+        break;
+    case rv_codec_cs_sd:
+        dec->rd = rv_ireg_zero;
+        dec->rs1 = operand_crs1q(inst) + 8;
+        dec->rs2 = operand_crs2q(inst) + 8;
+        dec->imm = operand_cimmd(inst);
+        break;
+    case rv_codec_cs_sq:
+        dec->rd = rv_ireg_zero;
+        dec->rs1 = operand_crs1q(inst) + 8;
+        dec->rs2 = operand_crs2q(inst) + 8;
+        dec->imm = operand_cimmq(inst);
+        break;
+    case rv_codec_css_swsp:
+        dec->rd = rv_ireg_zero;
+        dec->rs1 = rv_ireg_sp;
+        dec->rs2 = operand_crs2(inst);
+        dec->imm = operand_cimmswsp(inst);
+        break;
+    case rv_codec_css_sdsp:
+        dec->rd = rv_ireg_zero;
+        dec->rs1 = rv_ireg_sp;
+        dec->rs2 = operand_crs2(inst);
+        dec->imm = operand_cimmsdsp(inst);
+        break;
+    case rv_codec_css_sqsp:
+        dec->rd = rv_ireg_zero;
+        dec->rs1 = rv_ireg_sp;
+        dec->rs2 = operand_crs2(inst);
+        dec->imm = operand_cimmsqsp(inst);
+        break;
+    }
+}
+
+/* decompress instruction */
+
+static void decode_inst_decompress(rv_decode *dec, rv_isa isa)
+{
+    int decomp_op;
+    switch (isa) {
+    case rv32: decomp_op = opcode_data[dec->op].decomp_rv32; break;
+    case rv64: decomp_op = opcode_data[dec->op].decomp_rv64; break;
+    case rv128: decomp_op = opcode_data[dec->op].decomp_rv128; break;
+    }
+    if (decomp_op != rv_op_illegal) {
+        if ((opcode_data[dec->op].decomp_data & rvcd_imm_nz) && dec->imm == 0) {
+            dec->op = rv_op_illegal;
+        } else {
+            dec->op = decomp_op;
+            dec->codec = opcode_data[decomp_op].codec;
+        }
+    }
+}
+
+/* check constraint */
+
+static bool check_constraints(rv_decode *dec, const rvc_constraint *c)
+{
+    int32_t imm = dec->imm;
+    uint8_t rd = dec->rd, rs1 = dec->rs1, rs2 = dec->rs2;
+    while (*c != rvc_end) {
+        switch (*c) {
+        case rvc_rd_eq_ra: if (!(rd == 1)) return false; break;
+        case rvc_rd_eq_x0: if (!(rd == 0)) return false; break;
+        case rvc_rs1_eq_x0: if (!(rs1 == 0)) return false; break;
+        case rvc_rs2_eq_x0: if (!(rs2 == 0)) return false; break;
+        case rvc_rs2_eq_rs1: if (!(rs2 == rs1)) return false; break;
+        case rvc_rs1_eq_ra: if (!(rs1 == 1)) return false; break;
+        case rvc_imm_eq_zero: if (!(imm == 0)) return false; break;
+        case rvc_imm_eq_n1: if (!(imm == -1)) return false; break;
+        case rvc_imm_eq_p1: if (!(imm == 1)) return false; break;
+        case rvc_csr_eq_0x001: if (!(imm == 0x001)) return false; break;
+        case rvc_csr_eq_0x002: if (!(imm == 0x002)) return false; break;
+        case rvc_csr_eq_0x003: if (!(imm == 0x003)) return false; break;
+        case rvc_csr_eq_0xc00: if (!(imm == 0xc00)) return false; break;
+        case rvc_csr_eq_0xc01: if (!(imm == 0xc01)) return false; break;
+        case rvc_csr_eq_0xc02: if (!(imm == 0xc02)) return false; break;
+        case rvc_csr_eq_0xc80: if (!(imm == 0xc80)) return false; break;
+        case rvc_csr_eq_0xc81: if (!(imm == 0xc81)) return false; break;
+        case rvc_csr_eq_0xc82: if (!(imm == 0xc82)) return false; break;
+        default: break;
+        }
+        c++;
+    }
+    return true;
+}
+
+/* lift instruction to pseudo-instruction */
+
+static void decode_inst_lift_pseudo(rv_decode *dec)
+{
+    const rv_comp_data *comp_data = opcode_data[dec->op].pseudo;
+    if (!comp_data) {
+        return;
+    }
+    while (comp_data->constraints) {
+        if (check_constraints(dec, comp_data->constraints)) {
+            dec->op = comp_data->op;
+            dec->codec = opcode_data[dec->op].codec;
+            return;
+        }
+        comp_data++;
+    }
+}
+
+/* format instruction */
+
+static void append(char *s1, const char *s2, ssize_t n)
+{
+    ssize_t l1 = strlen(s1);
+    if (n - l1 - 1 > 0) {
+        /* copy at most n - l1 - 1 chars, reserving one byte for the NUL
+           that strncat always appends */
+        strncat(s1, s2, n - l1 - 1);
+    }
+}
+
+#define INST_FMT_2 "%04" PRIx64 "              "
+#define INST_FMT_4 "%08" PRIx64 "          "
+#define INST_FMT_6 "%012" PRIx64 "      "
+#define INST_FMT_8 "%016" PRIx64 "  "
+
+static void decode_inst_format(char *buf, size_t buflen, size_t tab, rv_decode *dec)
+{
+    char tmp[64];
+    const char *fmt;
+
+    size_t len = inst_length(dec->inst);
+    switch (len) {
+    case 2:
+        snprintf(buf, buflen, INST_FMT_2, dec->inst);
+        break;
+    case 4:
+        snprintf(buf, buflen, INST_FMT_4, dec->inst);
+        break;
+    case 6:
+        snprintf(buf, buflen, INST_FMT_6, dec->inst);
+        break;
+    default:
+        snprintf(buf, buflen, INST_FMT_8, dec->inst);
+        break;
+    }
+
+    fmt = opcode_data[dec->op].format;
+    while (*fmt) {
+        switch (*fmt) {
+        case 'O':
+            append(buf, opcode_data[dec->op].name, buflen);
+            break;
+        case '(':
+            append(buf, "(", buflen);
+            break;
+        case ',':
+            append(buf, ",", buflen);
+            break;
+        case ')':
+            append(buf, ")", buflen);
+            break;
+        case '0':
+            append(buf, rv_ireg_name_sym[dec->rd], buflen);
+            break;
+        case '1':
+            append(buf, rv_ireg_name_sym[dec->rs1], buflen);
+            break;
+        case '2':
+            append(buf, rv_ireg_name_sym[dec->rs2], buflen);
+            break;
+        case '3':
+            append(buf, rv_freg_name_sym[dec->rd], buflen);
+            break;
+        case '4':
+            append(buf, rv_freg_name_sym[dec->rs1], buflen);
+            break;
+        case '5':
+            append(buf, rv_freg_name_sym[dec->rs2], buflen);
+            break;
+        case '6':
+            append(buf, rv_freg_name_sym[dec->rs3], buflen);
+            break;
+        case '7':
+            snprintf(tmp, sizeof(tmp), "%d", dec->rs1);
+            append(buf, tmp, buflen);
+            break;
+        case 'i':
+            snprintf(tmp, sizeof(tmp), "%d", dec->imm);
+            append(buf, tmp, buflen);
+            break;
+        case 'o':
+            snprintf(tmp, sizeof(tmp), "%d", dec->imm);
+            append(buf, tmp, buflen);
+            while (strlen(buf) < tab * 2) {
+                append(buf, " ", buflen);
+            }
+            snprintf(tmp, sizeof(tmp), "# 0x%" PRIx64,
+                dec->pc + dec->imm);
+            append(buf, tmp, buflen);
+            break;
+        case 'c': {
+            const char *name = csr_name(dec->imm & 0xfff);
+            if (name) {
+                append(buf, name, buflen);
+            } else {
+                snprintf(tmp, sizeof(tmp), "0x%03x", dec->imm & 0xfff);
+                append(buf, tmp, buflen);
+            }
+            break;
+        }
+        case 'r':
+            switch (dec->rm) {
+            case rv_rm_rne:
+                append(buf, "rne", buflen);
+                break;
+            case rv_rm_rtz:
+                append(buf, "rtz", buflen);
+                break;
+            case rv_rm_rdn:
+                append(buf, "rdn", buflen);
+                break;
+            case rv_rm_rup:
+                append(buf, "rup", buflen);
+                break;
+            case rv_rm_rmm:
+                append(buf, "rmm", buflen);
+                break;
+            case rv_rm_dyn:
+                append(buf, "dyn", buflen);
+                break;
+            default:
+                append(buf, "inv", buflen);
+                break;
+            }
+            break;
+        case 'p':
+            if (dec->pred & rv_fence_i) {
+                append(buf, "i", buflen);
+            }
+            if (dec->pred & rv_fence_o) {
+                append(buf, "o", buflen);
+            }
+            if (dec->pred & rv_fence_r) {
+                append(buf, "r", buflen);
+            }
+            if (dec->pred & rv_fence_w) {
+                append(buf, "w", buflen);
+            }
+            break;
+        case 's':
+            if (dec->succ & rv_fence_i) {
+                append(buf, "i", buflen);
+            }
+            if (dec->succ & rv_fence_o) {
+                append(buf, "o", buflen);
+            }
+            if (dec->succ & rv_fence_r) {
+                append(buf, "r", buflen);
+            }
+            if (dec->succ & rv_fence_w) {
+                append(buf, "w", buflen);
+            }
+            break;
+        case '\t':
+            while (strlen(buf) < tab) {
+                append(buf, " ", buflen);
+            }
+            break;
+        case 'A':
+            if (dec->aq) {
+                append(buf, ".aq", buflen);
+            }
+            break;
+        case 'R':
+            if (dec->rl) {
+                append(buf, ".rl", buflen);
+            }
+            break;
+        default:
+            break;
+        }
+        fmt++;
+    }
+}
+
+/* instruction length */
+
+size_t inst_length(rv_inst inst)
+{
+    /* NOTE: supports maximum instruction size of 64-bits */
+
+    /* instruction length coding
+     *
+     *      aa - 16 bit aa != 11
+     *   bbb11 - 32 bit bbb != 111
+     *  011111 - 48 bit
+     * 0111111 - 64 bit
+     */
+
+    return (inst &      0b11) != 0b11      ? 2
+         : (inst &   0b11100) != 0b11100   ? 4
+         : (inst &  0b111111) == 0b011111  ? 6
+         : (inst & 0b1111111) == 0b0111111 ? 8
+         : 0;
+}
+
+/* instruction fetch */
+
+void inst_fetch(const uint8_t *data, rv_inst *instp, size_t *length)
+{
+    rv_inst inst = ((rv_inst)data[1] << 8) | ((rv_inst)data[0]);
+    size_t len = *length = inst_length(inst);
+    if (len >= 8) inst |= ((rv_inst)data[7] << 56) | ((rv_inst)data[6] << 48);
+    if (len >= 6) inst |= ((rv_inst)data[5] << 40) | ((rv_inst)data[4] << 32);
+    if (len >= 4) inst |= ((rv_inst)data[3] << 24) | ((rv_inst)data[2] << 16);
+    *instp = inst;
+}
+
+/* disassemble instruction */
+
+void disasm_inst(char *buf, size_t buflen, rv_isa isa, uint64_t pc, rv_inst inst)
+{
+    rv_decode dec = { .pc = pc, .inst = inst };
+    decode_inst_opcode(&dec, isa);
+    decode_inst_operands(&dec);
+    decode_inst_decompress(&dec, isa);
+    decode_inst_lift_pseudo(&dec);
+    decode_inst_format(buf, buflen, 32, &dec);
+}
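
For context, a minimal sketch of how a caller might drive this disassembler, not part of the patch itself. It uses only the entry points added above (`inst_fetch`, `disasm_inst`) and the `rv_inst`/`rv_isa` types from `riscv-disas.h`; the `disasm_buffer` name, the 128-byte buffer size, and the assumption that the code buffer is readable up to 8 bytes past each instruction start are mine:

```c
#include <inttypes.h>
#include <stdio.h>
#include "riscv-disas.h"

/* Walk a raw code buffer: inst_fetch() reads one (possibly compressed)
   instruction and reports its length; disasm_inst() decodes, lifts
   pseudo-instructions, and formats it into buf. */
static void disasm_buffer(const uint8_t *code, size_t size, uint64_t pc)
{
    size_t offset = 0;
    while (offset + 2 <= size) {
        rv_inst inst;
        size_t length;
        char buf[128];
        inst_fetch(code + offset, &inst, &length);
        if (length == 0 || offset + length > size)
            break; /* unknown or truncated encoding */
        disasm_inst(buf, sizeof(buf), rv64, pc + offset, inst);
        printf("0x%" PRIx64 ":  %s\n", pc + offset, buf);
        offset += length;
    }
}
```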