From e9c8e60ee8d58e21a7f3582a4695eeb99ec4c5d7 Mon Sep 17 00:00:00 2001 From: Stefan Stipanovic Date: Wed, 22 May 2024 10:34:20 +0200 Subject: [PATCH] [RISCV] Support for ESP32-P4 instructions in RISCV backend --- .gitignore | 1 + .gitlab-ci.yml | 26 +- clang/include/clang/Basic/BuiltinsRISCV.def | 2 + .../clang/Basic/BuiltinsRISCVESP32P4.def | 354 + clang/test/CodeGen/RISCV/riscv-esp32p4.c | 1027 + clang/test/Misc/target-invalid-cpu-note.c | 4 +- llvm/include/llvm/IR/IntrinsicsRISCV.td | 4 + .../include/llvm/IR/IntrinsicsRISCVESP32P4.td | 1065 ++ llvm/lib/Support/RISCVISAInfo.cpp | 1 + .../Target/RISCV/AsmParser/RISCVAsmParser.cpp | 97 + llvm/lib/Target/RISCV/CMakeLists.txt | 1 + .../RISCV/Disassembler/RISCVDisassembler.cpp | 153 + .../Target/RISCV/MCTargetDesc/RISCVBaseInfo.h | 3 + .../RISCV/MCTargetDesc/RISCVInstPrinter.cpp | 116 + .../RISCV/MCTargetDesc/RISCVInstPrinter.h | 20 + .../RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp | 187 +- .../Target/RISCV/RISCVESP32P4ISelLowering.cpp | 8468 +++++++++ llvm/lib/Target/RISCV/RISCVESP32P4Operands.td | 134 + llvm/lib/Target/RISCV/RISCVFeatures.td | 3 + llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 14 +- llvm/lib/Target/RISCV/RISCVISelLowering.h | 4 + .../Target/RISCV/RISCVInstrFormatsESP32P4.td | 42 + llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 6 + llvm/lib/Target/RISCV/RISCVInstrInfo.td | 1 + .../lib/Target/RISCV/RISCVInstrInfoESP32P4.td | 15603 ++++++++++++++++ llvm/lib/Target/RISCV/RISCVInstrInfoP4HWLP.td | 172 + llvm/lib/Target/RISCV/RISCVProcessors.td | 14 + llvm/lib/Target/RISCV/RISCVRegisterInfo.td | 21 + llvm/test/CodeGen/RISCV/esp32p4.ll | 1289 ++ llvm/test/MC/RISCV/esp32p4-hwlp-valid.s | 22 + llvm/test/MC/RISCV/esp32p4-valid.s | 710 + llvm/test/MC/RISCV/rv64xtheadmemidx-invalid.s | 2 +- llvm/unittests/Support/RISCVISAInfoTest.cpp | 1 + llvm/utils/TableGen/AsmMatcherEmitter.cpp | 5 +- 34 files changed, 29564 insertions(+), 8 deletions(-) create mode 100644 clang/include/clang/Basic/BuiltinsRISCVESP32P4.def create 
mode 100644 clang/test/CodeGen/RISCV/riscv-esp32p4.c create mode 100644 llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td create mode 100644 llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp create mode 100644 llvm/lib/Target/RISCV/RISCVESP32P4Operands.td create mode 100644 llvm/lib/Target/RISCV/RISCVInstrFormatsESP32P4.td create mode 100644 llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td create mode 100644 llvm/lib/Target/RISCV/RISCVInstrInfoP4HWLP.td create mode 100644 llvm/test/CodeGen/RISCV/esp32p4.ll create mode 100644 llvm/test/MC/RISCV/esp32p4-hwlp-valid.s create mode 100644 llvm/test/MC/RISCV/esp32p4-valid.s diff --git a/.gitignore b/.gitignore index 0e13e978416184..1315ce235e6550 100644 --- a/.gitignore +++ b/.gitignore @@ -73,3 +73,4 @@ pythonenv* /clang/utils/analyzer/projects/*/RefScanBuildResults # automodapi puts generated documentation files here. /lldb/docs/python_api/ +/dbg diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b28cf30282426d..d38b3dc5fd3660 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -186,7 +186,7 @@ build_and_test: [ ! 
-f "${BUILD_DIR}/lld-tests.log" ] || grep -i "internal compiler error\|Killed" ${BUILD_DIR}/lld-tests.log || true fi script: - - BUILD_PATH=$PWD/${BUILD_DIR} + - export BUILD_PATH=$PWD/${BUILD_DIR} - mkdir -p ${BUILD_PATH} - cmake -G Ninja -S llvm @@ -209,6 +209,29 @@ build_and_test: - runuser -u test_runner -- ninja -C ${BUILD_PATH} check-lld 2>&1 > ${BUILD_PATH}/lld-tests.log; - chown -R ${CUR_USER} ${BUILD_PATH}; +test_xesppie: + stage: test + dependencies: + - pack_x86_64-linux-gnu + when: manual + allow_failure: true + only: + - tags + script: + - cd ${DIST_DIR}/ + - ls -l + - DISTRO_PACK_FILE=$(cat dist_name_x86_64-linux-gnu) + - tar -xf ${DISTRO_PACK_FILE} + - ls -l + - cd esp-clang + - ls -l + - pwd + - export CC="$(pwd)/bin/clang" + - export OBJDUMP="$(pwd)/bin/llvm-objdump" + - git clone -q --depth=1 "${GITLAB_SSH_SERVER}/idf/esp-compiler-tests.git" + - cd esp-compiler-tests/build-only/xesppie + - ./test_xesppie.py + .build_linux-gnu_template: extends: .build_toolchain_template variables: @@ -441,6 +464,7 @@ upload_to_github: - job: pack_x86_64-w64-mingw32 - job: sign_x86_64-apple-darwin - job: sign_aarch64-apple-darwin + - job: test_xesppie before_script: [] script: - ls -l ${DIST_DIR} diff --git a/clang/include/clang/Basic/BuiltinsRISCV.def b/clang/include/clang/Basic/BuiltinsRISCV.def index 1528b18c82eade..f295c266b0a9b6 100644 --- a/clang/include/clang/Basic/BuiltinsRISCV.def +++ b/clang/include/clang/Basic/BuiltinsRISCV.def @@ -89,5 +89,7 @@ TARGET_BUILTIN(__builtin_riscv_sm3p1, "UiUi", "nc", "zksh") TARGET_BUILTIN(__builtin_riscv_ntl_load, "v.", "t", "zihintntl") TARGET_BUILTIN(__builtin_riscv_ntl_store, "v.", "t", "zihintntl") +#include "clang/Basic/BuiltinsRISCVESP32P4.def" + #undef BUILTIN #undef TARGET_BUILTIN diff --git a/clang/include/clang/Basic/BuiltinsRISCVESP32P4.def b/clang/include/clang/Basic/BuiltinsRISCVESP32P4.def new file mode 100644 index 00000000000000..f9c9eab176845e --- /dev/null +++ 
b/clang/include/clang/Basic/BuiltinsRISCVESP32P4.def @@ -0,0 +1,354 @@ +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s16_qacc_h, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s16_qacc_l, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s8_qacc_h, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s8_qacc_l, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s16_qacc, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s16_qacc_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s16_qacc_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s16_qacc_st_ip, "vUiUiUiUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s16_qacc_st_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s16_xacc, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s16_xacc_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s16_xacc_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s16_xacc_st_ip, "vUiUiUiUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s16_xacc_st_xp, "vUiUiUiUiUi", "n", 
"xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s8_qacc, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s8_qacc_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s8_qacc_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s8_qacc_st_ip, "vUiUiUiUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s8_qacc_st_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s8_xacc, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s8_xacc_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s8_xacc_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s8_xacc_st_ip, "vUiUiUiUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s8_xacc_st_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u16_qacc, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u16_qacc_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u16_qacc_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u16_qacc_st_ip, "vUiUiUiUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u16_qacc_st_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u16_xacc, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u16_xacc_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u16_xacc_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u16_xacc_st_ip, "vUiUiUiUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u16_xacc_st_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u8_qacc, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u8_qacc_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u8_qacc_ld_xp, "vUiUiUiUiUi", "n", 
"xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u8_qacc_st_ip, "vUiUiUiUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u8_qacc_st_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u8_xacc, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u8_xacc_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u8_xacc_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u8_xacc_st_ip, "vUiUiUiUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u8_xacc_st_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s16_qacc_ldbc_incp, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_s8_qacc_ldbc_incp, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u16_qacc_ldbc_incp, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmulas_u8_qacc_ldbc_incp, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsmulas_s16_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsmulas_s16_qacc_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsmulas_s8_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsmulas_s8_qacc_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsmulas_u16_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsmulas_u16_qacc_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsmulas_u8_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsmulas_u8_qacc_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_s16, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_s16_ld_incp, "vUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_s16_st_incp, "vUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_s8, "vUiUiUiUi", "n", 
"xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_s8_ld_incp, "vUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_s8_st_incp, "vUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_u16, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_u16_ld_incp, "vUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_u16_st_incp, "vUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_u8, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_u8_ld_incp, "vUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_cmul_u8_st_incp, "vUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_max_s16_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_max_s32_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_max_s8_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_max_u16_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_max_u32_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_max_u8_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_min_s16_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_min_s32_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_min_s8_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_min_u16_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_min_u32_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_min_u8_a, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vabs_16, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vabs_32, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vabs_8, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_s16_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_s16_st_incp, "vUiUiUiUiUi", "n", "xesppie") 
+TARGET_BUILTIN(__builtin_riscv_esp_vadd_s32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_s32_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_s32_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_s8_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_s8_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_u16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_u16_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_u16_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_u32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_u32_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_u32_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_u8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_u8_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vadd_u8_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vclamp_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_s16_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_s16_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_s32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_s32_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_s32_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_s8_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_s8_st_incp, "vUiUiUiUiUi", "n", 
"xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_u16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_u16_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_u16_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_u32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_u32_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_u32_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_u8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_u8_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmax_u8_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_s16_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_s16_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_s32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_s32_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_s32_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_s8_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_s8_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_u16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_u16_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_u16_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_u32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_u32_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_u32_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_u8, 
"vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_u8_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmin_u8_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_s16_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_s16_s8xs8, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_s16_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_s32_s16xs16, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_s8_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_s8_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_u16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_u16_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_u16_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_u8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_u8_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vmul_u8_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vprelu_s16, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vprelu_s8, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vrelu_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vrelu_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsadds_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsadds_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsadds_u16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsadds_u8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsat_s16, "vUiUiUiUi", "n", "xesppie") 
+TARGET_BUILTIN(__builtin_riscv_esp_vsat_s32, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsat_s8, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsat_u16, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsat_u32, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsat_u8, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vssubs_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vssubs_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vssubs_u16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vssubs_u8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_s16_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_s16_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_s32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_s32_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_s32_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_s8_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_s8_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_u16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_u16_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_u16_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_u32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_u32_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_u32_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_u8, "vUiUiUi", "n", "xesppie") 
+TARGET_BUILTIN(__builtin_riscv_esp_vsub_u8_ld_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsub_u8_st_incp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_addx2, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_addx4, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_sat, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_subx2, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_subx4, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_andq, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_notq, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_orq, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_xorq, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_eq_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_eq_s32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_eq_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_eq_u16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_eq_u32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_eq_u8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_gt_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_gt_s32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_gt_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_gt_u16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_gt_u32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_gt_u8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_lt_s16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_lt_s32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_lt_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_lt_u16, "vUiUiUi", "n", "xesppie") 
+TARGET_BUILTIN(__builtin_riscv_esp_vcmp_lt_u32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vcmp_lt_u8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_mov_s16_qacc, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_mov_s8_qacc, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_mov_u16_qacc, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_mov_u8_qacc, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movi_16_a, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movi_16_q, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movi_32_a, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movi_32_q, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movi_8_a, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movi_8_q, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_r_cfg, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_r_fft_bit_width, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_r_perf, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_r_sar, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_r_sar_bytes, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_r_xacc_h, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_r_xacc_l, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_w_cfg, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_w_fft_bit_width, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_w_perf, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_w_sar, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_w_sar_bytes, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_w_xacc_h, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_movx_w_xacc_l, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vext_s16, "vUiUiUi", "n", "xesppie") 
+TARGET_BUILTIN(__builtin_riscv_esp_vext_s8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vext_u16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vext_u8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vunzip_16, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vunzip_32, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vunzip_8, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vunzipt_16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vunzipt_8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vzip_16, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vzip_32, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vzip_8, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vzipt_16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vzipt_8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_zero_q, "vUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_zero_qacc, "v", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_zero_xacc, "v", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_fft_ams_s16_ld_incp, "vUiUiUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_fft_ams_s16_ld_incp_uaup, "vUiUiUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_fft_ams_s16_ld_r32_decp, "vUiUiUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_fft_ams_s16_st_incp, "vUiUiUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_fft_bitrev, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_fft_cmul_s16_ld_xp, "vUiUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_fft_cmul_s16_st_xp, "vUiUiUiUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_fft_r2bf_s16, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_fft_r2bf_s16_st_incp, "vUiUiUiUiUi", "n", "xesppie") 
+TARGET_BUILTIN(__builtin_riscv_esp_fft_vst_r32_decp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ld_128_usar_ip, "vUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ld_128_usar_xp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ld_xacc_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ldqa_s16_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ldqa_s16_128_xp, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ldqa_s8_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ldqa_s8_128_xp, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ldqa_u16_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ldqa_u16_128_xp, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ldqa_u8_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ldqa_u8_128_xp, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldbc_16_ip, "vUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldbc_16_xp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldbc_32_ip, "vUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldbc_32_xp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldbc_8_ip, "vUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldbc_8_xp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldext_s16_ip, "vUiIiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldext_s16_xp, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldext_s8_ip, "vUiIiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldext_s8_xp, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldext_u16_ip, "vUiIiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldext_u16_xp, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldext_u8_ip, "vUiIiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldext_u8_xp, 
"vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vldhbc_16_incp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ld_qacc_h_h_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ld_qacc_h_l_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ld_qacc_l_h_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ld_qacc_l_l_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ld_ua_state_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_ldxq_32, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_st_qacc_h_h_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_st_qacc_h_l_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_st_qacc_l_h_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_st_qacc_l_l_128_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_st_ua_state_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_stxq_32, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vld_128_ip, "vUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vld_128_xp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vld_h_64_ip, "vUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vld_h_64_xp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vld_l_64_ip, "vUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vld_l_64_xp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vst_128_ip, "vUiUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vst_128_xp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vst_h_64_ip, "vUiUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vst_h_64_xp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vst_l_64_ip, "vUiUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vst_l_64_xp, "vUiUiUi", "n", "xesppie") 
+TARGET_BUILTIN(__builtin_riscv_esp_slci_2q, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_slcxxp_2q, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_src_q, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_src_q_ld_ip, "vUiUiUiIiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_src_q_ld_xp, "vUiUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_src_q_qup, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srci_2q, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srcmb_s16_q_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srcmb_s16_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srcmb_s8_q_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srcmb_s8_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srcmb_u16_q_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srcmb_u16_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srcmb_u8_q_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srcmb_u8_qacc, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srcq_128_st_incp, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srcxxp_2q, "vUiUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srs_s_xacc, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_srs_u_xacc, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsl_32, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsld_16, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsld_32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsld_8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsr_s32, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsr_u32, "vUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsrd_16, "vUiUiUi", "n", "xesppie") 
+TARGET_BUILTIN(__builtin_riscv_esp_vsrd_32, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_vsrd_8, "vUiUiUi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_st_s_xacc_ip, "vUiIi", "n", "xesppie") +TARGET_BUILTIN(__builtin_riscv_esp_st_u_xacc_ip, "vUiIi", "n", "xesppie") diff --git a/clang/test/CodeGen/RISCV/riscv-esp32p4.c b/clang/test/CodeGen/RISCV/riscv-esp32p4.c new file mode 100644 index 00000000000000..679ca00968b03c --- /dev/null +++ b/clang/test/CodeGen/RISCV/riscv-esp32p4.c @@ -0,0 +1,1027 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -triple riscv32 -target-feature +xesppie -S -emit-llvm -O0 -o - %s \ +// RUN: | FileCheck %s + +#include + +// CHECK-LABEL: @test( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[DATA:%.*]] = alloca i32, align 4 +// CHECK-NEXT: store i32 10, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.h(i32 4, i32 2) +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32 4, i32 0, i32 [[TMP0]], i32 -96, i32 3) +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32 [[TMP1]], i32 5, i32 5, i32 [[TMP2]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.l(i32 6, i32 1) +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32 2, i32 3, i32 [[TMP3]], i32 -48, i32 3) +// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32 [[TMP4]], i32 7, i32 2, i32 [[TMP5]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.h(i32 4, i32 4) +// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[DATA]], 
align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32 7, i32 4, i32 [[TMP6]], i32 -128, i32 4) +// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32 [[TMP7]], i32 2, i32 3, i32 [[TMP8]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.l(i32 6, i32 4) +// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32 5, i32 5, i32 [[TMP9]], i32 16, i32 7) +// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32 [[TMP10]], i32 4, i32 4, i32 [[TMP11]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc(i32 7, i32 6) +// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32 0, i32 4, i32 [[TMP12]], i32 96, i32 4) +// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32 [[TMP13]], i32 4, i32 4, i32 [[TMP14]], i32 7) +// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32 2, i32 1, i32 7, i32 [[TMP15]], i32 -128) +// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32 [[TMP16]], i32 1, i32 2, i32 6, i32 [[TMP17]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc(i32 1, i32 3) +// CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32 7, i32 3, i32 [[TMP18]], i32 -96, i32 5) 
+// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32 [[TMP19]], i32 3, i32 1, i32 [[TMP20]], i32 1) +// CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32 2, i32 0, i32 0, i32 [[TMP21]], i32 64) +// CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32 [[TMP22]], i32 6, i32 3, i32 6, i32 [[TMP23]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc(i32 0, i32 0) +// CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32 0, i32 3, i32 [[TMP24]], i32 0, i32 7) +// CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32 [[TMP25]], i32 4, i32 3, i32 [[TMP26]], i32 4) +// CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32 3, i32 3, i32 5, i32 [[TMP27]], i32 -64) +// CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32 [[TMP28]], i32 4, i32 7, i32 0, i32 [[TMP29]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc(i32 3, i32 3) +// CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32 3, i32 2, i32 [[TMP30]], i32 0, i32 5) +// CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32 
[[TMP31]], i32 6, i32 3, i32 [[TMP32]], i32 0) +// CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32 1, i32 7, i32 7, i32 [[TMP33]], i32 -32) +// CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP35:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32 [[TMP34]], i32 6, i32 7, i32 6, i32 [[TMP35]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc(i32 5, i32 4) +// CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32 5, i32 2, i32 [[TMP36]], i32 64, i32 6) +// CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP38:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32 [[TMP37]], i32 5, i32 7, i32 [[TMP38]], i32 7) +// CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32 1, i32 4, i32 3, i32 [[TMP39]], i32 -96) +// CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP41:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32 [[TMP40]], i32 5, i32 0, i32 2, i32 [[TMP41]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc(i32 0, i32 7) +// CHECK-NEXT: [[TMP42:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32 6, i32 6, i32 [[TMP42]], i32 -96, i32 4) +// CHECK-NEXT: [[TMP43:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP44:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32 [[TMP43]], i32 6, i32 5, i32 [[TMP44]], i32 6) +// CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32 3, i32 0, i32 
4, i32 [[TMP45]], i32 64) +// CHECK-NEXT: [[TMP46:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP47:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32 [[TMP46]], i32 1, i32 0, i32 4, i32 [[TMP47]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc(i32 5, i32 4) +// CHECK-NEXT: [[TMP48:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32 5, i32 3, i32 [[TMP48]], i32 80, i32 5) +// CHECK-NEXT: [[TMP49:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP50:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32 [[TMP49]], i32 4, i32 7, i32 [[TMP50]], i32 4) +// CHECK-NEXT: [[TMP51:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32 3, i32 3, i32 5, i32 [[TMP51]], i32 -96) +// CHECK-NEXT: [[TMP52:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP53:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32 [[TMP52]], i32 6, i32 7, i32 3, i32 [[TMP53]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc(i32 0, i32 1) +// CHECK-NEXT: [[TMP54:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32 6, i32 0, i32 [[TMP54]], i32 -32, i32 7) +// CHECK-NEXT: [[TMP55:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP56:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32 [[TMP55]], i32 3, i32 3, i32 [[TMP56]], i32 5) +// CHECK-NEXT: [[TMP57:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32 7, i32 0, i32 4, i32 [[TMP57]], i32 32) +// CHECK-NEXT: [[TMP58:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP59:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void 
@llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32 [[TMP58]], i32 1, i32 0, i32 0, i32 [[TMP59]]) +// CHECK-NEXT: [[TMP60:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32 3, i32 6, i32 [[TMP60]], i32 7) +// CHECK-NEXT: [[TMP61:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32 5, i32 3, i32 [[TMP61]], i32 6) +// CHECK-NEXT: [[TMP62:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32 0, i32 3, i32 [[TMP62]], i32 2) +// CHECK-NEXT: [[TMP63:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32 4, i32 7, i32 [[TMP63]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s16.qacc(i32 7, i32 7, i32 4) +// CHECK-NEXT: [[TMP64:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32 7, i32 7, i32 [[TMP64]], i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s8.qacc(i32 7, i32 0, i32 7) +// CHECK-NEXT: [[TMP65:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32 5, i32 6, i32 [[TMP65]], i32 15, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u16.qacc(i32 7, i32 0, i32 10) +// CHECK-NEXT: [[TMP66:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32 7, i32 6, i32 [[TMP66]], i32 1, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u8.qacc(i32 3, i32 6, i32 5) +// CHECK-NEXT: [[TMP67:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32 6, i32 1, i32 [[TMP67]], i32 4, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s16(i32 2, i32 1, i32 3, i32 1) +// CHECK-NEXT: [[TMP68:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s16.ld.incp(i32 2, i32 7, i32 
[[TMP68]], i32 0, i32 5, i32 0) +// CHECK-NEXT: [[TMP69:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s16.st.incp(i32 7, i32 4, i32 6, i32 [[TMP69]], i32 2, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s8(i32 5, i32 7, i32 2, i32 4) +// CHECK-NEXT: [[TMP70:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s8.ld.incp(i32 0, i32 6, i32 [[TMP70]], i32 2, i32 7, i32 5) +// CHECK-NEXT: [[TMP71:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.s8.st.incp(i32 1, i32 6, i32 5, i32 [[TMP71]], i32 0, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u16(i32 7, i32 4, i32 0, i32 0) +// CHECK-NEXT: [[TMP72:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u16.ld.incp(i32 2, i32 0, i32 [[TMP72]], i32 3, i32 1, i32 1) +// CHECK-NEXT: [[TMP73:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u16.st.incp(i32 4, i32 3, i32 4, i32 [[TMP73]], i32 1, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u8(i32 3, i32 4, i32 1, i32 5) +// CHECK-NEXT: [[TMP74:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u8.ld.incp(i32 5, i32 0, i32 [[TMP74]], i32 1, i32 5, i32 1) +// CHECK-NEXT: [[TMP75:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.cmul.u8.st.incp(i32 2, i32 7, i32 4, i32 [[TMP75]], i32 3, i32 1) +// CHECK-NEXT: [[TMP76:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.s16.a(i32 2, i32 [[TMP76]]) +// CHECK-NEXT: [[TMP77:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.s32.a(i32 0, i32 [[TMP77]]) +// CHECK-NEXT: [[TMP78:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.s8.a(i32 7, i32 [[TMP78]]) +// CHECK-NEXT: [[TMP79:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void 
@llvm.riscv.esp.max.u16.a(i32 4, i32 [[TMP79]]) +// CHECK-NEXT: [[TMP80:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.u32.a(i32 4, i32 [[TMP80]]) +// CHECK-NEXT: [[TMP81:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.max.u8.a(i32 3, i32 [[TMP81]]) +// CHECK-NEXT: [[TMP82:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.s16.a(i32 0, i32 [[TMP82]]) +// CHECK-NEXT: [[TMP83:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.s32.a(i32 7, i32 [[TMP83]]) +// CHECK-NEXT: [[TMP84:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.s8.a(i32 4, i32 [[TMP84]]) +// CHECK-NEXT: [[TMP85:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.u16.a(i32 7, i32 [[TMP85]]) +// CHECK-NEXT: [[TMP86:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.u32.a(i32 6, i32 [[TMP86]]) +// CHECK-NEXT: [[TMP87:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.min.u8.a(i32 1, i32 [[TMP87]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vabs.16(i32 7, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vabs.32(i32 0, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vabs.8(i32 5, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s16(i32 0, i32 4, i32 0) +// CHECK-NEXT: [[TMP88:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s16.ld.incp(i32 4, i32 2, i32 [[TMP88]], i32 0, i32 7) +// CHECK-NEXT: [[TMP89:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s16.st.incp(i32 5, i32 7, i32 0, i32 [[TMP89]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s32(i32 6, i32 5, i32 0) +// CHECK-NEXT: [[TMP90:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s32.ld.incp(i32 5, i32 6, i32 [[TMP90]], i32 0, i32 2) +// CHECK-NEXT: 
[[TMP91:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s32.st.incp(i32 7, i32 7, i32 0, i32 [[TMP91]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s8(i32 6, i32 5, i32 5) +// CHECK-NEXT: [[TMP92:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s8.ld.incp(i32 2, i32 4, i32 [[TMP92]], i32 6, i32 7) +// CHECK-NEXT: [[TMP93:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.s8.st.incp(i32 4, i32 6, i32 4, i32 [[TMP93]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u16(i32 0, i32 6, i32 5) +// CHECK-NEXT: [[TMP94:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u16.ld.incp(i32 6, i32 7, i32 [[TMP94]], i32 5, i32 1) +// CHECK-NEXT: [[TMP95:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u16.st.incp(i32 1, i32 3, i32 4, i32 [[TMP95]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u32(i32 7, i32 3, i32 0) +// CHECK-NEXT: [[TMP96:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u32.ld.incp(i32 0, i32 4, i32 [[TMP96]], i32 5, i32 5) +// CHECK-NEXT: [[TMP97:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u32.st.incp(i32 1, i32 5, i32 6, i32 [[TMP97]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u8(i32 0, i32 1, i32 5) +// CHECK-NEXT: [[TMP98:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u8.ld.incp(i32 5, i32 1, i32 [[TMP98]], i32 2, i32 6) +// CHECK-NEXT: [[TMP99:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vadd.u8.st.incp(i32 1, i32 7, i32 4, i32 [[TMP99]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vclamp.s16(i32 3, i32 12, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s16(i32 1, i32 2, i32 2) +// CHECK-NEXT: [[TMP100:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void 
@llvm.riscv.esp.vmax.s16.ld.incp(i32 3, i32 0, i32 [[TMP100]], i32 5, i32 1) +// CHECK-NEXT: [[TMP101:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s16.st.incp(i32 0, i32 4, i32 2, i32 [[TMP101]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s32(i32 0, i32 2, i32 4) +// CHECK-NEXT: [[TMP102:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s32.ld.incp(i32 3, i32 5, i32 [[TMP102]], i32 3, i32 6) +// CHECK-NEXT: [[TMP103:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s32.st.incp(i32 6, i32 0, i32 7, i32 [[TMP103]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s8(i32 1, i32 0, i32 3) +// CHECK-NEXT: [[TMP104:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s8.ld.incp(i32 1, i32 6, i32 [[TMP104]], i32 6, i32 6) +// CHECK-NEXT: [[TMP105:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.s8.st.incp(i32 2, i32 7, i32 1, i32 [[TMP105]], i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u16(i32 6, i32 6, i32 3) +// CHECK-NEXT: [[TMP106:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u16.ld.incp(i32 5, i32 2, i32 [[TMP106]], i32 2, i32 1) +// CHECK-NEXT: [[TMP107:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u16.st.incp(i32 3, i32 6, i32 2, i32 [[TMP107]], i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u32(i32 2, i32 3, i32 3) +// CHECK-NEXT: [[TMP108:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u32.ld.incp(i32 1, i32 4, i32 [[TMP108]], i32 5, i32 5) +// CHECK-NEXT: [[TMP109:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u32.st.incp(i32 4, i32 2, i32 1, i32 [[TMP109]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u8(i32 7, i32 0, i32 4) +// CHECK-NEXT: [[TMP110:%.*]] = load i32, ptr 
[[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u8.ld.incp(i32 1, i32 5, i32 [[TMP110]], i32 4, i32 7) +// CHECK-NEXT: [[TMP111:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmax.u8.st.incp(i32 1, i32 2, i32 5, i32 [[TMP111]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s16(i32 5, i32 1, i32 7) +// CHECK-NEXT: [[TMP112:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s16.ld.incp(i32 7, i32 6, i32 [[TMP112]], i32 6, i32 4) +// CHECK-NEXT: [[TMP113:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s16.st.incp(i32 7, i32 0, i32 6, i32 [[TMP113]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s32(i32 7, i32 4, i32 7) +// CHECK-NEXT: [[TMP114:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s32.ld.incp(i32 0, i32 1, i32 [[TMP114]], i32 5, i32 4) +// CHECK-NEXT: [[TMP115:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s32.st.incp(i32 1, i32 6, i32 7, i32 [[TMP115]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s8(i32 5, i32 6, i32 4) +// CHECK-NEXT: [[TMP116:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s8.ld.incp(i32 1, i32 6, i32 [[TMP116]], i32 6, i32 5) +// CHECK-NEXT: [[TMP117:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.s8.st.incp(i32 7, i32 7, i32 6, i32 [[TMP117]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u16(i32 7, i32 1, i32 1) +// CHECK-NEXT: [[TMP118:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u16.ld.incp(i32 6, i32 0, i32 [[TMP118]], i32 3, i32 0) +// CHECK-NEXT: [[TMP119:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u16.st.incp(i32 0, i32 7, i32 5, i32 [[TMP119]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u32(i32 6, i32 5, i32 0) +// 
CHECK-NEXT: [[TMP120:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u32.ld.incp(i32 3, i32 7, i32 [[TMP120]], i32 1, i32 4) +// CHECK-NEXT: [[TMP121:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u32.st.incp(i32 1, i32 0, i32 2, i32 [[TMP121]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u8(i32 2, i32 0, i32 7) +// CHECK-NEXT: [[TMP122:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u8.ld.incp(i32 4, i32 2, i32 [[TMP122]], i32 4, i32 3) +// CHECK-NEXT: [[TMP123:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmin.u8.st.incp(i32 1, i32 7, i32 4, i32 [[TMP123]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16(i32 7, i32 5, i32 3) +// CHECK-NEXT: [[TMP124:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16.ld.incp(i32 5, i32 4, i32 [[TMP124]], i32 1, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16.s8xs8(i32 7, i32 6, i32 4, i32 4) +// CHECK-NEXT: [[TMP125:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s16.st.incp(i32 0, i32 1, i32 5, i32 [[TMP125]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s32.s16xs16(i32 5, i32 3, i32 1, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s8(i32 1, i32 6, i32 0) +// CHECK-NEXT: [[TMP126:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s8.ld.incp(i32 2, i32 1, i32 [[TMP126]], i32 6, i32 5) +// CHECK-NEXT: [[TMP127:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.s8.st.incp(i32 5, i32 2, i32 1, i32 [[TMP127]], i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u16(i32 7, i32 3, i32 6) +// CHECK-NEXT: [[TMP128:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u16.ld.incp(i32 3, i32 3, i32 [[TMP128]], i32 2, i32 0) +// CHECK-NEXT: [[TMP129:%.*]] = load 
i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u16.st.incp(i32 6, i32 5, i32 0, i32 [[TMP129]], i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u8(i32 2, i32 2, i32 7) +// CHECK-NEXT: [[TMP130:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u8.ld.incp(i32 1, i32 1, i32 [[TMP130]], i32 6, i32 7) +// CHECK-NEXT: [[TMP131:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vmul.u8.st.incp(i32 5, i32 0, i32 6, i32 [[TMP131]], i32 2) +// CHECK-NEXT: [[TMP132:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vprelu.s16(i32 [[TMP132]], i32 0, i32 7, i32 3) +// CHECK-NEXT: [[TMP133:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vprelu.s8(i32 [[TMP133]], i32 6, i32 6, i32 6) +// CHECK-NEXT: [[TMP134:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP135:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vrelu.s16(i32 [[TMP134]], i32 [[TMP135]], i32 3) +// CHECK-NEXT: [[TMP136:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP137:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vrelu.s8(i32 [[TMP136]], i32 [[TMP137]], i32 7) +// CHECK-NEXT: [[TMP138:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.s16(i32 [[TMP138]], i32 5, i32 4) +// CHECK-NEXT: [[TMP139:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.s8(i32 [[TMP139]], i32 6, i32 6) +// CHECK-NEXT: [[TMP140:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.u16(i32 [[TMP140]], i32 7, i32 2) +// CHECK-NEXT: [[TMP141:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsadds.u8(i32 [[TMP141]], i32 2, i32 0) +// CHECK-NEXT: [[TMP142:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP143:%.*]] = load i32, ptr [[DATA]], 
align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.s16(i32 [[TMP142]], i32 [[TMP143]], i32 7, i32 5) +// CHECK-NEXT: [[TMP144:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP145:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.s32(i32 [[TMP144]], i32 [[TMP145]], i32 2, i32 5) +// CHECK-NEXT: [[TMP146:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP147:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.s8(i32 [[TMP146]], i32 [[TMP147]], i32 2, i32 5) +// CHECK-NEXT: [[TMP148:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP149:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u16(i32 [[TMP148]], i32 [[TMP149]], i32 0, i32 2) +// CHECK-NEXT: [[TMP150:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP151:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u32(i32 [[TMP150]], i32 [[TMP151]], i32 4, i32 2) +// CHECK-NEXT: [[TMP152:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP153:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsat.u8(i32 [[TMP152]], i32 [[TMP153]], i32 0, i32 2) +// CHECK-NEXT: [[TMP154:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.s16(i32 [[TMP154]], i32 3, i32 6) +// CHECK-NEXT: [[TMP155:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.s8(i32 [[TMP155]], i32 5, i32 5) +// CHECK-NEXT: [[TMP156:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.u16(i32 [[TMP156]], i32 6, i32 3) +// CHECK-NEXT: [[TMP157:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vssubs.u8(i32 [[TMP157]], i32 0, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s16(i32 0, i32 5, i32 3) +// CHECK-NEXT: [[TMP158:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void 
@llvm.riscv.esp.vsub.s16.ld.incp(i32 0, i32 1, i32 [[TMP158]], i32 5, i32 3) +// CHECK-NEXT: [[TMP159:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s16.st.incp(i32 5, i32 7, i32 7, i32 [[TMP159]], i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s32(i32 3, i32 0, i32 3) +// CHECK-NEXT: [[TMP160:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s32.ld.incp(i32 1, i32 2, i32 [[TMP160]], i32 0, i32 2) +// CHECK-NEXT: [[TMP161:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s32.st.incp(i32 4, i32 0, i32 0, i32 [[TMP161]], i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s8(i32 4, i32 1, i32 3) +// CHECK-NEXT: [[TMP162:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s8.ld.incp(i32 3, i32 7, i32 [[TMP162]], i32 3, i32 5) +// CHECK-NEXT: [[TMP163:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.s8.st.incp(i32 5, i32 7, i32 3, i32 [[TMP163]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u16(i32 4, i32 6, i32 5) +// CHECK-NEXT: [[TMP164:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u16.ld.incp(i32 4, i32 7, i32 [[TMP164]], i32 0, i32 5) +// CHECK-NEXT: [[TMP165:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u16.st.incp(i32 2, i32 2, i32 7, i32 [[TMP165]], i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u32(i32 0, i32 1, i32 2) +// CHECK-NEXT: [[TMP166:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u32.ld.incp(i32 5, i32 6, i32 [[TMP166]], i32 3, i32 5) +// CHECK-NEXT: [[TMP167:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u32.st.incp(i32 0, i32 1, i32 4, i32 [[TMP167]], i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u8(i32 4, i32 2, i32 7) +// CHECK-NEXT: [[TMP168:%.*]] = load i32, ptr 
[[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u8.ld.incp(i32 2, i32 7, i32 [[TMP168]], i32 3, i32 4) +// CHECK-NEXT: [[TMP169:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vsub.u8.st.incp(i32 6, i32 4, i32 7, i32 [[TMP169]], i32 7) +// CHECK-NEXT: [[TMP170:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP171:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP172:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.addx2(i32 [[TMP170]], i32 [[TMP171]], i32 [[TMP172]]) +// CHECK-NEXT: [[TMP173:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP174:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP175:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.addx4(i32 [[TMP173]], i32 [[TMP174]], i32 [[TMP175]]) +// CHECK-NEXT: [[TMP176:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP177:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP178:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.sat(i32 [[TMP176]], i32 [[TMP177]], i32 [[TMP178]]) +// CHECK-NEXT: [[TMP179:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP180:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP181:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.subx2(i32 [[TMP179]], i32 [[TMP180]], i32 [[TMP181]]) +// CHECK-NEXT: [[TMP182:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP183:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP184:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.subx4(i32 [[TMP182]], i32 [[TMP183]], i32 [[TMP184]]) +// CHECK-NEXT: call void @llvm.riscv.esp.andq(i32 0, i32 1, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.notq(i32 0, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.orq(i32 0, i32 6, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.xorq(i32 7, i32 4, 
i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s16(i32 6, i32 6, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s32(i32 6, i32 2, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.s8(i32 7, i32 6, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.u16(i32 0, i32 2, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.u32(i32 6, i32 4, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.eq.u8(i32 6, i32 4, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.s16(i32 5, i32 3, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.s32(i32 2, i32 4, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.s8(i32 7, i32 7, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u16(i32 2, i32 7, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u32(i32 6, i32 4, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.gt.u8(i32 0, i32 4, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s16(i32 4, i32 6, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s32(i32 2, i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.s8(i32 3, i32 0, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u16(i32 2, i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u32(i32 2, i32 0, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vcmp.lt.u8(i32 0, i32 2, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.s16.qacc(i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.s8.qacc(i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.u16.qacc(i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.mov.u8.qacc(i32 5) +// CHECK-NEXT: [[TMP185:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.16.a(i32 2, i32 8, i32 [[TMP185]]) +// CHECK-NEXT: [[TMP186:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.16.q(i32 [[TMP186]], i32 12, i32 1) +// CHECK-NEXT: [[TMP187:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void 
@llvm.riscv.esp.movi.32.a(i32 4, i32 2, i32 [[TMP187]]) +// CHECK-NEXT: [[TMP188:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.32.q(i32 [[TMP188]], i32 1, i32 0) +// CHECK-NEXT: [[TMP189:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.8.a(i32 0, i32 13, i32 [[TMP189]]) +// CHECK-NEXT: [[TMP190:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movi.8.q(i32 [[TMP190]], i32 14, i32 3) +// CHECK-NEXT: [[TMP191:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.cfg(i32 [[TMP191]]) +// CHECK-NEXT: [[TMP192:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.fft.bit.width(i32 [[TMP192]]) +// CHECK-NEXT: [[TMP193:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP194:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.perf(i32 [[TMP193]], i32 [[TMP194]]) +// CHECK-NEXT: [[TMP195:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.sar(i32 [[TMP195]]) +// CHECK-NEXT: [[TMP196:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.sar.bytes(i32 [[TMP196]]) +// CHECK-NEXT: [[TMP197:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.xacc.h(i32 [[TMP197]]) +// CHECK-NEXT: [[TMP198:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.r.xacc.l(i32 [[TMP198]]) +// CHECK-NEXT: [[TMP199:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.cfg(i32 [[TMP199]]) +// CHECK-NEXT: [[TMP200:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.fft.bit.width(i32 [[TMP200]]) +// CHECK-NEXT: [[TMP201:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.perf(i32 [[TMP201]]) +// CHECK-NEXT: [[TMP202:%.*]] = load i32, ptr 
[[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.sar(i32 [[TMP202]]) +// CHECK-NEXT: [[TMP203:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.sar.bytes(i32 [[TMP203]]) +// CHECK-NEXT: [[TMP204:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.xacc.h(i32 [[TMP204]]) +// CHECK-NEXT: [[TMP205:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.movx.w.xacc.l(i32 [[TMP205]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.s16(i32 0, i32 4, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.s8(i32 0, i32 7, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.u16(i32 1, i32 0, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vext.u8(i32 4, i32 1, i32 6) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzip.16(i32 3, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzip.32(i32 6, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzip.8(i32 3, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzipt.16(i32 1, i32 5, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vunzipt.8(i32 7, i32 5, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vzip.16(i32 2, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vzip.32(i32 0, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.vzip.8(i32 6, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vzipt.16(i32 6, i32 3, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vzipt.8(i32 7, i32 0, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.zero.q(i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.zero.qacc() +// CHECK-NEXT: call void @llvm.riscv.esp.zero.xacc() +// CHECK-NEXT: [[TMP206:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.ld.incp(i32 1, i32 1, i32 3, i32 [[TMP206]], i32 0, i32 6, i32 0, i32 3) +// CHECK-NEXT: [[TMP207:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32 3, i32 0, i32 1, i32 [[TMP207]], i32 0, 
i32 3, i32 3, i32 1) +// CHECK-NEXT: [[TMP208:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32 2, i32 3, i32 7, i32 [[TMP208]], i32 0, i32 1, i32 1, i32 4) +// CHECK-NEXT: [[TMP209:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP210:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.ams.s16.st.incp(i32 4, i32 4, i32 0, i32 5, i32 [[TMP209]], i32 [[TMP210]], i32 1, i32 1) +// CHECK-NEXT: [[TMP211:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.bitrev(i32 [[TMP211]], i32 6) +// CHECK-NEXT: [[TMP212:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP213:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.cmul.s16.ld.xp(i32 [[TMP212]], i32 7, i32 0, i32 [[TMP213]], i32 2, i32 1, i32 2) +// CHECK-NEXT: [[TMP214:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP215:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.cmul.s16.st.xp(i32 [[TMP214]], i32 6, i32 0, i32 7, i32 [[TMP215]], i32 0, i32 1, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.fft.r2bf.s16(i32 2, i32 5, i32 0, i32 7, i32 5) +// CHECK-NEXT: [[TMP216:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32 1, i32 7, i32 [[TMP216]], i32 1, i32 6) +// CHECK-NEXT: [[TMP217:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.fft.vst.r32.decp(i32 2, i32 [[TMP217]], i32 1) +// CHECK-NEXT: [[TMP218:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.128.usar.ip(i32 [[TMP218]], i32 -464, i32 7) +// CHECK-NEXT: [[TMP219:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP220:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.128.usar.xp(i32 [[TMP219]], i32 [[TMP220]], i32 0) +// CHECK-NEXT: [[TMP221:%.*]] = load i32, ptr 
[[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.xacc.ip(i32 [[TMP221]], i32 -224) +// CHECK-NEXT: [[TMP222:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s16.128.ip(i32 [[TMP222]], i32 288) +// CHECK-NEXT: [[TMP223:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP224:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s16.128.xp(i32 [[TMP223]], i32 [[TMP224]]) +// CHECK-NEXT: [[TMP225:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s8.128.ip(i32 [[TMP225]], i32 -1408) +// CHECK-NEXT: [[TMP226:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP227:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.s8.128.xp(i32 [[TMP226]], i32 [[TMP227]]) +// CHECK-NEXT: [[TMP228:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.u16.128.ip(i32 [[TMP228]], i32 -1440) +// CHECK-NEXT: [[TMP229:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP230:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.u16.128.xp(i32 [[TMP229]], i32 [[TMP230]]) +// CHECK-NEXT: [[TMP231:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.u8.128.ip(i32 [[TMP231]], i32 -816) +// CHECK-NEXT: [[TMP232:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP233:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldqa.u8.128.xp(i32 [[TMP232]], i32 [[TMP233]]) +// CHECK-NEXT: [[TMP234:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.16.ip(i32 [[TMP234]], i32 380, i32 2) +// CHECK-NEXT: [[TMP235:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP236:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.16.xp(i32 [[TMP235]], i32 [[TMP236]], i32 3) +// CHECK-NEXT: [[TMP237:%.*]] = load i32, ptr 
[[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.32.ip(i32 [[TMP237]], i32 -292, i32 7) +// CHECK-NEXT: [[TMP238:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP239:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.32.xp(i32 [[TMP238]], i32 [[TMP239]], i32 1) +// CHECK-NEXT: [[TMP240:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.8.ip(i32 [[TMP240]], i32 -416, i32 5) +// CHECK-NEXT: [[TMP241:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP242:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldbc.8.xp(i32 [[TMP241]], i32 [[TMP242]], i32 7) +// CHECK-NEXT: [[TMP243:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s16.ip(i32 [[TMP243]], i32 -80, i32 0, i32 3) +// CHECK-NEXT: [[TMP244:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP245:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s16.xp(i32 [[TMP244]], i32 [[TMP245]], i32 2, i32 5) +// CHECK-NEXT: [[TMP246:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s8.ip(i32 [[TMP246]], i32 0, i32 2, i32 7) +// CHECK-NEXT: [[TMP247:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP248:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.s8.xp(i32 [[TMP247]], i32 [[TMP248]], i32 7, i32 5) +// CHECK-NEXT: [[TMP249:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u16.ip(i32 [[TMP249]], i32 32, i32 0, i32 6) +// CHECK-NEXT: [[TMP250:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP251:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u16.xp(i32 [[TMP250]], i32 [[TMP251]], i32 7, i32 6) +// CHECK-NEXT: [[TMP252:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u8.ip(i32 
[[TMP252]], i32 -16, i32 3, i32 1) +// CHECK-NEXT: [[TMP253:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP254:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldext.u8.xp(i32 [[TMP253]], i32 [[TMP254]], i32 5, i32 4) +// CHECK-NEXT: [[TMP255:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vldhbc.16.incp(i32 [[TMP255]], i32 2, i32 3) +// CHECK-NEXT: [[TMP256:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32 [[TMP256]], i32 -240) +// CHECK-NEXT: [[TMP257:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32 [[TMP257]], i32 -32) +// CHECK-NEXT: [[TMP258:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32 [[TMP258]], i32 -64) +// CHECK-NEXT: [[TMP259:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32 [[TMP259]], i32 -80) +// CHECK-NEXT: [[TMP260:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ld.ua.state.ip(i32 [[TMP260]], i32 1504) +// CHECK-NEXT: [[TMP261:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.ldxq.32(i32 [[TMP261]], i32 6, i32 1, i32 7, i32 1) +// CHECK-NEXT: [[TMP262:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.h.h.128.ip(i32 [[TMP262]], i32 -480) +// CHECK-NEXT: [[TMP263:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.h.l.128.ip(i32 [[TMP263]], i32 -1712) +// CHECK-NEXT: [[TMP264:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.l.h.128.ip(i32 [[TMP264]], i32 960) +// CHECK-NEXT: [[TMP265:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.qacc.l.l.128.ip(i32 [[TMP265]], i32 1920) +// CHECK-NEXT: [[TMP266:%.*]] = load i32, ptr [[DATA]], 
align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.ua.state.ip(i32 [[TMP266]], i32 -1360) +// CHECK-NEXT: [[TMP267:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.stxq.32(i32 [[TMP267]], i32 6, i32 2, i32 3, i32 0) +// CHECK-NEXT: [[TMP268:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.128.ip(i32 [[TMP268]], i32 -1136, i32 0) +// CHECK-NEXT: [[TMP269:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP270:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.128.xp(i32 [[TMP269]], i32 [[TMP270]], i32 5) +// CHECK-NEXT: [[TMP271:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.h.64.ip(i32 [[TMP271]], i32 1008, i32 4) +// CHECK-NEXT: [[TMP272:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP273:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.h.64.xp(i32 [[TMP272]], i32 [[TMP273]], i32 2) +// CHECK-NEXT: [[TMP274:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.l.64.ip(i32 [[TMP274]], i32 -304, i32 6) +// CHECK-NEXT: [[TMP275:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP276:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vld.l.64.xp(i32 [[TMP275]], i32 [[TMP276]], i32 6) +// CHECK-NEXT: [[TMP277:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.128.ip(i32 0, i32 [[TMP277]], i32 -1216) +// CHECK-NEXT: [[TMP278:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP279:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.128.xp(i32 [[TMP278]], i32 6, i32 [[TMP279]]) +// CHECK-NEXT: [[TMP280:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.h.64.ip(i32 1, i32 [[TMP280]], i32 -456) +// CHECK-NEXT: [[TMP281:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP282:%.*]] = 
load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.h.64.xp(i32 [[TMP281]], i32 2, i32 [[TMP282]]) +// CHECK-NEXT: [[TMP283:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.l.64.ip(i32 6, i32 [[TMP283]], i32 664) +// CHECK-NEXT: [[TMP284:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP285:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.vst.l.64.xp(i32 [[TMP284]], i32 4, i32 [[TMP285]]) +// CHECK-NEXT: call void @llvm.riscv.esp.slci.2q(i32 2, i32 0, i32 14) +// CHECK-NEXT: [[TMP286:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP287:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.slcxxp.2q(i32 [[TMP286]], i32 [[TMP287]], i32 0, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.src.q(i32 7, i32 3, i32 2) +// CHECK-NEXT: [[TMP288:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.src.q.ld.ip(i32 1, i32 [[TMP288]], i32 4, i32 1168, i32 4) +// CHECK-NEXT: [[TMP289:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP290:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.src.q.ld.xp(i32 [[TMP289]], i32 0, i32 [[TMP290]], i32 1, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.src.q.qup(i32 3, i32 3, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.srci.2q(i32 7, i32 4, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s16.q.qacc(i32 2, i32 1, i32 5) +// CHECK-NEXT: [[TMP291:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s16.qacc(i32 [[TMP291]], i32 0, i32 7) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s8.q.qacc(i32 7, i32 0, i32 3) +// CHECK-NEXT: [[TMP292:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.s8.qacc(i32 [[TMP292]], i32 1, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u16.q.qacc(i32 6, i32 1, i32 0) +// CHECK-NEXT: [[TMP293:%.*]] = load i32, 
ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u16.qacc(i32 [[TMP293]], i32 0, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u8.q.qacc(i32 6, i32 0, i32 7) +// CHECK-NEXT: [[TMP294:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcmb.u8.qacc(i32 [[TMP294]], i32 1, i32 2) +// CHECK-NEXT: [[TMP295:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcq.128.st.incp(i32 0, i32 5, i32 [[TMP295]]) +// CHECK-NEXT: [[TMP296:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP297:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srcxxp.2q(i32 [[TMP296]], i32 [[TMP297]], i32 7, i32 5) +// CHECK-NEXT: [[TMP298:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP299:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srs.s.xacc(i32 [[TMP298]], i32 [[TMP299]]) +// CHECK-NEXT: [[TMP300:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: [[TMP301:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.srs.u.xacc(i32 [[TMP300]], i32 [[TMP301]]) +// CHECK-NEXT: call void @llvm.riscv.esp.vsl.32(i32 0, i32 3) +// CHECK-NEXT: call void @llvm.riscv.esp.vsld.16(i32 6, i32 4, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vsld.32(i32 2, i32 7, i32 5) +// CHECK-NEXT: call void @llvm.riscv.esp.vsld.8(i32 1, i32 0, i32 0) +// CHECK-NEXT: call void @llvm.riscv.esp.vsr.s32(i32 6, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vsr.u32(i32 3, i32 2) +// CHECK-NEXT: call void @llvm.riscv.esp.vsrd.16(i32 6, i32 2, i32 1) +// CHECK-NEXT: call void @llvm.riscv.esp.vsrd.32(i32 7, i32 5, i32 4) +// CHECK-NEXT: call void @llvm.riscv.esp.vsrd.8(i32 2, i32 1, i32 4) +// CHECK-NEXT: [[TMP302:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call void @llvm.riscv.esp.st.s.xacc.ip(i32 [[TMP302]], i32 912) +// CHECK-NEXT: [[TMP303:%.*]] = load i32, ptr [[DATA]], align 4 +// CHECK-NEXT: call 
void @llvm.riscv.esp.st.u.xacc.ip(i32 [[TMP303]], i32 -112) +// CHECK-NEXT: ret void +// +void test() { + uint32_t data = 10; + __builtin_riscv_esp_vcmulas_s16_qacc_h(4, 2); +__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_ip(4, 0, data, -96, 3); +__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_xp(data, 5, 5, data, 5); +__builtin_riscv_esp_vcmulas_s16_qacc_l(6, 1); +__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_ip(2, 3, data, -48, 3); +__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_xp(data, 7, 2, data, 1); +__builtin_riscv_esp_vcmulas_s8_qacc_h(4, 4); +__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_ip(7, 4, data, -128, 4); +__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_xp(data, 2, 3, data, 1); +__builtin_riscv_esp_vcmulas_s8_qacc_l(6, 4); +__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_ip(5, 5, data, 16, 7); +__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_xp(data, 4, 4, data, 2); +__builtin_riscv_esp_vmulas_s16_qacc(7, 6); +__builtin_riscv_esp_vmulas_s16_qacc_ld_ip(0, 4, data, 96, 4); +__builtin_riscv_esp_vmulas_s16_qacc_ld_xp(data, 4, 4, data, 7); +__builtin_riscv_esp_vmulas_s16_qacc_st_ip(2, 1, 7, data, -128); +__builtin_riscv_esp_vmulas_s16_qacc_st_xp(data, 1, 2, 6, data); +__builtin_riscv_esp_vmulas_s16_xacc(1, 3); +__builtin_riscv_esp_vmulas_s16_xacc_ld_ip(7, 3, data, -96, 5); +__builtin_riscv_esp_vmulas_s16_xacc_ld_xp(data, 3, 1, data, 1); +__builtin_riscv_esp_vmulas_s16_xacc_st_ip(2, 0, 0, data, 64); +__builtin_riscv_esp_vmulas_s16_xacc_st_xp(data, 6, 3, 6, data); +__builtin_riscv_esp_vmulas_s8_qacc(0, 0); +__builtin_riscv_esp_vmulas_s8_qacc_ld_ip(0, 3, data, 0, 7); +__builtin_riscv_esp_vmulas_s8_qacc_ld_xp(data, 4, 3, data, 4); +__builtin_riscv_esp_vmulas_s8_qacc_st_ip(3, 3, 5, data, -64); +__builtin_riscv_esp_vmulas_s8_qacc_st_xp(data, 4, 7, 0, data); +__builtin_riscv_esp_vmulas_s8_xacc(3, 3); +__builtin_riscv_esp_vmulas_s8_xacc_ld_ip(3, 2, data, 0, 5); +__builtin_riscv_esp_vmulas_s8_xacc_ld_xp(data, 6, 3, data, 0); +__builtin_riscv_esp_vmulas_s8_xacc_st_ip(1, 7, 7, data, -32); 
+__builtin_riscv_esp_vmulas_s8_xacc_st_xp(data, 6, 7, 6, data); +__builtin_riscv_esp_vmulas_u16_qacc(5, 4); +__builtin_riscv_esp_vmulas_u16_qacc_ld_ip(5, 2, data, 64, 6); +__builtin_riscv_esp_vmulas_u16_qacc_ld_xp(data, 5, 7, data, 7); +__builtin_riscv_esp_vmulas_u16_qacc_st_ip(1, 4, 3, data, -96); +__builtin_riscv_esp_vmulas_u16_qacc_st_xp(data, 5, 0, 2, data); +__builtin_riscv_esp_vmulas_u16_xacc(0, 7); +__builtin_riscv_esp_vmulas_u16_xacc_ld_ip(6, 6, data, -96, 4); +__builtin_riscv_esp_vmulas_u16_xacc_ld_xp(data, 6, 5, data, 6); +__builtin_riscv_esp_vmulas_u16_xacc_st_ip(3, 0, 4, data, 64); +__builtin_riscv_esp_vmulas_u16_xacc_st_xp(data, 1, 0, 4, data); +__builtin_riscv_esp_vmulas_u8_qacc(5, 4); +__builtin_riscv_esp_vmulas_u8_qacc_ld_ip(5, 3, data, 80, 5); +__builtin_riscv_esp_vmulas_u8_qacc_ld_xp(data, 4, 7, data, 4); +__builtin_riscv_esp_vmulas_u8_qacc_st_ip(3, 3, 5, data, -96); +__builtin_riscv_esp_vmulas_u8_qacc_st_xp(data, 6, 7, 3, data); +__builtin_riscv_esp_vmulas_u8_xacc(0, 1); +__builtin_riscv_esp_vmulas_u8_xacc_ld_ip(6, 0, data, -32, 7); +__builtin_riscv_esp_vmulas_u8_xacc_ld_xp(data, 3, 3, data, 5); +__builtin_riscv_esp_vmulas_u8_xacc_st_ip(7, 0, 4, data, 32); +__builtin_riscv_esp_vmulas_u8_xacc_st_xp(data, 1, 0, 0, data); +__builtin_riscv_esp_vmulas_s16_qacc_ldbc_incp(3, 6, data, 7); +__builtin_riscv_esp_vmulas_s8_qacc_ldbc_incp(5, 3, data, 6); +__builtin_riscv_esp_vmulas_u16_qacc_ldbc_incp(0, 3, data, 2); +__builtin_riscv_esp_vmulas_u8_qacc_ldbc_incp(4, 7, data, 3); +__builtin_riscv_esp_vsmulas_s16_qacc(7, 7, 4); +__builtin_riscv_esp_vsmulas_s16_qacc_ld_incp(7, 7, data, 4, 1); +__builtin_riscv_esp_vsmulas_s8_qacc(7, 0, 7); +__builtin_riscv_esp_vsmulas_s8_qacc_ld_incp(5, 6, data, 15, 2); +__builtin_riscv_esp_vsmulas_u16_qacc(7, 0, 10); +__builtin_riscv_esp_vsmulas_u16_qacc_ld_incp(7, 6, data, 1, 0); +__builtin_riscv_esp_vsmulas_u8_qacc(3, 6, 5); +__builtin_riscv_esp_vsmulas_u8_qacc_ld_incp(6, 1, data, 4, 0); +__builtin_riscv_esp_cmul_s16(2, 1, 3, 
1); +__builtin_riscv_esp_cmul_s16_ld_incp(2, 7, data, 0, 5, 0); +__builtin_riscv_esp_cmul_s16_st_incp(7, 4, 6, data, 2, 5); +__builtin_riscv_esp_cmul_s8(5, 7, 2, 4); +__builtin_riscv_esp_cmul_s8_ld_incp(0, 6, data, 2, 7, 5); +__builtin_riscv_esp_cmul_s8_st_incp(1, 6, 5, data, 0, 2); +__builtin_riscv_esp_cmul_u16(7, 4, 0, 0); +__builtin_riscv_esp_cmul_u16_ld_incp(2, 0, data, 3, 1, 1); +__builtin_riscv_esp_cmul_u16_st_incp(4, 3, 4, data, 1, 2); +__builtin_riscv_esp_cmul_u8(3, 4, 1, 5); +__builtin_riscv_esp_cmul_u8_ld_incp(5, 0, data, 1, 5, 1); +__builtin_riscv_esp_cmul_u8_st_incp(2, 7, 4, data, 3, 1); +__builtin_riscv_esp_max_s16_a(2, data); +__builtin_riscv_esp_max_s32_a(0, data); +__builtin_riscv_esp_max_s8_a(7, data); +__builtin_riscv_esp_max_u16_a(4, data); +__builtin_riscv_esp_max_u32_a(4, data); +__builtin_riscv_esp_max_u8_a(3, data); +__builtin_riscv_esp_min_s16_a(0, data); +__builtin_riscv_esp_min_s32_a(7, data); +__builtin_riscv_esp_min_s8_a(4, data); +__builtin_riscv_esp_min_u16_a(7, data); +__builtin_riscv_esp_min_u32_a(6, data); +__builtin_riscv_esp_min_u8_a(1, data); +__builtin_riscv_esp_vabs_16(7, 0); +__builtin_riscv_esp_vabs_32(0, 3); +__builtin_riscv_esp_vabs_8(5, 2); +__builtin_riscv_esp_vadd_s16(0, 4, 0); +__builtin_riscv_esp_vadd_s16_ld_incp(4, 2, data, 0, 7); +__builtin_riscv_esp_vadd_s16_st_incp(5, 7, 0, data, 5); +__builtin_riscv_esp_vadd_s32(6, 5, 0); +__builtin_riscv_esp_vadd_s32_ld_incp(5, 6, data, 0, 2); +__builtin_riscv_esp_vadd_s32_st_incp(7, 7, 0, data, 1); +__builtin_riscv_esp_vadd_s8(6, 5, 5); +__builtin_riscv_esp_vadd_s8_ld_incp(2, 4, data, 6, 7); +__builtin_riscv_esp_vadd_s8_st_incp(4, 6, 4, data, 7); +__builtin_riscv_esp_vadd_u16(0, 6, 5); +__builtin_riscv_esp_vadd_u16_ld_incp(6, 7, data, 5, 1); +__builtin_riscv_esp_vadd_u16_st_incp(1, 3, 4, data, 5); +__builtin_riscv_esp_vadd_u32(7, 3, 0); +__builtin_riscv_esp_vadd_u32_ld_incp(0, 4, data, 5, 5); +__builtin_riscv_esp_vadd_u32_st_incp(1, 5, 6, data, 1); 
+__builtin_riscv_esp_vadd_u8(0, 1, 5); +__builtin_riscv_esp_vadd_u8_ld_incp(5, 1, data, 2, 6); +__builtin_riscv_esp_vadd_u8_st_incp(1, 7, 4, data, 2); +__builtin_riscv_esp_vclamp_s16(3, 12, 5); +__builtin_riscv_esp_vmax_s16(1, 2, 2); +__builtin_riscv_esp_vmax_s16_ld_incp(3, 0, data, 5, 1); +__builtin_riscv_esp_vmax_s16_st_incp(0, 4, 2, data, 3); +__builtin_riscv_esp_vmax_s32(0, 2, 4); +__builtin_riscv_esp_vmax_s32_ld_incp(3, 5, data, 3, 6); +__builtin_riscv_esp_vmax_s32_st_incp(6, 0, 7, data, 4); +__builtin_riscv_esp_vmax_s8(1, 0, 3); +__builtin_riscv_esp_vmax_s8_ld_incp(1, 6, data, 6, 6); +__builtin_riscv_esp_vmax_s8_st_incp(2, 7, 1, data, 0); +__builtin_riscv_esp_vmax_u16(6, 6, 3); +__builtin_riscv_esp_vmax_u16_ld_incp(5, 2, data, 2, 1); +__builtin_riscv_esp_vmax_u16_st_incp(3, 6, 2, data, 0); +__builtin_riscv_esp_vmax_u32(2, 3, 3); +__builtin_riscv_esp_vmax_u32_ld_incp(1, 4, data, 5, 5); +__builtin_riscv_esp_vmax_u32_st_incp(4, 2, 1, data, 4); +__builtin_riscv_esp_vmax_u8(7, 0, 4); +__builtin_riscv_esp_vmax_u8_ld_incp(1, 5, data, 4, 7); +__builtin_riscv_esp_vmax_u8_st_incp(1, 2, 5, data, 7); +__builtin_riscv_esp_vmin_s16(5, 1, 7); +__builtin_riscv_esp_vmin_s16_ld_incp(7, 6, data, 6, 4); +__builtin_riscv_esp_vmin_s16_st_incp(7, 0, 6, data, 2); +__builtin_riscv_esp_vmin_s32(7, 4, 7); +__builtin_riscv_esp_vmin_s32_ld_incp(0, 1, data, 5, 4); +__builtin_riscv_esp_vmin_s32_st_incp(1, 6, 7, data, 4); +__builtin_riscv_esp_vmin_s8(5, 6, 4); +__builtin_riscv_esp_vmin_s8_ld_incp(1, 6, data, 6, 5); +__builtin_riscv_esp_vmin_s8_st_incp(7, 7, 6, data, 7); +__builtin_riscv_esp_vmin_u16(7, 1, 1); +__builtin_riscv_esp_vmin_u16_ld_incp(6, 0, data, 3, 0); +__builtin_riscv_esp_vmin_u16_st_incp(0, 7, 5, data, 3); +__builtin_riscv_esp_vmin_u32(6, 5, 0); +__builtin_riscv_esp_vmin_u32_ld_incp(3, 7, data, 1, 4); +__builtin_riscv_esp_vmin_u32_st_incp(1, 0, 2, data, 3); +__builtin_riscv_esp_vmin_u8(2, 0, 7); +__builtin_riscv_esp_vmin_u8_ld_incp(4, 2, data, 4, 3); 
+__builtin_riscv_esp_vmin_u8_st_incp(1, 7, 4, data, 4); +__builtin_riscv_esp_vmul_s16(7, 5, 3); +__builtin_riscv_esp_vmul_s16_ld_incp(5, 4, data, 1, 6); +__builtin_riscv_esp_vmul_s16_s8xs8(7, 6, 4, 4); +__builtin_riscv_esp_vmul_s16_st_incp(0, 1, 5, data, 7); +__builtin_riscv_esp_vmul_s32_s16xs16(5, 3, 1, 2); +__builtin_riscv_esp_vmul_s8(1, 6, 0); +__builtin_riscv_esp_vmul_s8_ld_incp(2, 1, data, 6, 5); +__builtin_riscv_esp_vmul_s8_st_incp(5, 2, 1, data, 7); +__builtin_riscv_esp_vmul_u16(7, 3, 6); +__builtin_riscv_esp_vmul_u16_ld_incp(3, 3, data, 2, 0); +__builtin_riscv_esp_vmul_u16_st_incp(6, 5, 0, data, 1); +__builtin_riscv_esp_vmul_u8(2, 2, 7); +__builtin_riscv_esp_vmul_u8_ld_incp(1, 1, data, 6, 7); +__builtin_riscv_esp_vmul_u8_st_incp(5, 0, 6, data, 2); +__builtin_riscv_esp_vprelu_s16(data, 0, 7, 3); +__builtin_riscv_esp_vprelu_s8(data, 6, 6, 6); +__builtin_riscv_esp_vrelu_s16(data, data, 3); +__builtin_riscv_esp_vrelu_s8(data, data, 7); +__builtin_riscv_esp_vsadds_s16(data, 5, 4); +__builtin_riscv_esp_vsadds_s8(data, 6, 6); +__builtin_riscv_esp_vsadds_u16(data, 7, 2); +__builtin_riscv_esp_vsadds_u8(data, 2, 0); +__builtin_riscv_esp_vsat_s16(data, data, 7, 5); +__builtin_riscv_esp_vsat_s32(data, data, 2, 5); +__builtin_riscv_esp_vsat_s8(data, data, 2, 5); +__builtin_riscv_esp_vsat_u16(data, data, 0, 2); +__builtin_riscv_esp_vsat_u32(data, data, 4, 2); +__builtin_riscv_esp_vsat_u8(data, data, 0, 2); +__builtin_riscv_esp_vssubs_s16(data, 3, 6); +__builtin_riscv_esp_vssubs_s8(data, 5, 5); +__builtin_riscv_esp_vssubs_u16(data, 6, 3); +__builtin_riscv_esp_vssubs_u8(data, 0, 3); +__builtin_riscv_esp_vsub_s16(0, 5, 3); +__builtin_riscv_esp_vsub_s16_ld_incp(0, 1, data, 5, 3); +__builtin_riscv_esp_vsub_s16_st_incp(5, 7, 7, data, 4); +__builtin_riscv_esp_vsub_s32(3, 0, 3); +__builtin_riscv_esp_vsub_s32_ld_incp(1, 2, data, 0, 2); +__builtin_riscv_esp_vsub_s32_st_incp(4, 0, 0, data, 5); +__builtin_riscv_esp_vsub_s8(4, 1, 3); +__builtin_riscv_esp_vsub_s8_ld_incp(3, 7, data, 
3, 5); +__builtin_riscv_esp_vsub_s8_st_incp(5, 7, 3, data, 3); +__builtin_riscv_esp_vsub_u16(4, 6, 5); +__builtin_riscv_esp_vsub_u16_ld_incp(4, 7, data, 0, 5); +__builtin_riscv_esp_vsub_u16_st_incp(2, 2, 7, data, 3); +__builtin_riscv_esp_vsub_u32(0, 1, 2); +__builtin_riscv_esp_vsub_u32_ld_incp(5, 6, data, 3, 5); +__builtin_riscv_esp_vsub_u32_st_incp(0, 1, 4, data, 2); +__builtin_riscv_esp_vsub_u8(4, 2, 7); +__builtin_riscv_esp_vsub_u8_ld_incp(2, 7, data, 3, 4); +__builtin_riscv_esp_vsub_u8_st_incp(6, 4, 7, data, 7); +__builtin_riscv_esp_addx2(data, data, data); +__builtin_riscv_esp_addx4(data, data, data); +__builtin_riscv_esp_sat(data, data, data); +__builtin_riscv_esp_subx2(data, data, data); +__builtin_riscv_esp_subx4(data, data, data); +__builtin_riscv_esp_andq(0, 1, 4); +__builtin_riscv_esp_notq(0, 1); +__builtin_riscv_esp_orq(0, 6, 3); +__builtin_riscv_esp_xorq(7, 4, 7); +__builtin_riscv_esp_vcmp_eq_s16(6, 6, 3); +__builtin_riscv_esp_vcmp_eq_s32(6, 2, 1); +__builtin_riscv_esp_vcmp_eq_s8(7, 6, 0); +__builtin_riscv_esp_vcmp_eq_u16(0, 2, 5); +__builtin_riscv_esp_vcmp_eq_u32(6, 4, 3); +__builtin_riscv_esp_vcmp_eq_u8(6, 4, 5); +__builtin_riscv_esp_vcmp_gt_s16(5, 3, 6); +__builtin_riscv_esp_vcmp_gt_s32(2, 4, 5); +__builtin_riscv_esp_vcmp_gt_s8(7, 7, 4); +__builtin_riscv_esp_vcmp_gt_u16(2, 7, 7); +__builtin_riscv_esp_vcmp_gt_u32(6, 4, 2); +__builtin_riscv_esp_vcmp_gt_u8(0, 4, 4); +__builtin_riscv_esp_vcmp_lt_s16(4, 6, 5); +__builtin_riscv_esp_vcmp_lt_s32(2, 4, 1); +__builtin_riscv_esp_vcmp_lt_s8(3, 0, 2); +__builtin_riscv_esp_vcmp_lt_u16(2, 4, 1); +__builtin_riscv_esp_vcmp_lt_u32(2, 0, 5); +__builtin_riscv_esp_vcmp_lt_u8(0, 2, 5); +__builtin_riscv_esp_mov_s16_qacc(4); +__builtin_riscv_esp_mov_s8_qacc(5); +__builtin_riscv_esp_mov_u16_qacc(5); +__builtin_riscv_esp_mov_u8_qacc(5); +__builtin_riscv_esp_movi_16_a(2, 8, data); +__builtin_riscv_esp_movi_16_q(data, 12, 1); +__builtin_riscv_esp_movi_32_a(4, 2, data); +__builtin_riscv_esp_movi_32_q(data, 1, 0); 
+__builtin_riscv_esp_movi_8_a(0, 13, data); +__builtin_riscv_esp_movi_8_q(data, 14, 3); +__builtin_riscv_esp_movx_r_cfg(data); +__builtin_riscv_esp_movx_r_fft_bit_width(data); +__builtin_riscv_esp_movx_r_perf(data, data); +__builtin_riscv_esp_movx_r_sar(data); +__builtin_riscv_esp_movx_r_sar_bytes(data); +__builtin_riscv_esp_movx_r_xacc_h(data); +__builtin_riscv_esp_movx_r_xacc_l(data); +__builtin_riscv_esp_movx_w_cfg(data); +__builtin_riscv_esp_movx_w_fft_bit_width(data); +__builtin_riscv_esp_movx_w_perf(data); +__builtin_riscv_esp_movx_w_sar(data); +__builtin_riscv_esp_movx_w_sar_bytes(data); +__builtin_riscv_esp_movx_w_xacc_h(data); +__builtin_riscv_esp_movx_w_xacc_l(data); +__builtin_riscv_esp_vext_s16(0, 4, 6); +__builtin_riscv_esp_vext_s8(0, 7, 1); +__builtin_riscv_esp_vext_u16(1, 0, 6); +__builtin_riscv_esp_vext_u8(4, 1, 6); +__builtin_riscv_esp_vunzip_16(3, 2); +__builtin_riscv_esp_vunzip_32(6, 1); +__builtin_riscv_esp_vunzip_8(3, 5); +__builtin_riscv_esp_vunzipt_16(1, 5, 4); +__builtin_riscv_esp_vunzipt_8(7, 5, 7); +__builtin_riscv_esp_vzip_16(2, 2); +__builtin_riscv_esp_vzip_32(0, 7); +__builtin_riscv_esp_vzip_8(6, 4); +__builtin_riscv_esp_vzipt_16(6, 3, 0); +__builtin_riscv_esp_vzipt_8(7, 0, 1); +__builtin_riscv_esp_zero_q(3); +__builtin_riscv_esp_zero_qacc(); +__builtin_riscv_esp_zero_xacc(); +__builtin_riscv_esp_fft_ams_s16_ld_incp(1, 1, 3, data, 0, 6, 0, 3); +__builtin_riscv_esp_fft_ams_s16_ld_incp_uaup(3, 0, 1, data, 0, 3, 3, 1); +__builtin_riscv_esp_fft_ams_s16_ld_r32_decp(2, 3, 7, data, 0, 1, 1, 4); +__builtin_riscv_esp_fft_ams_s16_st_incp(4, 4, 0, 5, data, data, 1, 1); +__builtin_riscv_esp_fft_bitrev(data, 6); +__builtin_riscv_esp_fft_cmul_s16_ld_xp(data, 7, 0, data, 2, 1, 2); +__builtin_riscv_esp_fft_cmul_s16_st_xp(data, 6, 0, 7, data, 0, 1, 0); +__builtin_riscv_esp_fft_r2bf_s16(2, 5, 0, 7, 5); +__builtin_riscv_esp_fft_r2bf_s16_st_incp(1, 7, data, 1, 6); +__builtin_riscv_esp_fft_vst_r32_decp(2, data, 1); +__builtin_riscv_esp_ld_128_usar_ip(data, 
-464, 7); +__builtin_riscv_esp_ld_128_usar_xp(data, data, 0); +__builtin_riscv_esp_ld_xacc_ip(data, -224); +__builtin_riscv_esp_ldqa_s16_128_ip(data, 288); +__builtin_riscv_esp_ldqa_s16_128_xp(data, data); +__builtin_riscv_esp_ldqa_s8_128_ip(data, -1408); +__builtin_riscv_esp_ldqa_s8_128_xp(data, data); +__builtin_riscv_esp_ldqa_u16_128_ip(data, -1440); +__builtin_riscv_esp_ldqa_u16_128_xp(data, data); +__builtin_riscv_esp_ldqa_u8_128_ip(data, -816); +__builtin_riscv_esp_ldqa_u8_128_xp(data, data); +__builtin_riscv_esp_vldbc_16_ip(data, 380, 2); +__builtin_riscv_esp_vldbc_16_xp(data, data, 3); +__builtin_riscv_esp_vldbc_32_ip(data, -292, 7); +__builtin_riscv_esp_vldbc_32_xp(data, data, 1); +__builtin_riscv_esp_vldbc_8_ip(data, -416, 5); +__builtin_riscv_esp_vldbc_8_xp(data, data, 7); +__builtin_riscv_esp_vldext_s16_ip(data, -80, 0, 3); +__builtin_riscv_esp_vldext_s16_xp(data, data, 2, 5); +__builtin_riscv_esp_vldext_s8_ip(data, 0, 2, 7); +__builtin_riscv_esp_vldext_s8_xp(data, data, 7, 5); +__builtin_riscv_esp_vldext_u16_ip(data, 32, 0, 6); +__builtin_riscv_esp_vldext_u16_xp(data, data, 7, 6); +__builtin_riscv_esp_vldext_u8_ip(data, -16, 3, 1); +__builtin_riscv_esp_vldext_u8_xp(data, data, 5, 4); +__builtin_riscv_esp_vldhbc_16_incp(data, 2, 3); +__builtin_riscv_esp_ld_qacc_h_h_128_ip(data, -240); +__builtin_riscv_esp_ld_qacc_h_l_128_ip(data, -32); +__builtin_riscv_esp_ld_qacc_l_h_128_ip(data, -64); +__builtin_riscv_esp_ld_qacc_l_l_128_ip(data, -80); +__builtin_riscv_esp_ld_ua_state_ip(data, 1504); +__builtin_riscv_esp_ldxq_32(data, 6, 1, 7, 1); +__builtin_riscv_esp_st_qacc_h_h_128_ip(data, -480); +__builtin_riscv_esp_st_qacc_h_l_128_ip(data, -1712); +__builtin_riscv_esp_st_qacc_l_h_128_ip(data, 960); +__builtin_riscv_esp_st_qacc_l_l_128_ip(data, 1920); +__builtin_riscv_esp_st_ua_state_ip(data, -1360); +__builtin_riscv_esp_stxq_32(data, 6, 2, 3, 0); +__builtin_riscv_esp_vld_128_ip(data, -1136, 0); +__builtin_riscv_esp_vld_128_xp(data, data, 5); 
+__builtin_riscv_esp_vld_h_64_ip(data, 1008, 4); +__builtin_riscv_esp_vld_h_64_xp(data, data, 2); +__builtin_riscv_esp_vld_l_64_ip(data, -304, 6); +__builtin_riscv_esp_vld_l_64_xp(data, data, 6); +__builtin_riscv_esp_vst_128_ip(0, data, -1216); +__builtin_riscv_esp_vst_128_xp(data, 6, data); +__builtin_riscv_esp_vst_h_64_ip(1, data, -456); +__builtin_riscv_esp_vst_h_64_xp(data, 2, data); +__builtin_riscv_esp_vst_l_64_ip(6, data, 664); +__builtin_riscv_esp_vst_l_64_xp(data, 4, data); +__builtin_riscv_esp_slci_2q(2, 0, 14); +__builtin_riscv_esp_slcxxp_2q(data, data, 0, 1); +__builtin_riscv_esp_src_q(7, 3, 2); +__builtin_riscv_esp_src_q_ld_ip(1, data, 4, 1168, 4); +__builtin_riscv_esp_src_q_ld_xp(data, 0, data, 1, 0); +__builtin_riscv_esp_src_q_qup(3, 3, 0); +__builtin_riscv_esp_srci_2q(7, 4, 1); +__builtin_riscv_esp_srcmb_s16_q_qacc(2, 1, 5); +__builtin_riscv_esp_srcmb_s16_qacc(data, 0, 7); +__builtin_riscv_esp_srcmb_s8_q_qacc(7, 0, 3); +__builtin_riscv_esp_srcmb_s8_qacc(data, 1, 3); +__builtin_riscv_esp_srcmb_u16_q_qacc(6, 1, 0); +__builtin_riscv_esp_srcmb_u16_qacc(data, 0, 0); +__builtin_riscv_esp_srcmb_u8_q_qacc(6, 0, 7); +__builtin_riscv_esp_srcmb_u8_qacc(data, 1, 2); +__builtin_riscv_esp_srcq_128_st_incp(0, 5, data); +__builtin_riscv_esp_srcxxp_2q(data, data, 7, 5); +__builtin_riscv_esp_srs_s_xacc(data, data); +__builtin_riscv_esp_srs_u_xacc(data, data); +__builtin_riscv_esp_vsl_32(0, 3); +__builtin_riscv_esp_vsld_16(6, 4, 4); +__builtin_riscv_esp_vsld_32(2, 7, 5); +__builtin_riscv_esp_vsld_8(1, 0, 0); +__builtin_riscv_esp_vsr_s32(6, 2); +__builtin_riscv_esp_vsr_u32(3, 2); +__builtin_riscv_esp_vsrd_16(6, 2, 1); +__builtin_riscv_esp_vsrd_32(7, 5, 4); +__builtin_riscv_esp_vsrd_8(2, 1, 4); +__builtin_riscv_esp_st_s_xacc_ip(data, 912); +__builtin_riscv_esp_st_u_xacc_ip(data, -112); +} diff --git a/clang/test/Misc/target-invalid-cpu-note.c b/clang/test/Misc/target-invalid-cpu-note.c index dc81fd74c7e5ec..58fb15d3240344 100644 --- 
a/clang/test/Misc/target-invalid-cpu-note.c +++ b/clang/test/Misc/target-invalid-cpu-note.c @@ -81,7 +81,7 @@ // RUN: not %clang_cc1 -triple riscv32 -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix RISCV32 // RISCV32: error: unknown target CPU 'not-a-cpu' -// RISCV32-NEXT: note: valid target CPU values are: generic-rv32, rocket-rv32, sifive-e20, sifive-e21, sifive-e24, sifive-e31, sifive-e34, sifive-e76, syntacore-scr1-base, syntacore-scr1-max{{$}} +// RISCV32-NEXT: note: valid target CPU values are: esp32p4, generic-rv32, rocket-rv32, sifive-e20, sifive-e21, sifive-e24, sifive-e31, sifive-e34, sifive-e76, syntacore-scr1-base, syntacore-scr1-max{{$}} // RUN: not %clang_cc1 -triple riscv64 -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix RISCV64 // RISCV64: error: unknown target CPU 'not-a-cpu' @@ -89,7 +89,7 @@ // RUN: not %clang_cc1 -triple riscv32 -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE-RISCV32 // TUNE-RISCV32: error: unknown target CPU 'not-a-cpu' -// TUNE-RISCV32-NEXT: note: valid target CPU values are: generic-rv32, rocket-rv32, sifive-e20, sifive-e21, sifive-e24, sifive-e31, sifive-e34, sifive-e76, syntacore-scr1-base, syntacore-scr1-max, generic, rocket, sifive-7-series{{$}} +// TUNE-RISCV32-NEXT: note: valid target CPU values are: esp32p4, generic-rv32, rocket-rv32, sifive-e20, sifive-e21, sifive-e24, sifive-e31, sifive-e34, sifive-e76, syntacore-scr1-base, syntacore-scr1-max, generic, rocket, sifive-7-series{{$}} // RUN: not %clang_cc1 -triple riscv64 -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE-RISCV64 // TUNE-RISCV64: error: unknown target CPU 'not-a-cpu' diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td index 9a63d14b0ef0a1..3220057476ea18 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -1889,3 +1889,7 @@ let TargetPrefix = "riscv" in { 
include "llvm/IR/IntrinsicsRISCVXTHead.td" include "llvm/IR/IntrinsicsRISCVXsf.td" include "llvm/IR/IntrinsicsRISCVXCV.td" + +// Generated code +// -------------- +include "llvm/IR/IntrinsicsRISCVESP32P4.td" \ No newline at end of file diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td b/llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td new file mode 100644 index 00000000000000..c1a11f90f12dd2 --- /dev/null +++ b/llvm/include/llvm/IR/IntrinsicsRISCVESP32P4.td @@ -0,0 +1,1065 @@ +let TargetPrefix = "riscv" in { +def int_riscv_esp_vcmulas_s16_qacc_h: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_h">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s16_qacc_h_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s16_qacc_h_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_h_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s16_qacc_l: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_l">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s16_qacc_l_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s16_qacc_l_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s16_qacc_l_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_h: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_h">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_h_ld_ip: 
ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_h_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_h_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_l: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_l">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_l_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmulas_s8_qacc_l_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vcmulas_s8_qacc_l_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_xacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc_st_xp: 
ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_xacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, 
ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_xacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_qacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_qacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_qacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_qacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_st_ip">, + Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_qacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_xacc: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_xacc_ld_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_xacc_ld_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_xacc_st_ip: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_st_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u8_xacc_st_xp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_xacc_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s16_qacc_ldbc_incp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s16_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_s8_qacc_ldbc_incp: ClangBuiltin<"__builtin_riscv_esp_vmulas_s8_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmulas_u16_qacc_ldbc_incp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u16_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def 
int_riscv_esp_vmulas_u8_qacc_ldbc_incp: ClangBuiltin<"__builtin_riscv_esp_vmulas_u8_qacc_ldbc_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_s16_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_s16_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s16_qacc_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_s8_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_s8_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_s8_qacc_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_u16_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_u16_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u16_qacc_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_u8_qacc: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsmulas_u8_qacc_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsmulas_u8_qacc_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s16: ClangBuiltin<"__builtin_riscv_esp_cmul_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, 
ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s8: ClangBuiltin<"__builtin_riscv_esp_cmul_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_u16: ClangBuiltin<"__builtin_riscv_esp_cmul_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_u8: ClangBuiltin<"__builtin_riscv_esp_cmul_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, 
ImmArg>]>; + +def int_riscv_esp_cmul_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_cmul_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_cmul_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_max_s16_a: ClangBuiltin<"__builtin_riscv_esp_max_s16_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_max_s32_a: ClangBuiltin<"__builtin_riscv_esp_max_s32_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_max_s8_a: ClangBuiltin<"__builtin_riscv_esp_max_s8_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_max_u16_a: ClangBuiltin<"__builtin_riscv_esp_max_u16_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_max_u32_a: ClangBuiltin<"__builtin_riscv_esp_max_u32_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_max_u8_a: ClangBuiltin<"__builtin_riscv_esp_max_u8_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_s16_a: ClangBuiltin<"__builtin_riscv_esp_min_s16_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_s32_a: ClangBuiltin<"__builtin_riscv_esp_min_s32_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_s8_a: ClangBuiltin<"__builtin_riscv_esp_min_s8_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_u16_a: ClangBuiltin<"__builtin_riscv_esp_min_u16_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_u32_a: ClangBuiltin<"__builtin_riscv_esp_min_u32_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_min_u8_a: 
ClangBuiltin<"__builtin_riscv_esp_min_u8_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vabs_16: ClangBuiltin<"__builtin_riscv_esp_vabs_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vabs_32: ClangBuiltin<"__builtin_riscv_esp_vabs_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vabs_8: ClangBuiltin<"__builtin_riscv_esp_vabs_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s16: ClangBuiltin<"__builtin_riscv_esp_vadd_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s32: ClangBuiltin<"__builtin_riscv_esp_vadd_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s8: ClangBuiltin<"__builtin_riscv_esp_vadd_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u16: ClangBuiltin<"__builtin_riscv_esp_vadd_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u32: ClangBuiltin<"__builtin_riscv_esp_vadd_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u8: ClangBuiltin<"__builtin_riscv_esp_vadd_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vadd_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vadd_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vclamp_s16: ClangBuiltin<"__builtin_riscv_esp_vclamp_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s16: ClangBuiltin<"__builtin_riscv_esp_vmax_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s32: ClangBuiltin<"__builtin_riscv_esp_vmax_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s8: ClangBuiltin<"__builtin_riscv_esp_vmax_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], 
[ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u16: ClangBuiltin<"__builtin_riscv_esp_vmax_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u32: ClangBuiltin<"__builtin_riscv_esp_vmax_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u8: ClangBuiltin<"__builtin_riscv_esp_vmax_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmax_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmax_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s16: ClangBuiltin<"__builtin_riscv_esp_vmin_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def 
int_riscv_esp_vmin_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s32: ClangBuiltin<"__builtin_riscv_esp_vmin_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s8: ClangBuiltin<"__builtin_riscv_esp_vmin_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u16: ClangBuiltin<"__builtin_riscv_esp_vmin_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def 
int_riscv_esp_vmin_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u32: ClangBuiltin<"__builtin_riscv_esp_vmin_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u8: ClangBuiltin<"__builtin_riscv_esp_vmin_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmin_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmin_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s16: ClangBuiltin<"__builtin_riscv_esp_vmul_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s16_s8xs8: ClangBuiltin<"__builtin_riscv_esp_vmul_s16_s8xs8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def 
int_riscv_esp_vmul_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s32_s16xs16: ClangBuiltin<"__builtin_riscv_esp_vmul_s32_s16xs16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s8: ClangBuiltin<"__builtin_riscv_esp_vmul_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_u16: ClangBuiltin<"__builtin_riscv_esp_vmul_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_u8: ClangBuiltin<"__builtin_riscv_esp_vmul_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vmul_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def 
int_riscv_esp_vmul_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vmul_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vprelu_s16: ClangBuiltin<"__builtin_riscv_esp_vprelu_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vprelu_s8: ClangBuiltin<"__builtin_riscv_esp_vprelu_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vrelu_s16: ClangBuiltin<"__builtin_riscv_esp_vrelu_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vrelu_s8: ClangBuiltin<"__builtin_riscv_esp_vrelu_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vsadds_s16: ClangBuiltin<"__builtin_riscv_esp_vsadds_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsadds_s8: ClangBuiltin<"__builtin_riscv_esp_vsadds_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsadds_u16: ClangBuiltin<"__builtin_riscv_esp_vsadds_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsadds_u8: ClangBuiltin<"__builtin_riscv_esp_vsadds_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsat_s16: ClangBuiltin<"__builtin_riscv_esp_vsat_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsat_s32: ClangBuiltin<"__builtin_riscv_esp_vsat_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsat_s8: ClangBuiltin<"__builtin_riscv_esp_vsat_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + 
+def int_riscv_esp_vsat_u16: ClangBuiltin<"__builtin_riscv_esp_vsat_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsat_u32: ClangBuiltin<"__builtin_riscv_esp_vsat_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsat_u8: ClangBuiltin<"__builtin_riscv_esp_vsat_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vssubs_s16: ClangBuiltin<"__builtin_riscv_esp_vssubs_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vssubs_s8: ClangBuiltin<"__builtin_riscv_esp_vssubs_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vssubs_u16: ClangBuiltin<"__builtin_riscv_esp_vssubs_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vssubs_u8: ClangBuiltin<"__builtin_riscv_esp_vssubs_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s16: ClangBuiltin<"__builtin_riscv_esp_vsub_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s32: ClangBuiltin<"__builtin_riscv_esp_vsub_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s32_ld_incp">, + Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s32_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s8: ClangBuiltin<"__builtin_riscv_esp_vsub_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_s8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_s8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u16: ClangBuiltin<"__builtin_riscv_esp_vsub_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u16_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u32: ClangBuiltin<"__builtin_riscv_esp_vsub_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u32_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u32_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u32_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u32_st_incp">, + Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u8: ClangBuiltin<"__builtin_riscv_esp_vsub_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u8_ld_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u8_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsub_u8_st_incp: ClangBuiltin<"__builtin_riscv_esp_vsub_u8_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_addx2: ClangBuiltin<"__builtin_riscv_esp_addx2">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_addx4: ClangBuiltin<"__builtin_riscv_esp_addx4">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_sat: ClangBuiltin<"__builtin_riscv_esp_sat">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_subx2: ClangBuiltin<"__builtin_riscv_esp_subx2">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_subx4: ClangBuiltin<"__builtin_riscv_esp_subx4">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_andq: ClangBuiltin<"__builtin_riscv_esp_andq">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_notq: ClangBuiltin<"__builtin_riscv_esp_notq">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_orq: ClangBuiltin<"__builtin_riscv_esp_orq">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_xorq: ClangBuiltin<"__builtin_riscv_esp_xorq">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_s16: 
ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_s32: ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_s8: ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_u16: ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_u32: ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_eq_u8: ClangBuiltin<"__builtin_riscv_esp_vcmp_eq_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_s16: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_s32: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_s8: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_u16: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_u32: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_gt_u8: ClangBuiltin<"__builtin_riscv_esp_vcmp_gt_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + 
+def int_riscv_esp_vcmp_lt_s16: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_lt_s32: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_lt_s8: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_lt_u16: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_lt_u32: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vcmp_lt_u8: ClangBuiltin<"__builtin_riscv_esp_vcmp_lt_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_mov_s16_qacc: ClangBuiltin<"__builtin_riscv_esp_mov_s16_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_mov_s8_qacc: ClangBuiltin<"__builtin_riscv_esp_mov_s8_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_mov_u16_qacc: ClangBuiltin<"__builtin_riscv_esp_mov_u16_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_mov_u8_qacc: ClangBuiltin<"__builtin_riscv_esp_mov_u8_qacc">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_movi_16_a: ClangBuiltin<"__builtin_riscv_esp_movi_16_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_movi_16_q: ClangBuiltin<"__builtin_riscv_esp_movi_16_q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_movi_32_a: ClangBuiltin<"__builtin_riscv_esp_movi_32_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def 
int_riscv_esp_movi_32_q: ClangBuiltin<"__builtin_riscv_esp_movi_32_q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_movi_8_a: ClangBuiltin<"__builtin_riscv_esp_movi_8_a">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_movi_8_q: ClangBuiltin<"__builtin_riscv_esp_movi_8_q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_movx_r_cfg: ClangBuiltin<"__builtin_riscv_esp_movx_r_cfg">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_fft_bit_width: ClangBuiltin<"__builtin_riscv_esp_movx_r_fft_bit_width">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_perf: ClangBuiltin<"__builtin_riscv_esp_movx_r_perf">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_sar: ClangBuiltin<"__builtin_riscv_esp_movx_r_sar">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_sar_bytes: ClangBuiltin<"__builtin_riscv_esp_movx_r_sar_bytes">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_xacc_h: ClangBuiltin<"__builtin_riscv_esp_movx_r_xacc_h">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_r_xacc_l: ClangBuiltin<"__builtin_riscv_esp_movx_r_xacc_l">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_cfg: ClangBuiltin<"__builtin_riscv_esp_movx_w_cfg">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_fft_bit_width: ClangBuiltin<"__builtin_riscv_esp_movx_w_fft_bit_width">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_perf: ClangBuiltin<"__builtin_riscv_esp_movx_w_perf">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_sar: ClangBuiltin<"__builtin_riscv_esp_movx_w_sar">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_sar_bytes: ClangBuiltin<"__builtin_riscv_esp_movx_w_sar_bytes">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_xacc_h: 
ClangBuiltin<"__builtin_riscv_esp_movx_w_xacc_h">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_movx_w_xacc_l: ClangBuiltin<"__builtin_riscv_esp_movx_w_xacc_l">, + Intrinsic<[], [llvm_i32_ty], []>; + +def int_riscv_esp_vext_s16: ClangBuiltin<"__builtin_riscv_esp_vext_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vext_s8: ClangBuiltin<"__builtin_riscv_esp_vext_s8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vext_u16: ClangBuiltin<"__builtin_riscv_esp_vext_u16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vext_u8: ClangBuiltin<"__builtin_riscv_esp_vext_u8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vunzip_16: ClangBuiltin<"__builtin_riscv_esp_vunzip_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vunzip_32: ClangBuiltin<"__builtin_riscv_esp_vunzip_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vunzip_8: ClangBuiltin<"__builtin_riscv_esp_vunzip_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vunzipt_16: ClangBuiltin<"__builtin_riscv_esp_vunzipt_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vunzipt_8: ClangBuiltin<"__builtin_riscv_esp_vunzipt_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vzip_16: ClangBuiltin<"__builtin_riscv_esp_vzip_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vzip_32: ClangBuiltin<"__builtin_riscv_esp_vzip_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vzip_8: ClangBuiltin<"__builtin_riscv_esp_vzip_8">, + Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vzipt_16: ClangBuiltin<"__builtin_riscv_esp_vzipt_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vzipt_8: ClangBuiltin<"__builtin_riscv_esp_vzipt_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_zero_q: ClangBuiltin<"__builtin_riscv_esp_zero_q">, + Intrinsic<[], [llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_zero_qacc: ClangBuiltin<"__builtin_riscv_esp_zero_qacc">, + Intrinsic<[], [], []>; + +def int_riscv_esp_zero_xacc: ClangBuiltin<"__builtin_riscv_esp_zero_xacc">, + Intrinsic<[], [], []>; + +def int_riscv_esp_fft_ams_s16_ld_incp: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_ld_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_ams_s16_ld_incp_uaup: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_ld_incp_uaup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_ams_s16_ld_r32_decp: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_ld_r32_decp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_ams_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_fft_ams_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_bitrev: ClangBuiltin<"__builtin_riscv_esp_fft_bitrev">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def 
int_riscv_esp_fft_cmul_s16_ld_xp: ClangBuiltin<"__builtin_riscv_esp_fft_cmul_s16_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_cmul_s16_st_xp: ClangBuiltin<"__builtin_riscv_esp_fft_cmul_s16_st_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_r2bf_s16: ClangBuiltin<"__builtin_riscv_esp_fft_r2bf_s16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_r2bf_s16_st_incp: ClangBuiltin<"__builtin_riscv_esp_fft_r2bf_s16_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_fft_vst_r32_decp: ClangBuiltin<"__builtin_riscv_esp_fft_vst_r32_decp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_ld_128_usar_ip: ClangBuiltin<"__builtin_riscv_esp_ld_128_usar_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_ld_128_usar_xp: ClangBuiltin<"__builtin_riscv_esp_ld_128_usar_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ld_xacc_ip: ClangBuiltin<"__builtin_riscv_esp_ld_xacc_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldqa_s16_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_s16_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldqa_s16_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_s16_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_ldqa_s8_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_s8_128_ip">, + Intrinsic<[], 
[llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldqa_s8_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_s8_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_ldqa_u16_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_u16_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldqa_u16_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_u16_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_ldqa_u8_128_ip: ClangBuiltin<"__builtin_riscv_esp_ldqa_u8_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldqa_u8_128_xp: ClangBuiltin<"__builtin_riscv_esp_ldqa_u8_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_vldbc_16_ip: ClangBuiltin<"__builtin_riscv_esp_vldbc_16_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldbc_16_xp: ClangBuiltin<"__builtin_riscv_esp_vldbc_16_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vldbc_32_ip: ClangBuiltin<"__builtin_riscv_esp_vldbc_32_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldbc_32_xp: ClangBuiltin<"__builtin_riscv_esp_vldbc_32_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vldbc_8_ip: ClangBuiltin<"__builtin_riscv_esp_vldbc_8_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldbc_8_xp: ClangBuiltin<"__builtin_riscv_esp_vldbc_8_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vldext_s16_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_s16_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_s16_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_s16_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_s8_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_s8_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_s8_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_s8_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_u16_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_u16_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_u16_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_u16_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_u8_ip: ClangBuiltin<"__builtin_riscv_esp_vldext_u8_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldext_u8_xp: ClangBuiltin<"__builtin_riscv_esp_vldext_u8_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vldhbc_16_incp: ClangBuiltin<"__builtin_riscv_esp_vldhbc_16_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_ld_qacc_h_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_h_h_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ld_qacc_h_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_h_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ld_qacc_l_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_l_h_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ld_qacc_l_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_ld_qacc_l_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ld_ua_state_ip: 
ClangBuiltin<"__builtin_riscv_esp_ld_ua_state_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_ldxq_32: ClangBuiltin<"__builtin_riscv_esp_ldxq_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_st_qacc_h_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_h_h_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_st_qacc_h_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_h_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_st_qacc_l_h_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_l_h_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_st_qacc_l_l_128_ip: ClangBuiltin<"__builtin_riscv_esp_st_qacc_l_l_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_st_ua_state_ip: ClangBuiltin<"__builtin_riscv_esp_st_ua_state_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_stxq_32: ClangBuiltin<"__builtin_riscv_esp_stxq_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vld_128_ip: ClangBuiltin<"__builtin_riscv_esp_vld_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vld_128_xp: ClangBuiltin<"__builtin_riscv_esp_vld_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vld_h_64_ip: ClangBuiltin<"__builtin_riscv_esp_vld_h_64_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vld_h_64_xp: ClangBuiltin<"__builtin_riscv_esp_vld_h_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vld_l_64_ip: ClangBuiltin<"__builtin_riscv_esp_vld_l_64_ip">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vld_l_64_xp: ClangBuiltin<"__builtin_riscv_esp_vld_l_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vst_128_ip: ClangBuiltin<"__builtin_riscv_esp_vst_128_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vst_128_xp: ClangBuiltin<"__builtin_riscv_esp_vst_128_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vst_h_64_ip: ClangBuiltin<"__builtin_riscv_esp_vst_h_64_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vst_h_64_xp: ClangBuiltin<"__builtin_riscv_esp_vst_h_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_vst_l_64_ip: ClangBuiltin<"__builtin_riscv_esp_vst_l_64_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vst_l_64_xp: ClangBuiltin<"__builtin_riscv_esp_vst_l_64_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_slci_2q: ClangBuiltin<"__builtin_riscv_esp_slci_2q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_slcxxp_2q: ClangBuiltin<"__builtin_riscv_esp_slcxxp_2q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_src_q: ClangBuiltin<"__builtin_riscv_esp_src_q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_src_q_ld_ip: ClangBuiltin<"__builtin_riscv_esp_src_q_ld_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_src_q_ld_xp: ClangBuiltin<"__builtin_riscv_esp_src_q_ld_xp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], 
[ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_src_q_qup: ClangBuiltin<"__builtin_riscv_esp_src_q_qup">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srci_2q: ClangBuiltin<"__builtin_riscv_esp_srci_2q">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_s16_q_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_s16_q_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_s16_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_s16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_s8_q_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_s8_q_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_s8_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_s8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_u16_q_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_u16_q_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_u16_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_u16_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_u8_q_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_u8_q_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcmb_u8_qacc: ClangBuiltin<"__builtin_riscv_esp_srcmb_u8_qacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcq_128_st_incp: ClangBuiltin<"__builtin_riscv_esp_srcq_128_st_incp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_srcxxp_2q: ClangBuiltin<"__builtin_riscv_esp_srcxxp_2q">, + 
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_srs_s_xacc: ClangBuiltin<"__builtin_riscv_esp_srs_s_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_srs_u_xacc: ClangBuiltin<"__builtin_riscv_esp_srs_u_xacc">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>; + +def int_riscv_esp_vsl_32: ClangBuiltin<"__builtin_riscv_esp_vsl_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsld_16: ClangBuiltin<"__builtin_riscv_esp_vsld_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsld_32: ClangBuiltin<"__builtin_riscv_esp_vsld_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsld_8: ClangBuiltin<"__builtin_riscv_esp_vsld_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsr_s32: ClangBuiltin<"__builtin_riscv_esp_vsr_s32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsr_u32: ClangBuiltin<"__builtin_riscv_esp_vsr_u32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsrd_16: ClangBuiltin<"__builtin_riscv_esp_vsrd_16">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsrd_32: ClangBuiltin<"__builtin_riscv_esp_vsrd_32">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_vsrd_8: ClangBuiltin<"__builtin_riscv_esp_vsrd_8">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg>, ImmArg>, ImmArg>]>; + +def int_riscv_esp_st_s_xacc_ip: ClangBuiltin<"__builtin_riscv_esp_st_s_xacc_ip">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg>]>; + +def int_riscv_esp_st_u_xacc_ip: ClangBuiltin<"__builtin_riscv_esp_st_u_xacc_ip">, + Intrinsic<[], [llvm_i32_ty, 
llvm_i32_ty], [ImmArg>]>; + + +} diff --git a/llvm/lib/Support/RISCVISAInfo.cpp b/llvm/lib/Support/RISCVISAInfo.cpp index db2e4ca92ae40a..0654b76e3fbf84 100644 --- a/llvm/lib/Support/RISCVISAInfo.cpp +++ b/llvm/lib/Support/RISCVISAInfo.cpp @@ -71,6 +71,7 @@ static const RISCVSupportedExtension SupportedExtensions[] = { {"xcvmac", {1, 0}}, {"xcvmem", {1, 0}}, {"xcvsimd", {1, 0}}, + {"xesppie", {1, 0}}, {"xsfvcp", {1, 0}}, {"xsfvfnrclipxfqf", {1, 0}}, {"xsfvfwmaccqqq", {1, 0}}, diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp index f6e8386aff4510..4a573236dcf24a 100644 --- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -442,6 +442,14 @@ struct RISCVOperand final : public MCParsedAsmOperand { } } + static bool inRange(const MCExpr *Expr, int64_t MinValue, int64_t MaxValue) { + if (auto *CE = dyn_cast(Expr)) { + int64_t Value = CE->getValue(); + return Value >= MinValue && Value <= MaxValue; + } + return false; + } + bool isToken() const override { return Kind == KindTy::Token; } bool isReg() const override { return Kind == KindTy::Register; } bool isV0Reg() const { @@ -467,6 +475,43 @@ struct RISCVOperand final : public MCParsedAsmOperand { bool isRlist() const { return Kind == KindTy::Rlist; } bool isSpimm() const { return Kind == KindTy::Spimm; } + bool isImm(int64_t MinValue, int64_t MaxValue) const { + return Kind == KindTy::Immediate && inRange(getImm(), MinValue, MaxValue); + } + + bool isImm8() const { + // The addi instruction maybe expaned to addmi and addi. 
+ return isImm((-32768 - 128), (32512 + 127)); + } + + bool isSelect_2() const { return isImm(0, 1); } + + bool isSelect_4() const { return isImm(0, 3); } + + bool isSelect_8() const { return isImm(0, 7); } + + bool isSelect_16() const { return isImm(0, 16); } + + bool isOffset_16_16() const { + return isImm(-128, 112) && + ((cast(getImm())->getValue() & 0xf) == 0); + } + + bool isOffset_256_8() const { + return isImm(-1024, 1016) && + ((cast(getImm())->getValue() & 0x7) == 0); + } + + bool isOffset_256_16() const { + return isImm(-2048, 2032) && + ((cast(getImm())->getValue() & 0xf) == 0); + } + + bool isOffset_256_4() const { + return isImm(-512, 508) && + ((cast(getImm())->getValue() & 0x3) == 0); + } + bool isGPR() const { return Kind == KindTy::Register && RISCVMCRegisterClasses[RISCV::GPRRegClassID].contains(Reg.RegNum); @@ -841,6 +886,54 @@ struct RISCVOperand final : public MCParsedAsmOperand { VK == RISCVMCExpr::VK_RISCV_None; } + bool isUImm9() const { + if (!isImm()) + return false; + int64_t Imm; + RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None; + bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK); + return IsConstantImm && isUInt<9>(Imm) && + VK == RISCVMCExpr::VK_RISCV_None; + } + + bool isUImm10() const { + RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None; + int64_t Imm; + bool IsValid; + if (!isImm()) + return false; + bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK); + if (!IsConstantImm) + IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK); + else + IsValid = isUInt<10>(fixImmediateForRV32(Imm, isRV64Imm())); + return IsValid && VK == RISCVMCExpr::VK_RISCV_None; + } + + bool isUImm12() const { + if (!isImm()) + return false; + int64_t Imm; + RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None; + bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK); + return IsConstantImm && isUInt<12>(Imm) && + VK == RISCVMCExpr::VK_RISCV_None; + } + + bool isUImm13() const { + RISCVMCExpr::VariantKind VK = 
RISCVMCExpr::VK_RISCV_None; + int64_t Imm; + bool IsValid; + if (!isImm()) + return false; + bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK); + if (!IsConstantImm) + IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK); + else + IsValid = isUInt<13>(fixImmediateForRV32(Imm, isRV64Imm())); + return IsValid && VK == RISCVMCExpr::VK_RISCV_None; + } + bool isUImm10Lsb00NonZero() const { if (!isImm()) return false; @@ -1533,6 +1626,10 @@ bool RISCVAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, return generateImmOutOfRangeError( Operands, ErrorInfo, -(1 << 11), (1 << 11) - 32, "immediate must be a multiple of 32 bytes in the range"); + case Match_InvalidUImm12: + return generateImmOutOfRangeError( + Operands, ErrorInfo, 0, (1 << 12) - 1, + "immediate must be in the range"); case Match_InvalidSImm13Lsb0: return generateImmOutOfRangeError( Operands, ErrorInfo, -(1 << 12), (1 << 12) - 2, diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt index ac88cd49db4e4b..30e4a150922899 100644 --- a/llvm/lib/Target/RISCV/CMakeLists.txt +++ b/llvm/lib/Target/RISCV/CMakeLists.txt @@ -43,6 +43,7 @@ add_llvm_target(RISCVCodeGen RISCVInstrInfo.cpp RISCVISelDAGToDAG.cpp RISCVISelLowering.cpp + RISCVESP32P4ISelLowering.cpp RISCVMachineFunctionInfo.cpp RISCVMergeBaseOffset.cpp RISCVOptWInstrs.cpp diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp index 4dd039159e29dc..0ad3325e7cb703 100644 --- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp +++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp @@ -74,6 +74,20 @@ static DecodeStatus DecodeGPRRegisterClass(MCInst &Inst, uint32_t RegNo, return MCDisassembler::Success; } +static DecodeStatus DecodeGPRPIERegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const MCDisassembler *Decoder) { + auto bit4 = RegNo & 0x8; + RegNo |= (bit4 << 4); + RegNo |= (1 << 3); + if 
((RegNo >= 8 && RegNo <= 15) || (RegNo >= 24 && RegNo <= 31)) { + MCRegister Reg = RISCV::X0 + RegNo; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; + } + return MCDisassembler::Fail; +} + static DecodeStatus DecodeGPRX1X5RegisterClass(MCInst &Inst, uint32_t RegNo, uint64_t Address, const MCDisassembler *Decoder) { @@ -255,6 +269,21 @@ static DecodeStatus DecodeVRM8RegisterClass(MCInst &Inst, uint32_t RegNo, return MCDisassembler::Success; } +static const unsigned QRDecoderTable[] = {RISCV::Q0, RISCV::Q1, RISCV::Q2, + RISCV::Q3, RISCV::Q4, RISCV::Q5, + RISCV::Q6, RISCV::Q7}; + +static DecodeStatus DecodeQRRegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= std::size(QRDecoderTable)) + return MCDisassembler::Fail; + + unsigned Reg = QRDecoderTable[RegNo]; + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + static DecodeStatus decodeVMaskReg(MCInst &Inst, uint64_t RegNo, uint64_t Address, const MCDisassembler *Decoder) { @@ -374,6 +403,43 @@ static DecodeStatus decodeCSSPushPopchk(MCInst &Inst, uint32_t Insn, uint64_t Address, const MCDisassembler *Decoder); +static DecodeStatus decodeSelect_2Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder); + +static DecodeStatus decodeSelect_4Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder); + +static DecodeStatus decodeSelect_8Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, const void *Decoder); + +static DecodeStatus decodeSelect_16Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder); + +static DecodeStatus decodeOffset_16_16Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder); + +static DecodeStatus decodeOffset_256_8Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder); + +static DecodeStatus decodeOffset_256_16Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void 
*Decoder); + +static DecodeStatus decodeOffset_256_4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder); + +static DecodeStatus decodeUImm13_Step4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder); + +static DecodeStatus decodeUImm10_Step4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder); + #include "RISCVGenDisassemblerTables.inc" static DecodeStatus decodeRVCInstrRdRs1ImmZero(MCInst &Inst, uint32_t Insn, @@ -493,6 +559,90 @@ static DecodeStatus decodeZcmpSpimm(MCInst &Inst, unsigned Imm, return MCDisassembler::Success; } +static DecodeStatus decodeSelect_2Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeSelect_4Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeSelect_8Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeSelect_16Operand(MCInst &Inst, uint64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<8>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_16_16Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isInt<8>(Imm) && "Invalid immediate"); + auto ImmSigned = SignExtend64<4>(Imm); + Inst.addOperand(MCOperand::createImm(ImmSigned * 16)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_256_8Operand(MCInst &Inst, int64_t Imm, + int64_t Address, 
+ const void *Decoder) { + assert(isInt<16>(Imm) && "Invalid immediate"); + auto ImmSigned = SignExtend64<4>(Imm); + Inst.addOperand(MCOperand::createImm(ImmSigned * 8)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_256_16Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isInt<16>(Imm) && "Invalid immediate"); + auto ImmSigned = SignExtend64<4>(Imm); + Inst.addOperand(MCOperand::createImm(ImmSigned * 16)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeOffset_256_4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isInt<16>(Imm) && "Invalid immediate"); + auto ImmSigned = SignExtend64<4>(Imm); + Inst.addOperand(MCOperand::createImm(ImmSigned * 4)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeUImm13_Step4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<13>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm((Imm * 2) * 2)); + return MCDisassembler::Success; +} + +static DecodeStatus decodeUImm10_Step4Operand(MCInst &Inst, int64_t Imm, + int64_t Address, + const void *Decoder) { + assert(isUInt<10>(Imm) && "Invalid immediate"); + Inst.addOperand(MCOperand::createImm(Imm * 2)); + return MCDisassembler::Success; +} + // Add implied SP operand for C.*SP compressed instructions. The SP operand // isn't explicitly encoded in the instruction. 
void RISCVDisassembler::addSPOperands(MCInst &MI) const { @@ -595,6 +745,9 @@ DecodeStatus RISCVDisassembler::getInstruction(MCInst &MI, uint64_t &Size, TRY_TO_DECODE_FEATURE( RISCV::FeatureVendorXSfvfnrclipxfqf, DecoderTableXSfvfnrclipxfqf32, "SiFive FP32-to-int8 Ranged Clip Instructions opcode table"); + TRY_TO_DECODE_FEATURE( + RISCV::FeatureVendorESP32P4, DecoderTableESP32P432, + "ESP32P4 Instruction opcode table"); TRY_TO_DECODE_FEATURE(RISCV::FeatureVendorXCVbitmanip, DecoderTableXCVbitmanip32, "CORE-V Bit Manipulation custom opcode table"); diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h index d7f7859ce4399b..faefd0e32514f8 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h @@ -51,6 +51,7 @@ enum { InstFormatCSB = 20, InstFormatCSH = 21, InstFormatOther = 22, + InstFormatESP32P4 = 23, InstFormatMask = 31, InstFormatShift = 0, @@ -294,7 +295,9 @@ enum OperandType : unsigned { OPERAND_UIMM8_GE32, OPERAND_UIMM9_LSB000, OPERAND_UIMM10_LSB00_NONZERO, + OPERAND_UIMM10_STEP4, OPERAND_UIMM12, + OPERAND_UIMM13_STEP4, OPERAND_ZERO, OPERAND_SIMM5, OPERAND_SIMM5_PLUS1, diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp index bd899495812f44..bff84bb9aaffad 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp @@ -329,3 +329,119 @@ const char *RISCVInstPrinter::getRegisterName(MCRegister Reg) { return getRegisterName(Reg, ArchRegNames ? 
RISCV::NoRegAltName : RISCV::ABIRegAltName); } + +void RISCVInstPrinter::printImm8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert(isInt<8>(Value) && + "Invalid argument, value must be in ranges [-128,127]"); + O << Value; + } else { + printOperand(MI, OpNum, STI, O); + } +} + +void RISCVInstPrinter::printSelect_2_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 1) && + "Invalid argument, value must be in range [0,1]"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} + +void RISCVInstPrinter::printSelect_4_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 3) && + "Invalid argument, value must be in range [0,3]"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} + +void RISCVInstPrinter::printSelect_8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 7) && + "Invalid argument, value must be in range [0,7]"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} + +void RISCVInstPrinter::printSelect_16_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= 0 && Value <= 15) && + "Invalid argument, value must be in range [0,15]"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} + +void RISCVInstPrinter::printOffset_16_16_AsmOperand(const MCInst *MI, int OpNum, + const 
MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -128 && Value <= 112 && (Value & 0xf) == 0) && + "Invalid argument, value must be in range [-128,112], first 4 bits " + "should be zero"); + O << Value; + } else { + printOperand(MI, OpNum, STI, O); + } +} + +void RISCVInstPrinter::printOffset_256_8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -1024 && Value <= 1016 && (Value & 0x7) == 0) && + "Invalid argument, value must be in range [-1024,1016], first 3 " + "bits should be zero"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} + +void RISCVInstPrinter::printOffset_256_16_AsmOperand(const MCInst *MI, + int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -2048 && Value <= 2032 && (Value & 0xf) == 0) && + "Invalid argument, value must be in range [-2048,2032], first 4 " + "bits should be zero"); + O << Value; + } else { + printOperand(MI, OpNum, STI, O); + } +} + +void RISCVInstPrinter::printOffset_256_4_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O) { + if (MI->getOperand(OpNum).isImm()) { + int64_t Value = MI->getOperand(OpNum).getImm(); + assert((Value >= -512 && Value <= 508 && (Value & 0x3) == 0) && + "Invalid argument, value must be in range [-512,508], first 2 bits " + "should be zero"); + O << Value; + } else + printOperand(MI, OpNum, STI, O); +} diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h index 4512bd5f4c4b7a..6eab3bbfe2ff26 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.h @@ -67,6 
+67,26 @@ class RISCVInstPrinter : public MCInstPrinter { const MCSubtargetInfo &STI, raw_ostream &O); static const char *getRegisterName(MCRegister Reg); static const char *getRegisterName(MCRegister Reg, unsigned AltIdx); + + void printImm8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSelect_2_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSelect_4_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSelect_8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSelect_16_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printOffset_16_16_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printOffset_256_8_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printOffset_256_16_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, + raw_ostream &O); + void printOffset_256_4_AsmOperand(const MCInst *MI, int OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); }; } // namespace llvm diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp index 5ea386c3c32a3d..c1e894a35b0bc1 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp @@ -100,6 +100,54 @@ class RISCVMCCodeEmitter : public MCCodeEmitter { unsigned getRegReg(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + + uint32_t getImm8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getSelect_2OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t 
getSelect_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getSelect_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getSelect_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint8_t getSelect_256OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int8_t getOffset_16_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int16_t getOffset_256_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int16_t getOffset_256_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + int16_t getOffset_256_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint16_t getUImm10_Step4Operand(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; + + uint16_t getUImm13_Step4Operand(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; }; } // end anonymous namespace @@ -493,7 +541,8 @@ unsigned RISCVMCCodeEmitter::getImmOpValue(const MCInst &MI, unsigned OpNo, // FIXME: Sub kind binary exprs have chance of underflow. 
if (MIFrm == RISCVII::InstFormatJ) { FixupKind = RISCV::fixup_riscv_jal; - } else if (MIFrm == RISCVII::InstFormatB) { + } else if (MIFrm == RISCVII::InstFormatB || + MIFrm == RISCVII::InstFormatESP32P4) { FixupKind = RISCV::fixup_riscv_branch; } else if (MIFrm == RISCVII::InstFormatCJ) { FixupKind = RISCV::fixup_riscv_rvc_jump; @@ -563,4 +612,140 @@ unsigned RISCVMCCodeEmitter::getRegReg(const MCInst &MI, unsigned OpNo, return Op | Op1 << 5; } +uint32_t RISCVMCCodeEmitter::getImm8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int32_t Res = MO.getImm(); + + assert(((Res >= -128) && (Res <= 127)) && "Unexpected operand value!"); + + return (Res & 0xff); +} + +uint8_t +RISCVMCCodeEmitter::getSelect_2OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 1)) && "Unexpected operand value!"); + + return Res; +} + +uint8_t +RISCVMCCodeEmitter::getSelect_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 3)) && "Unexpected operand value!"); + + return Res; +} + +uint8_t +RISCVMCCodeEmitter::getSelect_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + assert(((Res >= 0) && (Res <= 7)) && "Unexpected operand value!"); + + return Res; +} + +uint8_t +RISCVMCCodeEmitter::getSelect_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + uint8_t Res = static_cast(MO.getImm()); + + 
assert(((Res >= 0) && (Res <= 15)) && "Unexpected operand value!"); + + return Res; +} + +int8_t +RISCVMCCodeEmitter::getOffset_16_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int8_t Res = static_cast(MO.getImm()); + + assert(((Res >= -128) && (Res <= 112) && ((Res & 0xf) == 0)) && + "Unexpected operand value!"); + + return Res / 16; +} + +int16_t +RISCVMCCodeEmitter::getOffset_256_8OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int16_t Res = static_cast(MO.getImm()); + + assert(((Res >= -1024) && (Res <= 1016) && ((Res & 0x7) == 0)) && + "Unexpected operand value!"); + + return Res / 8; +} + +int16_t +RISCVMCCodeEmitter::getOffset_256_16OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int16_t Res = static_cast(MO.getImm()); + + assert(((Res >= -2048) && (Res <= 2032) && ((Res & 0xf) == 0)) && + "Unexpected operand value!"); + + return Res / 16; +} + +int16_t +RISCVMCCodeEmitter::getOffset_256_4OpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + int16_t Res = static_cast(MO.getImm()); + + assert(((Res >= -512) && (Res <= 508) && ((Res & 0x3) == 0)) && + "Unexpected operand value!"); + + return Res / 4; +} + +uint16_t +RISCVMCCodeEmitter::getUImm10_Step4Operand(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + if (MO.isImm()) { + int16_t Res = static_cast(MO.getImm()); + assert((isUInt<10>(Res) && ((Res & 0x1) == 0)) && "Unexpected operand value!"); + return Res / 2; + } + return getImmOpValue(MI, OpNo, Fixups, STI); +} + +uint16_t 
+RISCVMCCodeEmitter::getUImm13_Step4Operand(const MCInst &MI, unsigned OpNo, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand &MO = MI.getOperand(OpNo); + if (MO.isImm()) { + int16_t Res = static_cast(MO.getImm()); + assert((isUInt<13>(Res) && ((Res & 0x1) == 0)) && "Unexpected operand value!"); + return Res / 2; + } + return getImmOpValue(MI, OpNo, Fixups, STI); +} #include "RISCVGenMCCodeEmitter.inc" diff --git a/llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp new file mode 100644 index 00000000000000..0ec3aaf3ad1dc0 --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVESP32P4ISelLowering.cpp @@ -0,0 +1,8468 @@ +//==- RISCVESP32P4ISelLowering.cpp - ESP32 P4 DAG Lowering Implementation -===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the interfaces that Xtensa uses to lower LLVM code into a +// selection DAG. 
+// +//===----------------------------------------------------------------------===// + +#include "RISCVISelLowering.h" +#include "RISCVSubtarget.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" + +using namespace llvm; + +MachineBasicBlock *RISCVTargetLowering::emitDSPInstrWithCustomInserter( + MachineInstr &MI, MachineBasicBlock *MBB, const TargetInstrInfo &TII, + MachineFunction *MF, MachineRegisterInfo &MRI, DebugLoc DL) const { + switch (MI.getOpcode()) { + default: + llvm_unreachable("Unexpected instr type to insert"); + case RISCV::ESP_VCMULAS_S16_QACC_H_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_H; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S16_QACC_H_LD_IP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_H_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_ip " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_ip " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_ip " + "first argument, it must bi in range 
[0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S16_QACC_H_LD_XP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_H_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_xp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_xp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_h_ld_xp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S16_QACC_L_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_L; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && 
"Unexpected value of esp_vcmulas_s16_qacc_l first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S16_QACC_L_LD_IP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_L_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_ip " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_ip " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_ip " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S16_QACC_L_LD_XP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S16_QACC_L_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_xp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_xp " + "first argument, it must bi 
in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s16_qacc_l_ld_xp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_H_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_H; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_H_LD_IP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_H_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && 
"Unexpected value of esp_vcmulas_s8_qacc_h_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_H_LD_XP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_H_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_h_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_L_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_L; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l first " + "argument, it must bi in range [0,7]"); + MachineOperand 
&QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_L_LD_IP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_L_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMULAS_S8_QACC_L_LD_XP_P: { + unsigned Opc = RISCV::ESP_VCMULAS_S8_QACC_L_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected 
value of esp_vcmulas_s8_qacc_l_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vcmulas_s8_qacc_l_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + 
unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_ip first " + 
"argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + 
MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_XACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_XACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_XACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_XACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_XACC_LD_XP; + MachineBasicBlock *MBB = 
MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_XACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_XACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) 
+ .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_XACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_XACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + 
return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + 
const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 
&& "Unexpected value of esp_vmulas_s8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_XACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_XACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = 
QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_XACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_XACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_ip first " + "argument, it must bi in range 
[0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_XACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_XACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + 
case RISCV::ESP_VMULAS_U16_QACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_QACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_QACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = 
MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_QACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + 
.addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_QACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_XACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case 
RISCV::ESP_VMULAS_U16_XACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_XACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const 
TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_XACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_XACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_XACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 
8 && "Unexpected value of esp_vmulas_u16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = 
QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_ip first " + "argument, it must bi in range 
[0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + 
case RISCV::ESP_VMULAS_U8_XACC_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_XACC_LD_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_XACC_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &OFFSET_16_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_XACC_LD_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_XACC_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + 
MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_XACC_ST_IP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_XACC_ST_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_ip first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &OFFSET_16_16 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + 
.addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_XACC_ST_XP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_XACC_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_xacc_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S16_QACC_LDBC_INCP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S16_QACC_LDBC_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected 
value of esp_vmulas_s16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_S8_QACC_LDBC_INCP_P: { + unsigned Opc = RISCV::ESP_VMULAS_S8_QACC_LDBC_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_s8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U16_QACC_LDBC_INCP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U16_QACC_LDBC_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && 
"Unexpected value of esp_vmulas_u16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u16_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMULAS_U8_QACC_LDBC_INCP_P: { + unsigned Opc = RISCV::ESP_VMULAS_U8_QACC_LDBC_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmulas_u8_qacc_ldbc_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_S16_QACC_P: { + unsigned Opc = RISCV::ESP_VSMULAS_S16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = 
QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_S16_QACC_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSMULAS_S16_QACC_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsmulas_s16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_S8_QACC_P: { + unsigned Opc = RISCV::ESP_VSMULAS_S8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc 
first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_S8_QACC_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSMULAS_S8_QACC_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsmulas_s8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_U16_QACC_P: { + unsigned Opc = RISCV::ESP_VSMULAS_U16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = 
MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_U16_QACC_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSMULAS_U16_QACC_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsmulas_u16_qacc_ld_incp " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_U8_QACC_P: { + unsigned Opc = RISCV::ESP_VSMULAS_U8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_u8_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && 
"Unexpected value of esp_vsmulas_u8_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSMULAS_U8_QACC_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSMULAS_U8_QACC_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsmulas_u8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsmulas_u8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsmulas_u8_qacc_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S16_P: { + unsigned Opc = RISCV::ESP_CMUL_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand 
&SELECT_4 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned 
QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S8_P: { + unsigned Opc = RISCV::ESP_CMUL_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + 
.addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U16_P: { + unsigned Opc = RISCV::ESP_CMUL_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 
8 && "Unexpected value of esp_cmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); + 
unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U8_P: { + unsigned Opc = RISCV::ESP_CMUL_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = 
MI.getOperand(2); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_CMUL_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_CMUL_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_4 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_cmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, 
RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_S16_A_P: { + unsigned Opc = RISCV::ESP_MAX_S16_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_s16_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_S32_A_P: { + unsigned Opc = RISCV::ESP_MAX_S32_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_s32_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_S8_A_P: { + unsigned Opc = RISCV::ESP_MAX_S8_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_s8_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_U16_A_P: { + unsigned Opc = RISCV::ESP_MAX_U16_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW 
= MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_u16_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_U32_A_P: { + unsigned Opc = RISCV::ESP_MAX_U32_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_u32_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MAX_U8_A_P: { + unsigned Opc = RISCV::ESP_MAX_U8_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_max_u8_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_S16_A_P: { + unsigned Opc = RISCV::ESP_MIN_S16_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_s16_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + 
MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_S32_A_P: { + unsigned Opc = RISCV::ESP_MIN_S32_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_s32_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_S8_A_P: { + unsigned Opc = RISCV::ESP_MIN_S8_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_s8_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_U16_A_P: { + unsigned Opc = RISCV::ESP_MIN_U16_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_u16_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_U32_A_P: { + unsigned Opc = RISCV::ESP_MIN_U32_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_u32_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = 
&RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MIN_U8_A_P: { + unsigned Opc = RISCV::ESP_MIN_U8_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_min_u8_a first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VABS_16_P: { + unsigned Opc = RISCV::ESP_VABS_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vabs_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(1); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vabs_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VABS_32_P: { + unsigned Opc = RISCV::ESP_VABS_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vabs_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(1); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vabs_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return 
MBB; + } + case RISCV::ESP_VABS_8_P: { + unsigned Opc = RISCV::ESP_VABS_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vabs_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(1); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vabs_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S16_P: { + unsigned Opc = RISCV::ESP_VADD_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = 
MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + 
.addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S32_P: { + unsigned Opc = RISCV::ESP_VADD_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + 
.addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S8_P: { + unsigned Opc = RISCV::ESP_VADD_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s8 first argument, it " + "must bi in 
range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " + "argument, it must bi in range 
[0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_s8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U16_P: { + unsigned Opc = RISCV::ESP_VADD_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + 
unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of 
esp_vadd_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U32_P: { + unsigned Opc = RISCV::ESP_VADD_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " + "argument, it must 
bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U8_P: { + unsigned Opc = RISCV::ESP_VADD_U8; + MachineBasicBlock *MBB = 
MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + 
.addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VADD_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VADD_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vadd_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCLAMP_S16_P: { + unsigned Opc = RISCV::ESP_VCLAMP_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vclamp_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(1); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vclamp_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) 
+ .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S16_P: { + unsigned Opc = RISCV::ESP_VMAX_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + 
QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S32_P: { + unsigned Opc = RISCV::ESP_VMAX_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s32 first argument, it " + "must bi in range [0,7]"); 
+ MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S8_P: { + unsigned Opc = RISCV::ESP_VMAX_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = 
QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_s8_st_incp first " + 
"argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U16_P: { + unsigned Opc = RISCV::ESP_VMAX_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U32_P: { + unsigned Opc = RISCV::ESP_VMAX_U32; + MachineBasicBlock *MBB = MI.getParent(); + 
MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + 
MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U8_P: { + unsigned Opc = RISCV::ESP_VMAX_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, 
MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMAX_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMAX_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand 
&QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmax_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S16_P: { + unsigned Opc = RISCV::ESP_VMIN_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = 
QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, 
TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S32_P: { + unsigned Opc = RISCV::ESP_VMIN_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s32_ld_incp first " + "argument, it must bi in range 
[0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S8_P: { + unsigned Opc = RISCV::ESP_VMIN_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s8 first argument, it " + "must bi in range [0,7]"); + 
MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX 
= MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_s8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U16_P: { + unsigned Opc = RISCV::ESP_VMIN_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case 
RISCV::ESP_VMIN_U16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u16_st_incp first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U32_P: { + unsigned Opc = RISCV::ESP_VMIN_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + 
MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + 
.addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U8_P: { + unsigned Opc = RISCV::ESP_VMIN_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, 
RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMIN_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMIN_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmin_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmin_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmin_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmin_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S16_P: { + unsigned Opc = RISCV::ESP_VMUL_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s16 first argument, it " + "must bi in range [0,7]"); + 
MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S16_S8XS8_P: { + unsigned Opc = RISCV::ESP_VMUL_S16_S8XS8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s16_s8xs8 first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s16_s8xs8 first " + "argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s16_s8xs8 first " + "argument, it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vmul_s16_s8xs8 first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + 
.addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S32_S16XS16_P: { + unsigned Opc = RISCV::ESP_VMUL_S32_S16XS16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s32_s16xs16 first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s32_s16xs16 first " + "argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s32_s16xs16 first " + "argument, it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vmul_s32_s16xs16 first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S8_P: { + unsigned Opc = RISCV::ESP_VMUL_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + 
+ MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " + 
"argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_s8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U16_P: { + unsigned Opc = RISCV::ESP_VMUL_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + 
MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 
+ QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U8_P: { + unsigned Opc = RISCV::ESP_VMUL_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + 
.addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VMUL_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VMUL_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vmul_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VPRELU_S16_P: { + unsigned Opc = RISCV::ESP_VPRELU_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vprelu_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of 
esp_vprelu_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vprelu_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VPRELU_S8_P: { + unsigned Opc = RISCV::ESP_VPRELU_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vprelu_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vprelu_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vprelu_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VRELU_S16_P: { + unsigned Opc = RISCV::ESP_VRELU_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vrelu_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VRELU_S8_P: { + unsigned Opc 
= RISCV::ESP_VRELU_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vrelu_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSADDS_S16_P: { + unsigned Opc = RISCV::ESP_VSADDS_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsadds_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsadds_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSADDS_S8_P: { + unsigned Opc = RISCV::ESP_VSADDS_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsadds_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsadds_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSADDS_U16_P: { + unsigned Opc = 
RISCV::ESP_VSADDS_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsadds_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsadds_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSADDS_U8_P: { + unsigned Opc = RISCV::ESP_VSADDS_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsadds_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsadds_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_S16_P: { + unsigned Opc = RISCV::ESP_VSAT_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + 
.addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_S32_P: { + unsigned Opc = RISCV::ESP_VSAT_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_S8_P: { + unsigned Opc = RISCV::ESP_VSAT_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_U16_P: { + unsigned Opc = RISCV::ESP_VSAT_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_u16 first argument, it " + "must bi 
in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_U32_P: { + unsigned Opc = RISCV::ESP_VSAT_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSAT_U8_P: { + unsigned Opc = RISCV::ESP_VSAT_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QX = MI.getOperand(2); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsat_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vsat_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSSUBS_S16_P: { + unsigned Opc = RISCV::ESP_VSSUBS_S16; + 
MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vssubs_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vssubs_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSSUBS_S8_P: { + unsigned Opc = RISCV::ESP_VSSUBS_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vssubs_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vssubs_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSSUBS_U16_P: { + unsigned Opc = RISCV::ESP_VSSUBS_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vssubs_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vssubs_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + 
MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSSUBS_U8_P: { + unsigned Opc = RISCV::ESP_VSSUBS_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vssubs_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vssubs_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S16_P: { + unsigned Opc = RISCV::ESP_VSUB_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && 
"Unexpected value of esp_vsub_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, 
RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S32_P: { + unsigned Opc = RISCV::ESP_VSUB_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = 
&RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S8_P: { + unsigned Opc = RISCV::ESP_VSUB_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned 
QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_S8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_S8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = 
QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_s8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U16_P: { + unsigned Opc = RISCV::ESP_VSUB_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U16_LD_INCP_P: { + unsigned Opc = 
RISCV::ESP_VSUB_U16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_U16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + 
MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U32_P: { + unsigned Opc = RISCV::ESP_VSUB_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U32_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_U32_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned 
QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u32_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U32_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_U32_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u32_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + 
return MBB; + } + case RISCV::ESP_VSUB_U8_P: { + unsigned Opc = RISCV::ESP_VSUB_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U8_LD_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_U8_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &QV = MI.getOperand(3); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u8_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) 
+ .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSUB_U8_ST_INCP_P: { + unsigned Opc = RISCV::ESP_VSUB_U8_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vsub_u8_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ADDX2_P: { + unsigned Opc = RISCV::ESP_ADDX2; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ADDX4_P: { + unsigned Opc = RISCV::ESP_ADDX4; + MachineBasicBlock *MBB = 
MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SAT_P: { + unsigned Opc = RISCV::ESP_SAT; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS0 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &RSD = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS0.getReg()) + .addReg(RS1.getReg()) + .addReg(RSD.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SUBX2_P: { + unsigned Opc = RISCV::ESP_SUBX2; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SUBX4_P: { + unsigned Opc = RISCV::ESP_SUBX4; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ANDQ_P: { + unsigned Opc = RISCV::ESP_ANDQ; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal 
< 8 && "Unexpected value of esp_andq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_andq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_andq first argument, it must " + "bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_NOTQ_P: { + unsigned Opc = RISCV::ESP_NOTQ; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_notq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(1); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_notq first argument, it must " + "bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ORQ_P: { + unsigned Opc = RISCV::ESP_ORQ; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_orq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_orq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_orq first argument, it must " + "bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + 
QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_XORQ_P: { + unsigned Opc = RISCV::ESP_XORQ; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_xorq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_xorq first argument, it must " + "bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_xorq first argument, it must " + "bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_S16_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_eq_s16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_S32_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_s32 first argument, " + "it 
must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_s32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_eq_s32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_S8_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_eq_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_U16_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 
8 && "Unexpected value of esp_vcmp_eq_u16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_U32_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_eq_u32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_EQ_U8_P: { + unsigned Opc = RISCV::ESP_VCMP_EQ_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_eq_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_eq_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_eq_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case 
RISCV::ESP_VCMP_GT_S16_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_gt_s16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_GT_S32_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_s32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_s32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_gt_s32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_GT_S8_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_s8 first argument, it " + "must bi in range [0,7]"); + 
MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_gt_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_GT_U16_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_gt_u16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_GT_U32_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of 
esp_vcmp_gt_u32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_GT_U8_P: { + unsigned Opc = RISCV::ESP_VCMP_GT_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_gt_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_gt_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_gt_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_S16_P: { + unsigned Opc = RISCV::ESP_VCMP_LT_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_s16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_S32_P: { + 
unsigned Opc = RISCV::ESP_VCMP_LT_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_s32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_s32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_s32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_S8_P: { + unsigned Opc = RISCV::ESP_VCMP_LT_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_U16_P: { + unsigned Opc = RISCV::ESP_VCMP_LT_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = 
MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_u16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_u16 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_U32_P: { + unsigned Opc = RISCV::ESP_VCMP_LT_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_u32 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_u32 first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VCMP_LT_U8_P: { + unsigned Opc = RISCV::ESP_VCMP_LT_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vcmp_lt_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vcmp_lt_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vcmp_lt_u8 first 
argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOV_S16_QACC_P: { + unsigned Opc = RISCV::ESP_MOV_S16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_mov_s16_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RISCV::Q0 + QUVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOV_S8_QACC_P: { + unsigned Opc = RISCV::ESP_MOV_S8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_mov_s8_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RISCV::Q0 + QUVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOV_U16_QACC_P: { + unsigned Opc = RISCV::ESP_MOV_U16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_mov_u16_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RISCV::Q0 + QUVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOV_U8_QACC_P: { + unsigned Opc = RISCV::ESP_MOV_U8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_mov_u8_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RISCV::Q0 + QUVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_16_A_P: { + unsigned Opc = RISCV::ESP_MOVI_16_A; + MachineBasicBlock *MBB = MI.getParent(); + 
MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_movi_16_a first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_16_Q_P: { + unsigned Opc = RISCV::ESP_MOVI_16_Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_16 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_movi_16_q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_32_A_P: { + unsigned Opc = RISCV::ESP_MOVI_32_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_movi_32_a first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_32_Q_P: { + unsigned Opc = RISCV::ESP_MOVI_32_Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_4 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && 
"Unexpected value of esp_movi_32_q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_8_A_P: { + unsigned Opc = RISCV::ESP_MOVI_8_A; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_movi_8_a first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVI_8_Q_P: { + unsigned Opc = RISCV::ESP_MOVI_8_Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_16 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_movi_8_q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_CFG_P: { + unsigned Opc = RISCV::ESP_MOVX_R_CFG; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_FFT_BIT_WIDTH_P: { + unsigned Opc = RISCV::ESP_MOVX_R_FFT_BIT_WIDTH; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = 
MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_PERF_P: { + unsigned Opc = RISCV::ESP_MOVX_R_PERF; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + MachineOperand &RS1 = MI.getOperand(1); + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_SAR_P: { + unsigned Opc = RISCV::ESP_MOVX_R_SAR; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_SAR_BYTES_P: { + unsigned Opc = RISCV::ESP_MOVX_R_SAR_BYTES; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_XACC_H_P: { + unsigned Opc = RISCV::ESP_MOVX_R_XACC_H; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_R_XACC_L_P: { + unsigned Opc = RISCV::ESP_MOVX_R_XACC_L; + MachineBasicBlock *MBB = MI.getParent(); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(R1, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_CFG_P: { + unsigned Opc = RISCV::ESP_MOVX_W_CFG; + 
MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_FFT_BIT_WIDTH_P: { + unsigned Opc = RISCV::ESP_MOVX_W_FFT_BIT_WIDTH; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_PERF_P: { + unsigned Opc = RISCV::ESP_MOVX_W_PERF; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_SAR_P: { + unsigned Opc = RISCV::ESP_MOVX_W_SAR; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_SAR_BYTES_P: { + unsigned Opc = RISCV::ESP_MOVX_W_SAR_BYTES; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_XACC_H_P: { + unsigned Opc = RISCV::ESP_MOVX_W_XACC_H; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_MOVX_W_XACC_L_P: { + unsigned Opc = RISCV::ESP_MOVX_W_XACC_L; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + BuildMI(*MBB, MI, DL, TII.get(Opc)).addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VEXT_S16_P: { + unsigned Opc = RISCV::ESP_VEXT_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + 
assert(QWVal < 8 && "Unexpected value of esp_vext_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(1); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vext_s16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vext_s16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VEXT_S8_P: { + unsigned Opc = RISCV::ESP_VEXT_S8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vext_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(1); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vext_s8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vext_s8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VEXT_U16_P: { + unsigned Opc = RISCV::ESP_VEXT_U16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vext_u16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(1); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vext_u16 first argument, it " + "must bi in range [0,7]"); + 
MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vext_u16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VEXT_U8_P: { + unsigned Opc = RISCV::ESP_VEXT_U8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vext_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(1); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vext_u8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(2); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_vext_u8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VUNZIP_16_P: { + unsigned Opc = RISCV::ESP_VUNZIP_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vunzip_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vunzip_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VUNZIP_32_P: { + unsigned Opc = 
RISCV::ESP_VUNZIP_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vunzip_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vunzip_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VUNZIP_8_P: { + unsigned Opc = RISCV::ESP_VUNZIP_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vunzip_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vunzip_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VUNZIPT_16_P: { + unsigned Opc = RISCV::ESP_VUNZIPT_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vunzipt_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vunzipt_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vunzipt_16 first argument, it " + 
"must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VUNZIPT_8_P: { + unsigned Opc = RISCV::ESP_VUNZIPT_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vunzipt_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vunzipt_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vunzipt_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VZIP_16_P: { + unsigned Opc = RISCV::ESP_VZIP_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vzip_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vzip_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return 
MBB; + } + case RISCV::ESP_VZIP_32_P: { + unsigned Opc = RISCV::ESP_VZIP_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vzip_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vzip_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VZIP_8_P: { + unsigned Opc = RISCV::ESP_VZIP_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vzip_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vzip_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VZIPT_16_P: { + unsigned Opc = RISCV::ESP_VZIPT_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vzipt_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vzipt_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of 
esp_vzipt_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VZIPT_8_P: { + unsigned Opc = RISCV::ESP_VZIPT_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_vzipt_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vzipt_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vzipt_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QXVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ZERO_Q_P: { + unsigned Opc = RISCV::ESP_ZERO_Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QZ = MI.getOperand(0); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_zero_q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ZERO_QACC_P: { + unsigned Opc = RISCV::ESP_ZERO_QACC; + MachineBasicBlock *MBB = MI.getParent(); + BuildMI(*MBB, MI, DL, TII.get(Opc)); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ZERO_XACC_P: { + unsigned Opc = 
RISCV::ESP_ZERO_XACC; + MachineBasicBlock *MBB = MI.getParent(); + BuildMI(*MBB, MI, DL, TII.get(Opc)); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_AMS_S16_LD_INCP_P: { + unsigned Opc = RISCV::ESP_FFT_AMS_S16_LD_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_2 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(6); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(7); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + 
return MBB; + } + case RISCV::ESP_FFT_AMS_S16_LD_INCP_UAUP_P: { + unsigned Opc = RISCV::ESP_FFT_AMS_S16_LD_INCP_UAUP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_2 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(6); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(7); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_incp_uaup " + "first argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_AMS_S16_LD_R32_DECP_P: { + unsigned Opc = 
RISCV::ESP_FFT_AMS_S16_LD_R32_DECP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_2 = MI.getOperand(4); + MachineOperand &QU = MI.getOperand(5); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(6); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(7); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_fft_ams_s16_ld_r32_decp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_AMS_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_FFT_AMS_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = 
MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_ams_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_ams_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_fft_ams_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_ams_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &RS2 = MI.getOperand(5); + MachineOperand &SELECT_2 = MI.getOperand(6); + MachineOperand &QZ = MI.getOperand(7); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_ams_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + unsigned R2 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(R2, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_BITREV_P: { + unsigned Opc = RISCV::ESP_FFT_BITREV; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QV = MI.getOperand(1); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_fft_bitrev first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + 
unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QVVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_CMUL_S16_LD_XP_P: { + unsigned Opc = RISCV::ESP_FFT_CMUL_S16_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(3); + MachineOperand &SELECT_8 = MI.getOperand(4); + MachineOperand &QZ = MI.getOperand(5); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(6); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_cmul_s16_ld_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_CMUL_S16_ST_XP_P: { + unsigned Opc = RISCV::ESP_FFT_CMUL_S16_ST_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QX = MI.getOperand(1); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && 
"Unexpected value of esp_fft_cmul_s16_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_cmul_s16_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(3); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_cmul_s16_st_xp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(4); + MachineOperand &SELECT_4 = MI.getOperand(5); + MachineOperand &UPD_4 = MI.getOperand(6); + MachineOperand &SELECT_8 = MI.getOperand(7); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()) + .addImm(UPD_4.getImm()) + .addImm(SELECT_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_R2BF_S16_P: { + unsigned Opc = RISCV::ESP_FFT_R2BF_S16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_r2bf_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_r2bf_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &SELECT_2 = MI.getOperand(2); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_r2bf_s16 first argument, " + "it must bi in range [0,7]"); + MachineOperand &QV = MI.getOperand(4); + unsigned QVVal = QV.getImm(); + assert(QVVal < 8 && "Unexpected value of esp_fft_r2bf_s16 first argument, " + "it must bi in range [0,7]"); + 
BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QVVal, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_R2BF_S16_ST_INCP_P: { + unsigned Opc = RISCV::ESP_FFT_R2BF_S16_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QX = MI.getOperand(0); + unsigned QXVal = QX.getImm(); + assert(QXVal < 8 && "Unexpected value of esp_fft_r2bf_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_fft_r2bf_s16_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &QZ = MI.getOperand(4); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_fft_r2bf_s16_st_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QXVal) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addImm(SELECT_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_FFT_VST_R32_DECP_P: { + unsigned Opc = RISCV::ESP_FFT_VST_R32_DECP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_fft_vst_r32_decp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &SELECT_2 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + 
.addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_128_USAR_IP_P: { + unsigned Opc = RISCV::ESP_LD_128_USAR_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_ld_128_usar_ip first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_128_USAR_XP_P: { + unsigned Opc = RISCV::ESP_LD_128_USAR_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_ld_128_usar_xp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_XACC_IP_P: { + unsigned Opc = RISCV::ESP_LD_XACC_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_8 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + 
.addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_S16_128_IP_P: { + unsigned Opc = RISCV::ESP_LDQA_S16_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_S16_128_XP_P: { + unsigned Opc = RISCV::ESP_LDQA_S16_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_S8_128_IP_P: { + unsigned Opc = RISCV::ESP_LDQA_S8_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_S8_128_XP_P: { + unsigned Opc = RISCV::ESP_LDQA_S8_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + 
.addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_U16_128_IP_P: { + unsigned Opc = RISCV::ESP_LDQA_U16_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_U16_128_XP_P: { + unsigned Opc = RISCV::ESP_LDQA_U16_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_U8_128_IP_P: { + unsigned Opc = RISCV::ESP_LDQA_U8_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDQA_U8_128_XP_P: { + unsigned Opc = RISCV::ESP_LDQA_U8_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + 
MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_16_IP_P: { + unsigned Opc = RISCV::ESP_VLDBC_16_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_4 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_16_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_16_XP_P: { + unsigned Opc = RISCV::ESP_VLDBC_16_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_16_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_32_IP_P: { + unsigned Opc = RISCV::ESP_VLDBC_32_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_4 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_32_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + 
.addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_32_XP_P: { + unsigned Opc = RISCV::ESP_VLDBC_32_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_32_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_8_IP_P: { + unsigned Opc = RISCV::ESP_VLDBC_8_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_4 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_8_ip first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_4.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDBC_8_XP_P: { + unsigned Opc = RISCV::ESP_VLDBC_8_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldbc_8_xp first argument, it " + "must bi in range [0,7]"); + const 
TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_S16_IP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_S16_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_16_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_s16_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_s16_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_S16_XP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_S16_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_s16_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_s16_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 
+ QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_S8_IP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_S8_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_16_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_s8_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_s8_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_S8_XP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_S8_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_s8_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_s8_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + 
.addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_U16_IP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_U16_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_16_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_u16_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_u16_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_U16_XP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_U16_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_u16_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_u16_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case 
RISCV::ESP_VLDEXT_U8_IP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_U8_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_16_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_u8_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_u8_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_16_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDEXT_U8_XP_P: { + unsigned Opc = RISCV::ESP_VLDEXT_U8_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldext_u8_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(3); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldext_u8_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLDHBC_16_INCP_P: { + unsigned Opc = RISCV::ESP_VLDHBC_16_INCP; + MachineBasicBlock *MBB = 
MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vldhbc_16_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_vldhbc_16_incp first " + "argument, it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_QACC_H_H_128_IP_P: { + unsigned Opc = RISCV::ESP_LD_QACC_H_H_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_QACC_H_L_128_IP_P: { + unsigned Opc = RISCV::ESP_LD_QACC_H_L_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_QACC_L_H_128_IP_P: { + unsigned Opc = RISCV::ESP_LD_QACC_L_H_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = 
MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_QACC_L_L_128_IP_P: { + unsigned Opc = RISCV::ESP_LD_QACC_L_L_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LD_UA_STATE_IP_P: { + unsigned Opc = RISCV::ESP_LD_UA_STATE_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_LDXQ_32_P: { + unsigned Opc = RISCV::ESP_LDXQ_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_ldxq_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(2); + MachineOperand &SELECT_8 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_ldxq_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + 
.addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_4.getImm()) + .addImm(SELECT_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_QACC_H_H_128_IP_P: { + unsigned Opc = RISCV::ESP_ST_QACC_H_H_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_QACC_H_L_128_IP_P: { + unsigned Opc = RISCV::ESP_ST_QACC_H_L_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_QACC_L_H_128_IP_P: { + unsigned Opc = RISCV::ESP_ST_QACC_L_H_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_QACC_L_L_128_IP_P: { + unsigned Opc = RISCV::ESP_ST_QACC_L_L_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = 
MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_UA_STATE_IP_P: { + unsigned Opc = RISCV::ESP_ST_UA_STATE_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_STXQ_32_P: { + unsigned Opc = RISCV::ESP_STXQ_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_stxq_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_stxq_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_4 = MI.getOperand(3); + MachineOperand &SELECT_8 = MI.getOperand(4); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QWVal) + .addReg(RISCV::Q0 + QUVal) + .addImm(SELECT_4.getImm()) + .addImm(SELECT_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_128_IP_P: { + unsigned Opc = RISCV::ESP_VLD_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_16 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vld_128_ip first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = 
MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_128_XP_P: { + unsigned Opc = RISCV::ESP_VLD_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vld_128_xp first argument, it " + "must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_H_64_IP_P: { + unsigned Opc = RISCV::ESP_VLD_H_64_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_8 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vld_h_64_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_H_64_XP_P: { + unsigned Opc = RISCV::ESP_VLD_H_64_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of 
esp_vld_h_64_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_L_64_IP_P: { + unsigned Opc = RISCV::ESP_VLD_L_64_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_8 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vld_l_64_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VLD_L_64_XP_P: { + unsigned Opc = RISCV::ESP_VLD_L_64_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vld_l_64_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_128_IP_P: { + unsigned Opc = RISCV::ESP_VST_128_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + 
assert(QUVal < 8 && "Unexpected value of esp_vst_128_ip first argument, it " + "must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_16 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_128_XP_P: { + unsigned Opc = RISCV::ESP_VST_128_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vst_128_xp first argument, it " + "must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_H_64_IP_P: { + unsigned Opc = RISCV::ESP_VST_H_64_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vst_h_64_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_8 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_H_64_XP_P: { + unsigned Opc = RISCV::ESP_VST_H_64_XP; + MachineBasicBlock *MBB = 
MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vst_h_64_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_L_64_IP_P: { + unsigned Opc = RISCV::ESP_VST_L_64_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QU = MI.getOperand(0); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vst_l_64_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &OFFSET_256_8 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VST_L_64_XP_P: { + unsigned Opc = RISCV::ESP_VST_L_64_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vst_l_64_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QUVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SLCI_2Q_P: { + 
unsigned Opc = RISCV::ESP_SLCI_2Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_slci_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_slci_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SLCXXP_2Q_P: { + unsigned Opc = RISCV::ESP_SLCXXP_2Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_slcxxp_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(3); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_slcxxp_2q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRC_Q_P: { + unsigned Opc = RISCV::ESP_SRC_Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_src_q first argument, it must " + "bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_src_q 
first argument, it must " + "bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_src_q first argument, it must " + "bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRC_Q_LD_IP_P: { + unsigned Opc = RISCV::ESP_SRC_Q_LD_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_src_q_ld_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(1); + MachineOperand &QW = MI.getOperand(2); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_src_q_ld_ip first argument, " + "it must bi in range [0,7]"); + MachineOperand &OFFSET_256_16 = MI.getOperand(3); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_src_q_ld_ip first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QWVal) + .addImm(OFFSET_256_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRC_Q_LD_XP_P: { + unsigned Opc = RISCV::ESP_SRC_Q_LD_XP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS2 = MI.getOperand(0); + MachineOperand &QY = MI.getOperand(1); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_src_q_ld_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + 
MachineOperand &QW = MI.getOperand(3); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_src_q_ld_xp first argument, " + "it must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(4); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_src_q_ld_xp first argument, " + "it must bi in range [0,7]"); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QYVal) + .addReg(RS1.getReg()) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRC_Q_QUP_P: { + unsigned Opc = RISCV::ESP_SRC_Q_QUP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_src_q_qup first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_src_q_qup first argument, it " + "must bi in range [0,7]"); + MachineOperand &QZ = MI.getOperand(2); + unsigned QZVal = QZ.getImm(); + assert(QZVal < 8 && "Unexpected value of esp_src_q_qup first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QZVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCI_2Q_P: { + unsigned Opc = RISCV::ESP_SRCI_2Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_srci_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = 
MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srci_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &SELECT_16 = MI.getOperand(2); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_16.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_S16_Q_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_S16_Q_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srcmb_s16_q_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_s16_q_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_S16_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_S16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_s16_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_S8_Q_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_S8_Q_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + 
assert(QWVal < 8 && "Unexpected value of esp_srcmb_s8_q_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_s8_q_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_S8_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_S8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_s8_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_U16_Q_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_U16_Q_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srcmb_u16_q_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_u16_q_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_U16_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_U16_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = 
MI.getOperand(0); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_u16_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_U8_Q_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_U8_Q_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QW = MI.getOperand(0); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srcmb_u8_q_qacc first " + "argument, it must bi in range [0,7]"); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_u8_q_qacc first " + "argument, it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCMB_U8_QACC_P: { + unsigned Opc = RISCV::ESP_SRCMB_U8_QACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &SELECT_2 = MI.getOperand(1); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_srcmb_u8_qacc first argument, " + "it must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RS1.getReg()) + .addImm(SELECT_2.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCQ_128_ST_INCP_P: { + unsigned Opc = RISCV::ESP_SRCQ_128_ST_INCP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of 
esp_srcq_128_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srcq_128_st_incp first " + "argument, it must bi in range [0,7]"); + MachineOperand &RS1 = MI.getOperand(2); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRCXXP_2Q_P: { + unsigned Opc = RISCV::ESP_SRCXXP_2Q; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &RS2 = MI.getOperand(1); + MachineOperand &QY = MI.getOperand(2); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_srcxxp_2q first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(3); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_srcxxp_2q first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QYVal, RegState::Define) + .addReg(RISCV::Q0 + QWVal, RegState::Define) + .addReg(RS1.getReg()) + .addReg(RS2.getReg()) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRS_S_XACC_P: { + unsigned Opc = RISCV::ESP_SRS_S_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_SRS_U_XACC_P: { + unsigned Opc = RISCV::ESP_SRS_U_XACC; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = 
MI.getOperand(0); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSL_32_P: { + unsigned Opc = RISCV::ESP_VSL_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsl_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsl_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSLD_16_P: { + unsigned Opc = RISCV::ESP_VSLD_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsld_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsld_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsld_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSLD_32_P: { + unsigned Opc = RISCV::ESP_VSLD_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsld_32 first argument, it " + "must bi in range 
[0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsld_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsld_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSLD_8_P: { + unsigned Opc = RISCV::ESP_VSLD_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsld_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsld_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsld_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSR_S32_P: { + unsigned Opc = RISCV::ESP_VSR_S32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsr_s32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsr_s32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return 
MBB; + } + case RISCV::ESP_VSR_U32_P: { + unsigned Opc = RISCV::ESP_VSR_U32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsr_u32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(1); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsr_u32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSRD_16_P: { + unsigned Opc = RISCV::ESP_VSRD_16; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsrd_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsrd_16 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsrd_16 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSRD_32_P: { + unsigned Opc = RISCV::ESP_VSRD_32; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsrd_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsrd_32 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = 
QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsrd_32 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_VSRD_8_P: { + unsigned Opc = RISCV::ESP_VSRD_8; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &QY = MI.getOperand(0); + unsigned QYVal = QY.getImm(); + assert(QYVal < 8 && "Unexpected value of esp_vsrd_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QW = MI.getOperand(1); + unsigned QWVal = QW.getImm(); + assert(QWVal < 8 && "Unexpected value of esp_vsrd_8 first argument, it " + "must bi in range [0,7]"); + MachineOperand &QU = MI.getOperand(2); + unsigned QUVal = QU.getImm(); + assert(QUVal < 8 && "Unexpected value of esp_vsrd_8 first argument, it " + "must bi in range [0,7]"); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(RISCV::Q0 + QUVal, RegState::Define) + .addReg(RISCV::Q0 + QYVal) + .addReg(RISCV::Q0 + QWVal); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_S_XACC_IP_P: { + unsigned Opc = RISCV::ESP_ST_S_XACC_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_8 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + case RISCV::ESP_ST_U_XACC_IP_P: { + unsigned Opc = RISCV::ESP_ST_U_XACC_IP; + MachineBasicBlock *MBB = MI.getParent(); + MachineOperand &RS1 = MI.getOperand(0); + MachineOperand &OFFSET_256_8 = MI.getOperand(1); + const TargetRegisterClass *RC = &RISCV::GPRPIERegClass; + unsigned R1 = MRI.createVirtualRegister(RC); + BuildMI(*MBB, MI, DL, TII.get(Opc)) + .addReg(R1, 
RegState::Define) + .addReg(RS1.getReg()) + .addImm(OFFSET_256_8.getImm()); + + MI.eraseFromParent(); + return MBB; + } + } +} diff --git a/llvm/lib/Target/RISCV/RISCVESP32P4Operands.td b/llvm/lib/Target/RISCV/RISCVESP32P4Operands.td new file mode 100644 index 00000000000000..0d77783aff0e24 --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVESP32P4Operands.td @@ -0,0 +1,134 @@ +//===- RISCVESP32P4Operands.td - ESP32P4 instruction operands -*- tblgen-*--===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// Immediate operands with a shared generic render method. +class P4ImmAsmOperand : AsmOperandClass { + let Name = name; + let RenderMethod = "addImmOperands"; + let DiagnosticType = !strconcat("Invalid", name); +} + +class Immediate + : Operand, ImmLeaf { + let PrintMethod = "print"#asmop; + let ParserMatchClass = !cast(asmop); +} +/// --------------------------- /// + +// imm8 predicate - Immediate in the range [-128,127] +def Imm8_AsmOperand : P4ImmAsmOperand<"Imm8">; +def imm8 : Immediate= -128 && Imm <= 127; }], "Imm8_AsmOperand"> { + let EncoderMethod = "getImm8OpValue"; + let DecoderMethod = "decodeImm8Operand"; +} + +// offset_16_16 predicate - 4-bit signed immediate in the range [-128,112] with an interval +// of 16. +def Offset_16_16_AsmOperand: P4ImmAsmOperand<"Offset_16_16">; +def offset_16_16: Immediate= -128 && Imm <= 112) && ((Imm & 0xf) == 0); }], "Offset_16_16_AsmOperand"> { + let EncoderMethod = "getOffset_16_16OpValue"; + let DecoderMethod = "decodeOffset_16_16Operand"; +} + +// offset_256_8 predicate - 4-bit signed immediate in the range [-1024,1016] with an interval +// of 8. 
+def Offset_256_8_AsmOperand: P4ImmAsmOperand<"Offset_256_8">; +def offset_256_8: Immediate= -1024 && Imm <= 1016) && ((Imm & 0x7) == 0); }], "Offset_256_8_AsmOperand"> { + let EncoderMethod = "getOffset_256_8OpValue"; + let DecoderMethod = "decodeOffset_256_8Operand"; +} + +// offset_256_16 predicate - 8-bit signed immediate in the range [-2048,2032] with an interval +// of 16. +def Offset_256_16_AsmOperand: P4ImmAsmOperand<"Offset_256_16">; +def offset_256_16: Immediate= -2048 && Imm <= 2032) && ((Imm & 0xf) == 0); }], "Offset_256_16_AsmOperand"> { + let EncoderMethod = "getOffset_256_16OpValue"; + let DecoderMethod = "decodeOffset_256_16Operand"; +} + +// offset_256_4 predicate - 4-bit signed immediate in the range [-512,508] with an interval +// of 4. +def Offset_256_4_AsmOperand: P4ImmAsmOperand<"Offset_256_4">; +def offset_256_4: Immediate= -512 && Imm <= 508) && ((Imm & 0x3) == 0); }], "Offset_256_4_AsmOperand"> { + let EncoderMethod = "getOffset_256_4OpValue"; + let DecoderMethod = "decodeOffset_256_4Operand"; +} + +// select_2 predicate - Immediate in the range [0,1] +def Select_2_AsmOperand: P4ImmAsmOperand<"Select_2">; +def select_2: Immediate= 0 && Imm <= 1; }], "Select_2_AsmOperand"> { + let EncoderMethod = "getSelect_2OpValue"; + let DecoderMethod = "decodeSelect_2Operand"; +} + +// select_4 predicate - Immediate in the range [0,3] +def Select_4_AsmOperand: P4ImmAsmOperand<"Select_4">; +def select_4: Immediate= 0 && Imm <= 3; }], "Select_4_AsmOperand"> { + let EncoderMethod = "getSelect_4OpValue"; + let DecoderMethod = "decodeSelect_4Operand"; +} + +// select_8 predicate - Immediate in the range [0,7] +def Select_8_AsmOperand: P4ImmAsmOperand<"Select_8">; +def select_8: Immediate= 0 && Imm <= 7; }], "Select_8_AsmOperand"> { + let EncoderMethod = "getSelect_8OpValue"; + let DecoderMethod = "decodeSelect_8Operand"; +} + +// select_16 predicate - Immediate in the range [0,15] +def Select_16_AsmOperand: P4ImmAsmOperand<"Select_16">; +def select_16: 
Immediate= 0 && Imm <= 15; }], "Select_16_AsmOperand"> { + let EncoderMethod = "getSelect_16OpValue"; + let DecoderMethod = "decodeSelect_16Operand"; +} + +def uimm10_step4 : Operand { + // let ParserMatchClass = Simm21Lsb0JALAsmOperand; + let ParserMatchClass = UImmAsmOperand<10>; + let PrintMethod = "printBranchOperand"; + let EncoderMethod = "getUImm10_Step4Operand"; + let DecoderMethod = "decodeUImm10_Step4Operand"; + let MCOperandPredicate = [{ + int64_t Imm; + if (MCOp.evaluateAsConstantImm(Imm)) + return isUInt<10>(); + return MCOp.isBareSymbolRef(); + }]; + let OperandType = "OPERAND_PCREL"; +} + +def uimm13_step4 : Operand { + let ParserMatchClass = UImmAsmOperand<13>; + let PrintMethod = "printBranchOperand"; + let EncoderMethod = "getUImm13_Step4Operand"; + let DecoderMethod = "decodeUImm13_Step4Operand"; + let MCOperandPredicate = [{ + int64_t Imm; + if (MCOp.evaluateAsConstantImm(Imm)) + return isUInt<10>(); + return MCOp.isBareSymbolRef(); + }]; + let OperandType = "OPERAND_PCREL"; +} + +// A 12-bit unsigned immediate. 
+def uimm12 : RISCVOp, + ImmLeaf(Imm);}]> { + let ParserMatchClass = UImmAsmOperand<12>; + let EncoderMethod = "getImmOpValue"; + let DecoderMethod = "decodeUImmOperand<12>"; + let OperandType = "OPERAND_UIMM12"; + let MCOperandPredicate = [{ + uint64_t Imm; + if (!MCOp.evaluateAsConstantImm(Imm)) + return false; + return isUInt<12>(Imm); + }]; +} diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td index 26451c80f57b42..916e163cc60c80 100644 --- a/llvm/lib/Target/RISCV/RISCVFeatures.td +++ b/llvm/lib/Target/RISCV/RISCVFeatures.td @@ -977,6 +977,9 @@ def HasVendorXCVbi AssemblerPredicate<(all_of FeatureVendorXCVbi), "'XCVbi' (CORE-V Immediate Branching)">; +def FeatureVendorESP32P4 : SubtargetFeature<"xesppie", "HasVendorESP32P4", "true", "'Espressif ESP32P4'">; +def HasVendorESP32P4 : Predicate<"Subtarget->hasVendorESP32P4()">, AssemblerPredicate<(all_of FeatureVendorESP32P4), "'Espressif ESP32P4'">; + //===----------------------------------------------------------------------===// // LLVM specific features and extensions //===----------------------------------------------------------------------===// diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index a0cec426002b6f..396a7b8f2d1c95 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -118,6 +118,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, // Set up the register classes. 
addRegisterClass(XLenVT, &RISCV::GPRRegClass); + + if (Subtarget.hasVendorESP32P4()) { + static const MVT::SimpleValueType QRVec[] = {MVT::v16i8, MVT::v4i32}; + for (auto st : QRVec) + addRegisterClass(st, &RISCV::QRRegClass); + } + if (Subtarget.is64Bit() && RV64LegalI32) addRegisterClass(MVT::i32, &RISCV::GPRRegClass); @@ -17161,9 +17168,14 @@ static MachineBasicBlock *emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB, MachineBasicBlock * RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const { + const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); + MachineFunction *MF = BB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + DebugLoc DL = MI.getDebugLoc(); + switch (MI.getOpcode()) { default: - llvm_unreachable("Unexpected instr type to insert"); + return emitDSPInstrWithCustomInserter(MI, BB, TII, MF, MRI, DL); case RISCV::ReadCycleWide: assert(!Subtarget.is64Bit() && "ReadCycleWrite is only to be used on riscv32"); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h index 30b9ad7e6f6f32..7b506d65f218f3 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -829,6 +829,10 @@ class RISCVTargetLowering : public TargetLowering { MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const override; + MachineBasicBlock *emitDSPInstrWithCustomInserter( + MachineInstr &MI, MachineBasicBlock *MBB, const TargetInstrInfo &TII, + MachineFunction *MF, MachineRegisterInfo &MRI, DebugLoc DL) const; + /// RISCVCCAssignFn - This target-specific function extends the default /// CCValAssign with additional information used to lower RISC-V calling /// conventions. 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormatsESP32P4.td b/llvm/lib/Target/RISCV/RISCVInstrFormatsESP32P4.td new file mode 100644 index 00000000000000..6adaa56cc656d0 --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVInstrFormatsESP32P4.td @@ -0,0 +1,42 @@ +//====-- RISCVInstrFormatsESP32P4.td - ESP32P4 Instr Formats -*- tablegen -*-=// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file describes the RISC-V ESP32-P4 extension instruction formats. +// +//===----------------------------------------------------------------------===// + +class Esp32P4Inst pattern, + InstrItinClass itin = NoItinerary> + : Instruction, Requires<[HasVendorESP32P4]> { + let Namespace = "RISCV"; + let DecoderNamespace = "ESP32P4"; + field bits<32> Inst; + field bits<32> SoftFail = 0; + + let TSFlags{4-0} = 23; + let Size = 4; + + let OutOperandList = outs; + let InOperandList = ins; + + let AsmString = asmstr; + let Pattern = pattern; + + let Itinerary = itin; +} + + +// Pseudo instructions +class PseudoESP32P4 pattern> + : Esp32P4Inst { + let isPseudo = 1; + let isCodeGenOnly = 1; + let mayLoad = 1; + let mayStore = 1; + let hasSideEffects = 1; +} \ No newline at end of file diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp index d5b1ddfbeb3dc9..3d63fd8ae4d459 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -2007,6 +2007,12 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI, case RISCVOp::OPERAND_UIMM10_LSB00_NONZERO: Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0); break; + case RISCVOp::OPERAND_UIMM10_STEP4: + Ok = isUInt<10>(Imm) && (Imm != 0); + break; + case RISCVOp::OPERAND_UIMM13_STEP4: + Ok = isUInt<13>(Imm) && 
(Imm != 0); + break; case RISCVOp::OPERAND_ZERO: Ok = Imm == 0; break; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td index 114329c2c7c5f3..69bbd3b411e578 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -2179,6 +2179,7 @@ include "RISCVInstrInfoXVentana.td" include "RISCVInstrInfoXTHead.td" include "RISCVInstrInfoXSf.td" include "RISCVInstrInfoXCV.td" +include "RISCVInstrInfoESP32P4.td" //===----------------------------------------------------------------------===// // Global ISel diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td b/llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td new file mode 100644 index 00000000000000..bd97a927ea98f6 --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoESP32P4.td @@ -0,0 +1,15603 @@ +//===- RISCVInstrInfoP4.td - RISCV Target Description -*- tablegen -*------===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file describes the RISCV ESP32P4 DSP instructions in TableGen format. 
+// +// These definitions are generated +// This file is generated +// +//===----------------------------------------------------------------------===// + +include "RISCVESP32P4Operands.td" +include "RISCVInstrFormatsESP32P4.td" +include "RISCVInstrInfoP4HWLP.td" + +// This file is generated + +def ESP_VCMULAS_S16_QACC_H: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vcmulas.s16.qacc.h\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_H_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vcmulas_s16_qacc_h_p $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_h timm:$qx, timm:$qy)]>; + +def ESP_VCMULAS_S16_QACC_H_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vcmulas.s16.qacc.h.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-19} = off1616{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let 
Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_H_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vcmulas_s16_qacc_h_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_h_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VCMULAS_S16_QACC_H_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vcmulas.s16.qacc.h.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_H_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vcmulas_s16_qacc_h_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_h_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VCMULAS_S16_QACC_L: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vcmulas.s16.qacc.l\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let 
Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_L_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vcmulas_s16_qacc_l_p $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_l timm:$qx, timm:$qy)]>; + +def ESP_VCMULAS_S16_QACC_L_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vcmulas.s16.qacc.l.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-19} = off1616{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_L_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vcmulas_s16_qacc_l_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_l_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VCMULAS_S16_QACC_L_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins 
GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vcmulas.s16.qacc.l.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S16_QACC_L_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vcmulas_s16_qacc_l_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vcmulas_s16_qacc_l_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VCMULAS_S8_QACC_H: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vcmulas.s8.qacc.h\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_H_P : PseudoESP32P4<(outs), (ins imm8:$qx, 
imm8:$qy), + "!esp_vcmulas_s8_qacc_h_p $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_h timm:$qx, timm:$qy)]>; + +def ESP_VCMULAS_S8_QACC_H_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vcmulas.s8.qacc.h.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-19} = off1616{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_H_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vcmulas_s8_qacc_h_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_h_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VCMULAS_S8_QACC_H_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vcmulas.s8.qacc.h.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + 
let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_H_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vcmulas_s8_qacc_h_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_h_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VCMULAS_S8_QACC_L: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vcmulas.s8.qacc.l\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_L_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vcmulas_s8_qacc_l_p $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_l timm:$qx, timm:$qy)]>; + +def ESP_VCMULAS_S8_QACC_L_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vcmulas.s8.qacc.l.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 
0; + let Inst{23} = 1; + let Inst{22-19} = off1616{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_L_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vcmulas_s8_qacc_l_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_l_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VCMULAS_S8_QACC_L_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vcmulas.s8.qacc.l.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMULAS_S8_QACC_L_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vcmulas_s8_qacc_l_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vcmulas_s8_qacc_l_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.s16.qacc\t $qx, 
$qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_s16_qacc_p $qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_S16_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s16.qacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_s16_qacc_ld_ip_p $qu, $rs1, $off1616, 
$qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_S16_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s16.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s16_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S16_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s16.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let 
Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_s16_qacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_S16_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.s16.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_s16_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_S16_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.s16.xacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let 
Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_XACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_s16_xacc_p $qx, $qy", + [(int_riscv_esp_vmulas_s16_xacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_S16_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s16.xacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_s16_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s16_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_S16_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins 
GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s16.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s16_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s16_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S16_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s16.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; 
+} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_s16_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s16_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_S16_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.s16.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_s16_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s16_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_S8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.s8.qacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + 
let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_s8_qacc_p $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_S8_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s8.qacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_s8_qacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_S8_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s8.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let 
mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s8_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S8_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s8.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_s8_qacc_st_ip_p $qu, $rs1, 
$off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_S8_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.s8.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_s8_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_S8_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.s8.xacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + 
let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_XACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_s8_xacc_p $qx, $qy", + [(int_riscv_esp_vmulas_s8_xacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_S8_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s8.xacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_s8_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s8_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_S8_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s8.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 
rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s8_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s8_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S8_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.s8.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_s8_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_s8_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_S8_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, 
QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.s8.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_s8_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_s8_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_U16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.u16.qacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + 
"!esp_vmulas_u16_qacc_p $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_U16_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u16.qacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_u16_qacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_U16_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u16.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + 
let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u16_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U16_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u16.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_u16_qacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_U16_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.u16.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> 
rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_u16_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_U16_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.u16.xacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_XACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_u16_xacc_p $qx, $qy", + [(int_riscv_esp_vmulas_u16_xacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_U16_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, 
GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u16.xacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_u16_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u16_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_U16_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u16.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; 
+} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u16_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u16_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U16_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u16.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_u16_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u16_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_U16_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.u16.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; 
+ let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_u16_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u16_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_U8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.u8.qacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_u8_qacc_p $qx, $qy", + [(int_riscv_esp_vmulas_u8_qacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_U8_QACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u8.qacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let 
mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_u8_qacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u8_qacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_U8_QACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u8.qacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u8_qacc_ld_xp_p $qu, $rs1, $rs2, $qx, 
$qy", + [(int_riscv_esp_vmulas_u8_qacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U8_QACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u8.qacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_u8_qacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u8_qacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_U8_QACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.u8.qacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = 
qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_u8_qacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u8_qacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_U8_XACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy), + "esp.vmulas.u8.xacc\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_XACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vmulas_u8_xacc_p $qx, $qy", + [(int_riscv_esp_vmulas_u8_xacc timm:$qx, timm:$qy)]>; + +def ESP_VMULAS_U8_XACC_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vmulas.u8.xacc.ld.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} 
= 0; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_XACC_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu), + "!esp_vmulas_u8_xacc_ld_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u8_xacc_ld_ip timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$off1616, timm:$qu)]>; + +def ESP_VMULAS_U8_XACC_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u8.xacc.ld.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_XACC_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u8_xacc_ld_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u8_xacc_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U8_XACC_ST_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, 
offset_16_16:$off1616), + "esp.vmulas.u8.xacc.st.ip\t $qu, $rs1, $off1616, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<4> off1616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21-19} = off1616{3-1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off1616{0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_XACC_ST_IP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, offset_16_16:$off1616), + "!esp_vmulas_u8_xacc_st_ip_p $qu, $rs1, $off1616, $qx, $qy", + [(int_riscv_esp_vmulas_u8_xacc_st_ip timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$off1616)]>; + +def ESP_VMULAS_U8_XACC_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmulas.u8.xacc.st.xp\t $qu, $rs1, $rs2, $qx, $qy", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in +def ESP_VMULAS_U8_XACC_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1), + "!esp_vmulas_u8_xacc_st_xp_p $qu, $rs1, $rs2, $qx, $qy", + [(int_riscv_esp_vmulas_u8_xacc_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VMULAS_S16_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s16.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S16_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s16_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", + [(int_riscv_esp_vmulas_s16_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_S8_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.s8.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + 
let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_S8_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_s8_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", + [(int_riscv_esp_vmulas_s8_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U16_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u16.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U16_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u16_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", + [(int_riscv_esp_vmulas_u16_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VMULAS_U8_QACC_LDBC_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmulas.u8.qacc.ldbc.incp\t $qu, $rs1, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> 
rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMULAS_U8_QACC_LDBC_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qu), + "!esp_vmulas_u8_qacc_ldbc_incp_p $qu, $rs1, $qx, $qy", + [(int_riscv_esp_vmulas_u8_qacc_ldbc_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VSMULAS_S16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), + "esp.vsmulas.s16.qacc\t $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<4> sel16; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_S16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_16:$sel16), + "!esp_vsmulas_s16_qacc_p $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_s16_qacc timm:$qx, timm:$qy, timm:$sel16)]>; + +def 
ESP_VSMULAS_S16_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_16:$sel16), + "esp.vsmulas.s16.qacc.ld.incp\t $qu, $rs1, $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> sel16; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-19} = sel16{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_S16_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), + "!esp_vsmulas_s16_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_s16_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + +def ESP_VSMULAS_S8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), + "esp.vsmulas.s8.qacc\t $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<4> sel16; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def 
ESP_VSMULAS_S8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_16:$sel16), + "!esp_vsmulas_s8_qacc_p $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_s8_qacc timm:$qx, timm:$qy, timm:$sel16)]>; + +def ESP_VSMULAS_S8_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_16:$sel16), + "esp.vsmulas.s8.qacc.ld.incp\t $qu, $rs1, $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> sel16; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-19} = sel16{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_S8_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), + "!esp_vsmulas_s8_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_s8_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + +def ESP_VSMULAS_U16_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), + "esp.vsmulas.u16.qacc\t $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<4> sel16; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let 
Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_U16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_16:$sel16), + "!esp_vsmulas_u16_qacc_p $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_u16_qacc timm:$qx, timm:$qy, timm:$sel16)]>; + +def ESP_VSMULAS_U16_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_16:$sel16), + "esp.vsmulas.u16.qacc.ld.incp\t $qu, $rs1, $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> sel16; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-19} = sel16{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_U16_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), + "!esp_vsmulas_u16_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_u16_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + +def ESP_VSMULAS_U8_QACC: Esp32P4Inst<(outs), (ins QR:$qx, QR:$qy, select_16:$sel16), + "esp.vsmulas.u8.qacc\t $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<4> sel16; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let 
Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_U8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_16:$sel16), + "!esp_vsmulas_u8_qacc_p $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_u8_qacc timm:$qx, timm:$qy, timm:$sel16)]>; + +def ESP_VSMULAS_U8_QACC_LD_INCP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_16:$sel16), + "esp.vsmulas.u8.qacc.ld.incp\t $qu, $rs1, $qx, $qy, $sel16", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<4> sel16; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-19} = sel16{3-0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSMULAS_U8_QACC_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_16:$sel16, imm8:$qu), + "!esp_vsmulas_u8_qacc_ld_incp_p $qu, $rs1, $qx, $qy, $sel16", + [(int_riscv_esp_vsmulas_u8_qacc_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel16, timm:$qu)]>; + +def ESP_CMUL_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), + "esp.cmul.s16\t $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<2> sel4; + bits<3> qz; 
+ let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16-15} = sel4{1-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_4:$sel4, imm8:$qz), + "!esp_cmul_s16_p $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s16 timm:$qx, timm:$qy, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_S16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.s16.ld.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), + "!esp_cmul_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s16_ld_incp timm:$qx, timm:$qy, 
GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + +def ESP_CMUL_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.s16.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), + "!esp_cmul_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), + "esp.cmul.s8\t $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<2> sel4; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16-15} = sel4{1-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; 
+ let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_4:$sel4, imm8:$qz), + "!esp_cmul_s8_p $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s8 timm:$qx, timm:$qy, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_S8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.s8.ld.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), + "!esp_cmul_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + +def ESP_CMUL_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.s8.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; 
+ let Inst{22} = 1; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), + "!esp_cmul_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), + "esp.cmul.u16\t $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<2> sel4; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16-15} = sel4{1-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_4:$sel4, imm8:$qz), + "!esp_cmul_u16_p $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u16 timm:$qx, timm:$qy, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_U16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.u16.ld.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<3> qu; + 
bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), + "!esp_cmul_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + +def ESP_CMUL_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.u16.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), + "!esp_cmul_u16_st_incp_p $qu, $rs1, 
$qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy, select_4:$sel4), + "esp.cmul.u8\t $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<2> sel4; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16-15} = sel4{1-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_4:$sel4, imm8:$qz), + "!esp_cmul_u8_p $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u8 timm:$qx, timm:$qy, timm:$sel4, timm:$qz)]>; + +def ESP_CMUL_U8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.u8.ld.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let 
Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz, imm8:$qu), + "!esp_cmul_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz, timm:$qu)]>; + +def ESP_CMUL_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4), + "esp.cmul.u8.st.incp\t $qu, $rs1, $qz, $qx, $qy, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21-20} = sel4{1-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_CMUL_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), + "!esp_cmul_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy, $sel4", + [(int_riscv_esp_cmul_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + +def ESP_MAX_S16_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.s16.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 
qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_S16_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_s16_a_p $qw, $rd", + [(int_riscv_esp_max_s16_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MAX_S32_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.s32.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_S32_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_s32_a_p $qw, $rd", + [(int_riscv_esp_max_s32_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MAX_S8_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.s8.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 
0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_S8_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_s8_a_p $qw, $rd", + [(int_riscv_esp_max_s8_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MAX_U16_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.u16.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_U16_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_u16_a_p $qw, $rd", + [(int_riscv_esp_max_u16_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MAX_U32_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.u32.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + 
let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_U32_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_u32_a_p $qw, $rd", + [(int_riscv_esp_max_u32_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MAX_U8_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.max.u8.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MAX_U8_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_max_u8_a_p $qw, $rd", + [(int_riscv_esp_max_u8_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_S16_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.s16.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let 
Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_S16_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_s16_a_p $qw, $rd", + [(int_riscv_esp_min_s16_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_S32_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.s32.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_S32_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_s32_a_p $qw, $rd", + [(int_riscv_esp_min_s32_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_S8_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.s8.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let 
Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_S8_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_s8_a_p $qw, $rd", + [(int_riscv_esp_min_s8_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_U16_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.u16.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_U16_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_u16_a_p $qw, $rd", + [(int_riscv_esp_min_u16_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_U32_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.u32.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let 
Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_U32_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_u32_a_p $qw, $rd", + [(int_riscv_esp_min_u32_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_MIN_U8_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qw), + "esp.min.u8.a\t $qw, $rd", []> +{ + bits<3> qw; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MIN_U8_A_P : PseudoESP32P4<(outs), (ins imm8:$qw, GPRPIE:$rd), + "!esp_min_u8_a_p $qw, $rd", + [(int_riscv_esp_min_u8_a timm:$qw, GPRPIE:$rd)]>; + +def ESP_VABS_16: Esp32P4Inst<(outs QR:$qv), (ins QR:$qy), + "esp.vabs.16\t $qv, $qy", []> +{ + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 
0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VABS_16_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qv), + "!esp_vabs_16_p $qv, $qy", + [(int_riscv_esp_vabs_16 timm:$qy, timm:$qv)]>; + +def ESP_VABS_32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qy), + "esp.vabs.32\t $qv, $qy", []> +{ + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VABS_32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qv), + "!esp_vabs_32_p $qv, $qy", + [(int_riscv_esp_vabs_32 timm:$qy, timm:$qv)]>; + +def ESP_VABS_8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qy), + "esp.vabs.8\t $qv, $qy", []> +{ + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; 
+ let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VABS_8_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qv), + "!esp_vabs_8_p $qv, $qy", + [(int_riscv_esp_vabs_8 timm:$qy, timm:$qv)]>; + +def ESP_VADD_S16: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.s16\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_s16_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_s16 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_S16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.s16.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let 
Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_s16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_S16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.s16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_s16_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VADD_S32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.s32\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let 
Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_s32_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_s32 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_S32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.s32.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_s32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_S32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.s32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let 
Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_s32_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VADD_S8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.s8\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_s8_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_s8 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_S8_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.s8.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + 
let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_s8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_S8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.s8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_s8_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VADD_U16: 
Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.u16\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_u16_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_u16 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_U16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.u16.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_u16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + 
[(int_riscv_esp_vadd_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_U16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.u16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_u16_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VADD_U32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.u32\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in +def ESP_VADD_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_u32_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_u32 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_U32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.u32.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_u32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_U32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.u32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + 
let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_u32_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VADD_U8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vadd.u8\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vadd_u8_p $qv, $qx, $qy", + [(int_riscv_esp_vadd_u8 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VADD_U8_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vadd.u8.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + 
let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vadd_u8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VADD_U8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vadd.u8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VADD_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vadd_u8_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vadd_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VCLAMP_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, select_16:$sel16), + "esp.vclamp.s16\t $qz, $qx, $sel16", []> +{ + bits<3> qx; + bits<4> sel16; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28} = 0; + 
let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = sel16{3-2}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19-18} = sel16{1-0}; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCLAMP_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, select_16:$sel16, imm8:$qz), + "!esp_vclamp_s16_p $qz, $qx, $sel16", + [(int_riscv_esp_vclamp_s16 timm:$qx, timm:$sel16, timm:$qz)]>; + +def ESP_VMAX_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.s16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_s16_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_s16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_S16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.s16.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 
1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.s16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def 
ESP_VMAX_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.s32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_s32_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_s32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_S32_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.s32.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_s32_ld_incp_p $qu, $rs1, $qz, 
$qx, $qy", + [(int_riscv_esp_vmax_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_S32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.s32.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_s32_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMAX_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.s8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} 
+ +let usesCustomInserter = 1 in +def ESP_VMAX_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_s8_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_s8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_S8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.s8.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.s8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + 
let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMAX_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.u16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_u16_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_u16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_U16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.u16.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let 
Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.u16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_u16_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMAX_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.u32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = 
qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_u32_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_u32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_U32_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.u32.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_u32_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_U32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.u32.st.incp\t $qu, $rs1, $qz, $qx, 
$qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_u32_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMAX_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmax.u8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmax_u8_p $qz, $qx, $qy", + [(int_riscv_esp_vmax_u8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMAX_U8_LD_INCP: Esp32P4Inst<(outs QR:$qz, 
QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmax.u8.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmax_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMAX_U8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmax.u8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMAX_U8_ST_INCP_P : 
PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmax_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmax_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMIN_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmin.s16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmin_s16_p $qz, $qx, $qy", + [(int_riscv_esp_vmin_s16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMIN_S16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmin.s16.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 
1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmin_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMIN_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmin.s16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmin_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMIN_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmin.s32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let 
Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmin_s32_p $qz, $qx, $qy", + [(int_riscv_esp_vmin_s32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMIN_S32_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmin.s32.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmin_s32_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMIN_S32_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmin.s32.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = 
qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmin_s32_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMIN_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmin.s8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmin_s8_p $qz, $qx, $qy", + [(int_riscv_esp_vmin_s8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMIN_S8_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmin.s8.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let 
hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmin_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMIN_S8_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmin.s8.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmin_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMIN_U16: 
Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmin.u16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vmin_u16_p $qz, $qx, $qy", + [(int_riscv_esp_vmin_u16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VMIN_U16_LD_INCP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vmin.u16.ld.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu), + "!esp_vmin_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy", + 
[(int_riscv_esp_vmin_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>; + +def ESP_VMIN_U16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vmin.u16.st.incp\t $qu, $rs1, $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VMIN_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz), + "!esp_vmin_u16_st_incp_p $qu, $rs1, $qz, $qx, $qy", + [(int_riscv_esp_vmin_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>; + +def ESP_VMIN_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vmin.u32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in
def ESP_VMIN_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz),
    "!esp_vmin_u32_p $qz, $qx, $qy",
    [(int_riscv_esp_vmin_u32 timm:$qx, timm:$qy, timm:$qz)]>;

//===----------------------------------------------------------------------===//
// Shared encoding classes for the ESP32-P4 vector (QR) ALU instructions.
//
// NOTE(review): each class below is a mechanical 1:1 regrouping of the
// original per-bit `let Inst{N} = ...;` assignments into multi-bit fields;
// no encoding bit was changed.  The `funct`/`sel` class parameter carries
// exactly the bits that differed between the original copy-pasted records.
//
// rs1/rs2 are GPRPIE registers: only bits {4} and {2-0} of the 5-bit
// register number are encoded (bit 3 is deliberately skipped) — presumably
// a property of the PIE register-bank encoding; TODO confirm against the
// ESP32-P4 ISA manual.
//===----------------------------------------------------------------------===//

// qz = op(qx, qy): plain two-source vector ALU op.  funct = Inst{25-10}.
class EspVecBinQz<bits<16> funct, string mnem>
    : Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy),
                  mnem#"\t $qz, $qx, $qy", []> {
  bits<3> qx;
  bits<3> qy;
  bits<3> qz;
  let mayStore = 0;
  let mayLoad = 0;
  let hasSideEffects = 1;

  let Inst{31-29} = qx;
  let Inst{28-26} = qy;
  let Inst{25-10} = funct;
  let Inst{9-7}   = qz;
  let Inst{6-0}   = 0b1011111;
}

// qz = op(qx, qy) fused with "load qu, [rs1]; rs1 post-increment".
// rs1 is modeled read/write via the tied $rs1r output.  funct = Inst{25-19}.
class EspVecBinLdIncp<bits<7> funct, string mnem>
    : Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r),
                  (ins QR:$qx, QR:$qy, GPRPIE:$rs1),
                  mnem#"\t $qu, $rs1, $qz, $qx, $qy", []> {
  bits<3> qx;
  bits<3> qy;
  bits<5> rs1;
  bits<3> qz;
  bits<3> qu;
  bits<5> rs1r;
  let mayStore = 0;
  let mayLoad = 1;
  let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";

  let Inst{31-29} = qx;
  let Inst{28-26} = qy;
  let Inst{25-19} = funct;
  let Inst{18}    = rs1{4};      // rs1 bit 3 not encoded (see note above)
  let Inst{17-15} = rs1{2-0};
  let Inst{14-13} = 0b10;
  let Inst{12-10} = qu;
  let Inst{9-7}   = qz;
  let Inst{6-0}   = 0b0111111;
}

// qz = op(qx, qy) fused with "store qu, [rs1]; rs1 post-increment".
// Same layout as the .ld.incp form; funct = Inst{25-19} (bit 23 set).
class EspVecBinStIncp<bits<7> funct, string mnem>
    : Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r),
                  (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1),
                  mnem#"\t $qu, $rs1, $qz, $qx, $qy", []> {
  bits<3> qx;
  bits<3> qy;
  bits<3> qu;
  bits<5> rs1;
  bits<3> qz;
  bits<5> rs1r;
  let mayStore = 1;
  let mayLoad = 0;
  let hasSideEffects = 1;
  let Constraints = "$rs1r = $rs1";

  let Inst{31-29} = qx;
  let Inst{28-26} = qy;
  let Inst{25-19} = funct;
  let Inst{18}    = rs1{4};
  let Inst{17-15} = rs1{2-0};
  let Inst{14-13} = 0b10;
  let Inst{12-10} = qu;
  let Inst{9-7}   = qz;
  let Inst{6-0}   = 0b0111111;
}

// Widening multiply: {qz, qv} = qx * qy.  funct = Inst{19-10}.
class EspVecMulWide<bits<10> funct, string mnem>
    : Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qx, QR:$qy),
                  mnem#"\t $qz, $qv, $qx, $qy", []> {
  bits<3> qx;
  bits<3> qy;
  bits<3> qz;
  bits<3> qv;
  let mayStore = 0;
  let mayLoad = 0;
  let hasSideEffects = 1;

  let Inst{31-29} = qx;
  let Inst{28-26} = qy;
  let Inst{25-23} = 0b101;
  let Inst{22-20} = qv;
  let Inst{19-10} = funct;
  let Inst{9-7}   = qz;
  let Inst{6-0}   = 0b1011111;
}

// Parametric ReLU: qz = prelu(qy, qx, rs1).  funct = Inst{25-19}.
class EspVecPRelu<bits<7> funct, string mnem>
    : Esp32P4Inst<(outs QR:$qz), (ins GPRPIE:$rs1, QR:$qx, QR:$qy),
                  mnem#"\t $qz, $qy, $qx, $rs1", []> {
  bits<5> rs1;
  bits<3> qx;
  bits<3> qy;
  bits<3> qz;
  let mayStore = 0;
  let mayLoad = 0;
  let hasSideEffects = 1;

  let Inst{31-29} = qx;
  let Inst{28-26} = qy;
  let Inst{25-19} = funct;
  let Inst{18}    = rs1{4};
  let Inst{17-15} = rs1{2-0};
  let Inst{14-10} = 0b11000;
  let Inst{9-7}   = qz;
  let Inst{6-0}   = 0b1011111;
}

// In-place ReLU on qy (tied $qyr output).  funct = Inst{14-7}.
class EspVecRelu<bits<8> funct, string mnem>
    : Esp32P4Inst<(outs QR:$qyr), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qy),
                  mnem#"\t $qy, $rs2, $rs1", []> {
  bits<5> rs1;
  bits<5> rs2;
  bits<3> qy;
  bits<3> qyr;
  let mayStore = 0;
  let mayLoad = 0;
  let hasSideEffects = 1;
  let Constraints = "$qyr = $qy";

  let Inst{31-29} = 0b100;
  let Inst{28-26} = qy;
  let Inst{25-24} = 0b10;
  let Inst{23}    = rs2{4};
  let Inst{22-20} = rs2{2-0};
  let Inst{19}    = 0;
  let Inst{18}    = rs1{4};
  let Inst{17-15} = rs1{2-0};
  let Inst{14-7}  = funct;
  let Inst{6-0}   = 0b1011011;
}

// Saturating scalar-broadcast add/sub: qv = op(qx, rs1).  sel = Inst{28-26}.
class EspVecSatArith<bits<3> sel, string mnem>
    : Esp32P4Inst<(outs QR:$qv), (ins GPRPIE:$rs1, QR:$qx),
                  mnem#"\t $qv, $qx, $rs1", []> {
  bits<5> rs1;
  bits<3> qx;
  bits<3> qv;
  let mayStore = 0;
  let mayLoad = 0;
  let hasSideEffects = 1;

  let Inst{31-29} = qx;
  let Inst{28-26} = sel;
  let Inst{25-23} = 0b101;
  let Inst{22-20} = qv;
  let Inst{19}    = 0;
  let Inst{18}    = rs1{4};
  let Inst{17-15} = rs1{2-0};
  let Inst{14-7}  = 0b00000100;
  let Inst{6-0}   = 0b1011111;
}

// Saturate/clamp: qz = sat(qx, rs1, rs2).  sel = Inst{12-10} (element type).
class EspVecSatClamp<bits<3> sel, string mnem>
    : Esp32P4Inst<(outs QR:$qz), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qx),
                  mnem#"\t $qz, $qx, $rs1, $rs2", []> {
  bits<5> rs1;
  bits<5> rs2;
  bits<3> qx;
  bits<3> qz;
  let mayStore = 0;
  let mayLoad = 0;
  let hasSideEffects = 1;

  let Inst{31-29} = qx;
  let Inst{28-24} = 0b11000;
  let Inst{23}    = rs2{4};
  let Inst{22-20} = rs2{2-0};
  let Inst{19}    = 0;
  let Inst{18}    = rs1{4};
  let Inst{17-15} = rs1{2-0};
  let Inst{14-13} = 0b10;
  let Inst{12-10} = sel;
  let Inst{9-7}   = qz;
  let Inst{6-0}   = 0b0111011;
}

//===----------------------------------------------------------------------===//
// esp.vmin.* — unsigned vector minimum
//===----------------------------------------------------------------------===//

def ESP_VMIN_U32_LD_INCP : EspVecBinLdIncp<0b0100011, "esp.vmin.u32.ld.incp">;

let usesCustomInserter = 1 in
def ESP_VMIN_U32_LD_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu),
    "!esp_vmin_u32_ld_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmin_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>;

def ESP_VMIN_U32_ST_INCP : EspVecBinStIncp<0b0110011, "esp.vmin.u32.st.incp">;

let usesCustomInserter = 1 in
def ESP_VMIN_U32_ST_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz),
    "!esp_vmin_u32_st_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmin_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>;

def ESP_VMIN_U8 : EspVecBinQz<0b0000000000001111, "esp.vmin.u8">;

let usesCustomInserter = 1 in
def ESP_VMIN_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz),
    "!esp_vmin_u8_p $qz, $qx, $qy",
    [(int_riscv_esp_vmin_u8 timm:$qx, timm:$qy, timm:$qz)]>;

def ESP_VMIN_U8_LD_INCP : EspVecBinLdIncp<0b0100001, "esp.vmin.u8.ld.incp">;

let usesCustomInserter = 1 in
def ESP_VMIN_U8_LD_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu),
    "!esp_vmin_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmin_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>;

def ESP_VMIN_U8_ST_INCP : EspVecBinStIncp<0b0110001, "esp.vmin.u8.st.incp">;

let usesCustomInserter = 1 in
def ESP_VMIN_U8_ST_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz),
    "!esp_vmin_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmin_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>;

//===----------------------------------------------------------------------===//
// esp.vmul.* — vector multiply (same-width and widening forms)
//===----------------------------------------------------------------------===//

def ESP_VMUL_S16 : EspVecBinQz<0b0000000110101111, "esp.vmul.s16">;

let usesCustomInserter = 1 in
def ESP_VMUL_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz),
    "!esp_vmul_s16_p $qz, $qx, $qy",
    [(int_riscv_esp_vmul_s16 timm:$qx, timm:$qy, timm:$qz)]>;

def ESP_VMUL_S16_LD_INCP : EspVecBinLdIncp<0b1101101, "esp.vmul.s16.ld.incp">;

let usesCustomInserter = 1 in
def ESP_VMUL_S16_LD_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu),
    "!esp_vmul_s16_ld_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmul_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>;

def ESP_VMUL_S16_S8XS8 : EspVecMulWide<0b0011000001, "esp.vmul.s16.s8xs8">;

let usesCustomInserter = 1 in
def ESP_VMUL_S16_S8XS8_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, imm8:$qz, imm8:$qv),
    "!esp_vmul_s16_s8xs8_p $qz, $qv, $qx, $qy",
    [(int_riscv_esp_vmul_s16_s8xs8 timm:$qx, timm:$qy, timm:$qz, timm:$qv)]>;

def ESP_VMUL_S16_ST_INCP : EspVecBinStIncp<0b1111101, "esp.vmul.s16.st.incp">;

let usesCustomInserter = 1 in
def ESP_VMUL_S16_ST_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz),
    "!esp_vmul_s16_st_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmul_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>;

def ESP_VMUL_S32_S16XS16 : EspVecMulWide<0b0111000001, "esp.vmul.s32.s16xs16">;

let usesCustomInserter = 1 in
def ESP_VMUL_S32_S16XS16_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, imm8:$qz, imm8:$qv),
    "!esp_vmul_s32_s16xs16_p $qz, $qv, $qx, $qy",
    [(int_riscv_esp_vmul_s32_s16xs16 timm:$qx, timm:$qy, timm:$qz, timm:$qv)]>;

def ESP_VMUL_S8 : EspVecBinQz<0b0000000010101111, "esp.vmul.s8">;

let usesCustomInserter = 1 in
def ESP_VMUL_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz),
    "!esp_vmul_s8_p $qz, $qx, $qy",
    [(int_riscv_esp_vmul_s8 timm:$qx, timm:$qy, timm:$qz)]>;

def ESP_VMUL_S8_LD_INCP : EspVecBinLdIncp<0b1100101, "esp.vmul.s8.ld.incp">;

let usesCustomInserter = 1 in
def ESP_VMUL_S8_LD_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu),
    "!esp_vmul_s8_ld_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmul_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>;

def ESP_VMUL_S8_ST_INCP : EspVecBinStIncp<0b1110101, "esp.vmul.s8.st.incp">;

let usesCustomInserter = 1 in
def ESP_VMUL_S8_ST_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz),
    "!esp_vmul_s8_st_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmul_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>;

def ESP_VMUL_U16 : EspVecBinQz<0b0000000100101111, "esp.vmul.u16">;

let usesCustomInserter = 1 in
def ESP_VMUL_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz),
    "!esp_vmul_u16_p $qz, $qx, $qy",
    [(int_riscv_esp_vmul_u16 timm:$qx, timm:$qy, timm:$qz)]>;

def ESP_VMUL_U16_LD_INCP : EspVecBinLdIncp<0b1101001, "esp.vmul.u16.ld.incp">;

let usesCustomInserter = 1 in
def ESP_VMUL_U16_LD_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu),
    "!esp_vmul_u16_ld_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmul_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>;

def ESP_VMUL_U16_ST_INCP : EspVecBinStIncp<0b1111001, "esp.vmul.u16.st.incp">;

let usesCustomInserter = 1 in
def ESP_VMUL_U16_ST_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz),
    "!esp_vmul_u16_st_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmul_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>;

def ESP_VMUL_U8 : EspVecBinQz<0b0000000000101111, "esp.vmul.u8">;

let usesCustomInserter = 1 in
def ESP_VMUL_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz),
    "!esp_vmul_u8_p $qz, $qx, $qy",
    [(int_riscv_esp_vmul_u8 timm:$qx, timm:$qy, timm:$qz)]>;

def ESP_VMUL_U8_LD_INCP : EspVecBinLdIncp<0b1100001, "esp.vmul.u8.ld.incp">;

let usesCustomInserter = 1 in
def ESP_VMUL_U8_LD_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qz, imm8:$qu),
    "!esp_vmul_u8_ld_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmul_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qz, timm:$qu)]>;

def ESP_VMUL_U8_ST_INCP : EspVecBinStIncp<0b1110001, "esp.vmul.u8.st.incp">;

let usesCustomInserter = 1 in
def ESP_VMUL_U8_ST_INCP_P : PseudoESP32P4<(outs),
    (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qz),
    "!esp_vmul_u8_st_incp_p $qu, $rs1, $qz, $qx, $qy",
    [(int_riscv_esp_vmul_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qz)]>;

//===----------------------------------------------------------------------===//
// esp.vprelu.* / esp.vrelu.* — (parametric) rectified linear unit
//===----------------------------------------------------------------------===//

def ESP_VPRELU_S16 : EspVecPRelu<0b1010100, "esp.vprelu.s16">;

let usesCustomInserter = 1 in
def ESP_VPRELU_S16_P : PseudoESP32P4<(outs),
    (ins GPRPIE:$rs1, imm8:$qx, imm8:$qy, imm8:$qz),
    "!esp_vprelu_s16_p $qz, $qy, $qx, $rs1",
    [(int_riscv_esp_vprelu_s16 GPRPIE:$rs1, timm:$qx, timm:$qy, timm:$qz)]>;

def ESP_VPRELU_S8 : EspVecPRelu<0b1000100, "esp.vprelu.s8">;

let usesCustomInserter = 1 in
def ESP_VPRELU_S8_P : PseudoESP32P4<(outs),
    (ins GPRPIE:$rs1, imm8:$qx, imm8:$qy, imm8:$qz),
    "!esp_vprelu_s8_p $qz, $qy, $qx, $rs1",
    [(int_riscv_esp_vprelu_s8 GPRPIE:$rs1, timm:$qx, timm:$qy, timm:$qz)]>;

def ESP_VRELU_S16 : EspVecRelu<0b10111000, "esp.vrelu.s16">;

let usesCustomInserter = 1 in
def ESP_VRELU_S16_P : PseudoESP32P4<(outs),
    (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qy),
    "!esp_vrelu_s16_p $qy, $rs2, $rs1",
    [(int_riscv_esp_vrelu_s16 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qy)]>;

def ESP_VRELU_S8 : EspVecRelu<0b10110000, "esp.vrelu.s8">;

let usesCustomInserter = 1 in
def ESP_VRELU_S8_P : PseudoESP32P4<(outs),
    (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qy),
    "!esp_vrelu_s8_p $qy, $rs2, $rs1",
    [(int_riscv_esp_vrelu_s8 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qy)]>;

//===----------------------------------------------------------------------===//
// esp.vsadds.* / esp.vssubs.* — saturating add/sub of broadcast scalar rs1
//===----------------------------------------------------------------------===//

def ESP_VSADDS_S16 : EspVecSatArith<0b110, "esp.vsadds.s16">;

let usesCustomInserter = 1 in
def ESP_VSADDS_S16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv),
    "!esp_vsadds_s16_p $qv, $qx, $rs1",
    [(int_riscv_esp_vsadds_s16 GPRPIE:$rs1, timm:$qx, timm:$qv)]>;

def ESP_VSADDS_S8 : EspVecSatArith<0b010, "esp.vsadds.s8">;

let usesCustomInserter = 1 in
def ESP_VSADDS_S8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv),
    "!esp_vsadds_s8_p $qv, $qx, $rs1",
    [(int_riscv_esp_vsadds_s8 GPRPIE:$rs1, timm:$qx, timm:$qv)]>;

def ESP_VSADDS_U16 : EspVecSatArith<0b100, "esp.vsadds.u16">;

let usesCustomInserter = 1 in
def ESP_VSADDS_U16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv),
    "!esp_vsadds_u16_p $qv, $qx, $rs1",
    [(int_riscv_esp_vsadds_u16 GPRPIE:$rs1, timm:$qx, timm:$qv)]>;

def ESP_VSADDS_U8 : EspVecSatArith<0b000, "esp.vsadds.u8">;

let usesCustomInserter = 1 in
def ESP_VSADDS_U8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv),
    "!esp_vsadds_u8_p $qv, $qx, $rs1",
    [(int_riscv_esp_vsadds_u8 GPRPIE:$rs1, timm:$qx, timm:$qv)]>;

//===----------------------------------------------------------------------===//
// esp.vsat.* — clamp qx between rs1 (low) and rs2 (high)
//===----------------------------------------------------------------------===//

def ESP_VSAT_S16 : EspVecSatClamp<0b110, "esp.vsat.s16">;

let usesCustomInserter = 1 in
def ESP_VSAT_S16_P : PseudoESP32P4<(outs),
    (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz),
    "!esp_vsat_s16_p $qz, $qx, $rs1, $rs2",
    [(int_riscv_esp_vsat_s16 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qx, timm:$qz)]>;

def ESP_VSAT_S32 : EspVecSatClamp<0b101, "esp.vsat.s32">;

let usesCustomInserter = 1 in
def ESP_VSAT_S32_P : PseudoESP32P4<(outs),
    (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz),
    "!esp_vsat_s32_p $qz, $qx, $rs1, $rs2",
    [(int_riscv_esp_vsat_s32 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qx, timm:$qz)]>;

def ESP_VSAT_S8 : EspVecSatClamp<0b010, "esp.vsat.s8">;

let usesCustomInserter = 1 in
def ESP_VSAT_S8_P : PseudoESP32P4<(outs),
    (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz),
    "!esp_vsat_s8_p $qz, $qx, $rs1, $rs2",
    [(int_riscv_esp_vsat_s8 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qx, timm:$qz)]>;

def ESP_VSAT_U16 : EspVecSatClamp<0b100, "esp.vsat.u16">;

let usesCustomInserter = 1 in
def ESP_VSAT_U16_P : PseudoESP32P4<(outs),
    (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz),
    "!esp_vsat_u16_p $qz, $qx, $rs1, $rs2",
    [(int_riscv_esp_vsat_u16 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qx, timm:$qz)]>;

def ESP_VSAT_U32 : EspVecSatClamp<0b001, "esp.vsat.u32">;

let usesCustomInserter = 1 in
def ESP_VSAT_U32_P : PseudoESP32P4<(outs),
    (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz),
    "!esp_vsat_u32_p $qz, $qx, $rs1, $rs2",
    [(int_riscv_esp_vsat_u32 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qx, timm:$qz)]>;

def ESP_VSAT_U8 : EspVecSatClamp<0b000, "esp.vsat.u8">;

let usesCustomInserter = 1 in
def ESP_VSAT_U8_P : PseudoESP32P4<(outs),
    (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qx, imm8:$qz),
    "!esp_vsat_u8_p $qz, $qx, $rs1, $rs2",
    [(int_riscv_esp_vsat_u8 GPRPIE:$rs1, GPRPIE:$rs2, timm:$qx, timm:$qz)]>;

def ESP_VSSUBS_S16 : EspVecSatArith<0b111, "esp.vssubs.s16">;

let usesCustomInserter = 1 in
def ESP_VSSUBS_S16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv),
    "!esp_vssubs_s16_p $qv, $qx, $rs1",
    [(int_riscv_esp_vssubs_s16 GPRPIE:$rs1, timm:$qx, timm:$qv)]>;

def ESP_VSSUBS_S8 : EspVecSatArith<0b011, "esp.vssubs.s8">;

let usesCustomInserter = 1 in
def ESP_VSSUBS_S8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv),
    "!esp_vssubs_s8_p $qv, $qx, $rs1",
    [(int_riscv_esp_vssubs_s8 GPRPIE:$rs1, timm:$qx, timm:$qv)]>;

def ESP_VSSUBS_U16 : EspVecSatArith<0b101, "esp.vssubs.u16">;

let usesCustomInserter = 1 in
def ESP_VSSUBS_U16_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv),
    "!esp_vssubs_u16_p $qv, $qx, $rs1",
    [(int_riscv_esp_vssubs_u16 GPRPIE:$rs1, timm:$qx, timm:$qv)]>;

def ESP_VSSUBS_U8 : EspVecSatArith<0b001, "esp.vssubs.u8">;

let usesCustomInserter = 1 in
def ESP_VSSUBS_U8_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qx, imm8:$qv),
    "!esp_vssubs_u8_p $qv, $qx, $rs1",
    [(int_riscv_esp_vssubs_u8 GPRPIE:$rs1, timm:$qx, timm:$qv)]>;

//===----------------------------------------------------------------------===//
// esp.vsub.s16 — vector subtract; result goes to qv, Inst{9-7} is constant
//===----------------------------------------------------------------------===//

def ESP_VSUB_S16 : Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy),
                               "esp.vsub.s16\t $qv, $qx, $qy", []> {
  bits<3> qx;
  bits<3> qy;
  bits<3> qv;
  let mayStore = 0;
  let mayLoad = 0;
  let hasSideEffects = 1;

  let Inst{31-29} = qx;
  let Inst{28-26} = qy;
  let Inst{25-23} = 0b101;
  let Inst{22-20} = qv;
  let Inst{19-10} = 0b0100000001;
  let Inst{9-7}   = 0b101;       // fixed field; destination is qv, not qz
  let Inst{6-0}   = 0b1011111;
}

let usesCustomInserter = 1 in
def ESP_VSUB_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv),
"!esp_vsub_s16_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_s16 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_S16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.s16.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_s16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_S16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.s16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 
1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_s16_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VSUB_S32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vsub.s32\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vsub_s32_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_s32 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_S32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.s32.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + 
let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S32_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_s32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_S32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.s32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_s32_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VSUB_S8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vsub.s8\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 
0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vsub_s8_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_s8 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_S8_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.s8.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_s8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_S8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.s8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let 
mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 1; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_S8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_s8_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_s8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VSUB_U16: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vsub.u16\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vsub_u16_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_u16 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_U16_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.u16.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> 
qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_u16_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_u16_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_U16_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.u16.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_u16_st_incp_p $qu, $rs1, $qv, $qx, 
$qy", + [(int_riscv_esp_vsub_u16_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VSUB_U32: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vsub.u32\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vsub_u32_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_u32 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_U32_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.u32.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U32_LD_INCP_P : PseudoESP32P4<(outs), 
(ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_u32_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_u32_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_U32_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.u32.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U32_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_u32_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_u32_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_VSUB_U8: Esp32P4Inst<(outs QR:$qv), (ins QR:$qx, QR:$qy), + "esp.vsub.u8\t $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 1; + let Inst{5} = 
0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qv), + "!esp_vsub_u8_p $qv, $qx, $qy", + [(int_riscv_esp_vsub_u8 timm:$qx, timm:$qy, timm:$qv)]>; + +def ESP_VSUB_U8_LD_INCP: Esp32P4Inst<(outs QR:$qv, QR:$qu, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1), + "esp.vsub.u8.ld.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> qv; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U8_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, imm8:$qv, imm8:$qu), + "!esp_vsub_u8_ld_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_u8_ld_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$qv, timm:$qu)]>; + +def ESP_VSUB_U8_ST_INCP: Esp32P4Inst<(outs QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1), + "esp.vsub.u8.st.incp\t $qu, $rs1, $qv, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let 
Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSUB_U8_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, imm8:$qv), + "!esp_vsub_u8_st_incp_p $qu, $rs1, $qv, $qx, $qy", + [(int_riscv_esp_vsub_u8_st_incp timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$qv)]>; + +def ESP_ADDX2: Esp32P4Inst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), + "esp.addx2\t $rd, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24-20} = rs2{4-0}; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11-7} = rd{4-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 0; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ADDX2_P : PseudoESP32P4<(outs), (ins GPR:$rs1, GPR:$rs2, GPR:$rd), + "!esp_addx2_p $rd, $rs1, $rs2", + [(int_riscv_esp_addx2 GPR:$rs1, GPR:$rs2, GPR:$rd)]>; + +def ESP_ADDX4: Esp32P4Inst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), + "esp.addx4\t $rd, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24-20} = rs2{4-0}; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11-7} = rd{4-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} 
= 0; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ADDX4_P : PseudoESP32P4<(outs), (ins GPR:$rs1, GPR:$rs2, GPR:$rd), + "!esp_addx4_p $rd, $rs1, $rs2", + [(int_riscv_esp_addx4 GPR:$rs1, GPR:$rs2, GPR:$rd)]>; + +def ESP_SAT: Esp32P4Inst<(outs GPR:$rsdr), (ins GPR:$rs0, GPR:$rs1, GPR:$rsd), + "esp.sat\t $rsd, $rs0, $rs1", []> +{ + bits<5> rs0; + bits<5> rs1; + bits<5> rsd; + bits<5> rsdr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rsdr = $rsd"; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24-20} = rsd{4-0}; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11-7} = rs0{4-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 0; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SAT_P : PseudoESP32P4<(outs), (ins GPR:$rs0, GPR:$rs1, GPR:$rsd), + "!esp_sat_p $rsd, $rs0, $rs1", + [(int_riscv_esp_sat GPR:$rs0, GPR:$rs1, GPR:$rsd)]>; + +def ESP_SUBX2: Esp32P4Inst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), + "esp.subx2\t $rd, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24-20} = rs2{4-0}; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11-7} = rd{4-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 0; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SUBX2_P : PseudoESP32P4<(outs), (ins GPR:$rs1, GPR:$rs2, GPR:$rd), + "!esp_subx2_p $rd, $rs1, $rs2", + [(int_riscv_esp_subx2 GPR:$rs1, 
GPR:$rs2, GPR:$rd)]>; + +def ESP_SUBX4: Esp32P4Inst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), + "esp.subx4\t $rd, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24-20} = rs2{4-0}; + let Inst{19-15} = rs1{4-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11-7} = rd{4-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 0; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SUBX4_P : PseudoESP32P4<(outs), (ins GPR:$rs1, GPR:$rs2, GPR:$rd), + "!esp_subx4_p $rd, $rs1, $rs2", + [(int_riscv_esp_subx4 GPR:$rs1, GPR:$rs2, GPR:$rd)]>; + +def ESP_ANDQ: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.andq\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ANDQ_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_andq_p $qz, $qx, $qy", + [(int_riscv_esp_andq timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_NOTQ: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx), + "esp.notq\t $qz, $qx", []> +{ + bits<3> qx; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; 
+ + let Inst{31-29} = qx{2-0}; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_NOTQ_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qz), + "!esp_notq_p $qz, $qx", + [(int_riscv_esp_notq timm:$qx, timm:$qz)]>; + +def ESP_ORQ: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.orq\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ORQ_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_orq_p $qz, $qx, $qy", + [(int_riscv_esp_orq timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_XORQ: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.xorq\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; 
+ let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_XORQ_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_xorq_p $qz, $qx, $qy", + [(int_riscv_esp_xorq timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.s16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_s16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_s16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.s32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let 
Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_s32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_s32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.s8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_s8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_s8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.u16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + 
let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_u16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_u16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.u32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_u32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_u32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_EQ_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.eq.u8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} 
= 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_EQ_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_eq_u8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_eq_u8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.s16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_s16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_s16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.s32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let 
Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_s32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_s32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.s8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_s8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_s8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.u16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + 
let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_u16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_u16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.u32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_u32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_u32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_GT_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.gt.u8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} 
= 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_GT_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_gt_u8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_gt_u8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_S16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.s16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_s16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_s16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_S32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.s32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let 
Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_S32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_s32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_s32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_S8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.s8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_S8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_s8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_s8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_U16: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.u16\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + 
let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_U16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_u16_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_u16 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_U32: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.u32\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_U32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_u32_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_u32 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_VCMP_LT_U8: Esp32P4Inst<(outs QR:$qz), (ins QR:$qx, QR:$qy), + "esp.vcmp.lt.u8\t $qz, $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let 
Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VCMP_LT_U8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qz), + "!esp_vcmp_lt_u8_p $qz, $qx, $qy", + [(int_riscv_esp_vcmp_lt_u8 timm:$qx, timm:$qy, timm:$qz)]>; + +def ESP_MOV_S16_QACC: Esp32P4Inst<(outs), (ins QR:$qu), + "esp.mov.s16.qacc\t $qu", []> +{ + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOV_S16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qu), + "!esp_mov_s16_qacc_p $qu", + [(int_riscv_esp_mov_s16_qacc timm:$qu)]>; + +def ESP_MOV_S8_QACC: Esp32P4Inst<(outs), (ins QR:$qu), + "esp.mov.s8.qacc\t $qu", []> +{ + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let 
Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOV_S8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qu), + "!esp_mov_s8_qacc_p $qu", + [(int_riscv_esp_mov_s8_qacc timm:$qu)]>; + +def ESP_MOV_U16_QACC: Esp32P4Inst<(outs), (ins QR:$qu), + "esp.mov.u16.qacc\t $qu", []> +{ + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOV_U16_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qu), + "!esp_mov_u16_qacc_p $qu", + [(int_riscv_esp_mov_u16_qacc timm:$qu)]>; + +def ESP_MOV_U8_QACC: Esp32P4Inst<(outs), (ins QR:$qu), + "esp.mov.u8.qacc\t $qu", []> +{ + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def 
ESP_MOV_U8_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qu), + "!esp_mov_u8_qacc_p $qu", + [(int_riscv_esp_mov_u8_qacc timm:$qu)]>; + +def ESP_MOVI_16_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qy, select_16:$sel16), + "esp.movi.16.a\t $qy, $rd, $sel16", []> +{ + bits<3> qy; + bits<4> sel16; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVI_16_A_P : PseudoESP32P4<(outs), (ins imm8:$qy, select_16:$sel16, GPRPIE:$rd), + "!esp_movi_16_a_p $qy, $rd, $sel16", + [(int_riscv_esp_movi_16_a timm:$qy, timm:$sel16, GPRPIE:$rd)]>; + +def ESP_MOVI_16_Q: Esp32P4Inst<(outs QR:$qy), (ins GPRPIE:$rs1, select_16:$sel16), + "esp.movi.16.q\t $qy, $rs1, $sel16", []> +{ + bits<5> rs1; + bits<4> sel16; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10-7} = sel16{3-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVI_16_Q_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, 
select_16:$sel16, imm8:$qy), + "!esp_movi_16_q_p $qy, $rs1, $sel16", + [(int_riscv_esp_movi_16_q GPRPIE:$rs1, timm:$sel16, timm:$qy)]>; + +def ESP_MOVI_32_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qy, select_4:$sel4), + "esp.movi.32.a\t $qy, $rd, $sel4", []> +{ + bits<3> qy; + bits<2> sel4; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = sel4{1}; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = sel4{0}; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVI_32_A_P : PseudoESP32P4<(outs), (ins imm8:$qy, select_4:$sel4, GPRPIE:$rd), + "!esp_movi_32_a_p $qy, $rd, $sel4", + [(int_riscv_esp_movi_32_a timm:$qy, timm:$sel4, GPRPIE:$rd)]>; + +def ESP_MOVI_32_Q: Esp32P4Inst<(outs QR:$qy), (ins GPRPIE:$rs1, select_4:$sel4), + "esp.movi.32.q\t $qy, $rs1, $sel4", []> +{ + bits<5> rs1; + bits<2> sel4; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10-9} = sel4{1-0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + 
+let usesCustomInserter = 1 in +def ESP_MOVI_32_Q_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_4:$sel4, imm8:$qy), + "!esp_movi_32_q_p $qy, $rs1, $sel4", + [(int_riscv_esp_movi_32_q GPRPIE:$rs1, timm:$sel4, timm:$qy)]>; + +def ESP_MOVI_8_A: Esp32P4Inst<(outs GPRPIE:$rd), (ins QR:$qy, select_16:$sel16), + "esp.movi.8.a\t $qy, $rd, $sel16", []> +{ + bits<3> qy; + bits<4> sel16; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18-15} = sel16{3-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVI_8_A_P : PseudoESP32P4<(outs), (ins imm8:$qy, select_16:$sel16, GPRPIE:$rd), + "!esp_movi_8_a_p $qy, $rd, $sel16", + [(int_riscv_esp_movi_8_a timm:$qy, timm:$sel16, GPRPIE:$rd)]>; + +def ESP_MOVI_8_Q: Esp32P4Inst<(outs QR:$qy), (ins GPRPIE:$rs1, select_16:$sel16), + "esp.movi.8.q\t $qy, $rs1, $sel16", []> +{ + bits<5> rs1; + bits<4> sel16; + bits<3> qy; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10-7} = sel16{3-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + 
+let usesCustomInserter = 1 in +def ESP_MOVI_8_Q_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_16:$sel16, imm8:$qy), + "!esp_movi_8_q_p $qy, $rs1, $sel16", + [(int_riscv_esp_movi_8_q GPRPIE:$rs1, timm:$sel16, timm:$qy)]>; + +def ESP_MOVX_R_CFG: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.cfg\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_R_CFG_P : PseudoESP32P4<(outs), (ins GPRPIE:$rd), + "!esp_movx_r_cfg_p $rd", + [(int_riscv_esp_movx_r_cfg GPRPIE:$rd)]>; + +def ESP_MOVX_R_FFT_BIT_WIDTH: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.fft.bit.width\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in +def ESP_MOVX_R_FFT_BIT_WIDTH_P : PseudoESP32P4<(outs), (ins GPRPIE:$rd), + "!esp_movx_r_fft_bit_width_p $rd", + [(int_riscv_esp_movx_r_fft_bit_width GPRPIE:$rd)]>; + +def ESP_MOVX_R_PERF: Esp32P4Inst<(outs GPRPIE:$rd), (ins GPRPIE:$rs1), + "esp.movx.r.perf\t $rd, $rs1", []> +{ + bits<5> rs1; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_R_PERF_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rd), + "!esp_movx_r_perf_p $rd, $rs1", + [(int_riscv_esp_movx_r_perf GPRPIE:$rs1, GPRPIE:$rd)]>; + +def ESP_MOVX_R_SAR: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.sar\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 
in +def ESP_MOVX_R_SAR_P : PseudoESP32P4<(outs), (ins GPRPIE:$rd), + "!esp_movx_r_sar_p $rd", + [(int_riscv_esp_movx_r_sar GPRPIE:$rd)]>; + +def ESP_MOVX_R_SAR_BYTES: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.sar.bytes\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_R_SAR_BYTES_P : PseudoESP32P4<(outs), (ins GPRPIE:$rd), + "!esp_movx_r_sar_bytes_p $rd", + [(int_riscv_esp_movx_r_sar_bytes GPRPIE:$rd)]>; + +def ESP_MOVX_R_XACC_H: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.xacc.h\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_R_XACC_H_P : PseudoESP32P4<(outs), (ins 
GPRPIE:$rd), + "!esp_movx_r_xacc_h_p $rd", + [(int_riscv_esp_movx_r_xacc_h GPRPIE:$rd)]>; + +def ESP_MOVX_R_XACC_L: Esp32P4Inst<(outs GPRPIE:$rd), (ins), + "esp.movx.r.xacc.l\t $rd", []> +{ + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_R_XACC_L_P : PseudoESP32P4<(outs), (ins GPRPIE:$rd), + "!esp_movx_r_xacc_l_p $rd", + [(int_riscv_esp_movx_r_xacc_l GPRPIE:$rd)]>; + +def ESP_MOVX_W_CFG: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.cfg\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_CFG_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_cfg_p $rs1", + [(int_riscv_esp_movx_w_cfg 
GPRPIE:$rs1)]>; + +def ESP_MOVX_W_FFT_BIT_WIDTH: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.fft.bit.width\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_FFT_BIT_WIDTH_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_fft_bit_width_p $rs1", + [(int_riscv_esp_movx_w_fft_bit_width GPRPIE:$rs1)]>; + +def ESP_MOVX_W_PERF: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.perf\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_PERF_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_perf_p $rs1", + [(int_riscv_esp_movx_w_perf GPRPIE:$rs1)]>; + +def 
ESP_MOVX_W_SAR: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.sar\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_SAR_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_sar_p $rs1", + [(int_riscv_esp_movx_w_sar GPRPIE:$rs1)]>; + +def ESP_MOVX_W_SAR_BYTES: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.sar.bytes\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_SAR_BYTES_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_sar_bytes_p $rs1", + [(int_riscv_esp_movx_w_sar_bytes GPRPIE:$rs1)]>; + +def ESP_MOVX_W_XACC_H: Esp32P4Inst<(outs), (ins 
GPRPIE:$rs1), + "esp.movx.w.xacc.h\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_XACC_H_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_xacc_h_p $rs1", + [(int_riscv_esp_movx_w_xacc_h GPRPIE:$rs1)]>; + +def ESP_MOVX_W_XACC_L: Esp32P4Inst<(outs), (ins GPRPIE:$rs1), + "esp.movx.w.xacc.l\t $rs1", []> +{ + bits<5> rs1; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_MOVX_W_XACC_L_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1), + "!esp_movx_w_xacc_l_p $rs1", + [(int_riscv_esp_movx_w_xacc_l GPRPIE:$rs1)]>; + +def ESP_VEXT_S16: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qw), + "esp.vext.s16\t $qz, $qv, $qw", 
[]> +{ + bits<3> qw; + bits<3> qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VEXT_S16_P : PseudoESP32P4<(outs), (ins imm8:$qw, imm8:$qz, imm8:$qv), + "!esp_vext_s16_p $qz, $qv, $qw", + [(int_riscv_esp_vext_s16 timm:$qw, timm:$qz, timm:$qv)]>; + +def ESP_VEXT_S8: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qw), + "esp.vext.s8\t $qz, $qv, $qw", []> +{ + bits<3> qw; + bits<3> qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VEXT_S8_P : PseudoESP32P4<(outs), (ins imm8:$qw, imm8:$qz, imm8:$qv), + "!esp_vext_s8_p $qz, $qv, $qw", + [(int_riscv_esp_vext_s8 timm:$qw, timm:$qz, timm:$qv)]>; + +def ESP_VEXT_U16: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qw), + "esp.vext.u16\t $qz, $qv, $qw", []> +{ + bits<3> 
qw; + bits<3> qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VEXT_U16_P : PseudoESP32P4<(outs), (ins imm8:$qw, imm8:$qz, imm8:$qv), + "!esp_vext_u16_p $qz, $qv, $qw", + [(int_riscv_esp_vext_u16 timm:$qw, timm:$qz, timm:$qv)]>; + +def ESP_VEXT_U8: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qw), + "esp.vext.u8\t $qz, $qv, $qw", []> +{ + bits<3> qw; + bits<3> qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VEXT_U8_P : PseudoESP32P4<(outs), (ins imm8:$qw, imm8:$qz, imm8:$qv), + "!esp_vext_u8_p $qz, $qv, $qw", + [(int_riscv_esp_vext_u8 timm:$qw, timm:$qz, timm:$qv)]>; + +def ESP_VUNZIP_16: Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vunzip.16\t $qx, $qy", []> +{ + bits<3> qx; + 
bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VUNZIP_16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vunzip_16_p $qx, $qy", + [(int_riscv_esp_vunzip_16 timm:$qx, timm:$qy)]>; + +def ESP_VUNZIP_32: Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vunzip.32\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VUNZIP_32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vunzip_32_p $qx, $qy", + [(int_riscv_esp_vunzip_32 timm:$qx, timm:$qy)]>; + +def ESP_VUNZIP_8: 
Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vunzip.8\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VUNZIP_8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vunzip_8_p $qx, $qy", + [(int_riscv_esp_vunzip_8 timm:$qx, timm:$qy)]>; + +def ESP_VUNZIPT_16: Esp32P4Inst<(outs QR:$qxr, QR:$qyr, QR:$qwr), (ins QR:$qx, QR:$qy, QR:$qw), + "esp.vunzipt.16\t $qx, $qy, $qw", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<3> qxr; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy, $qwr = $qw"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VUNZIPT_16_P : 
PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw), + "!esp_vunzipt_16_p $qx, $qy, $qw", + [(int_riscv_esp_vunzipt_16 timm:$qx, timm:$qy, timm:$qw)]>; + +def ESP_VUNZIPT_8: Esp32P4Inst<(outs QR:$qxr, QR:$qyr, QR:$qwr), (ins QR:$qx, QR:$qy, QR:$qw), + "esp.vunzipt.8\t $qx, $qy, $qw", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<3> qxr; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy, $qwr = $qw"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VUNZIPT_8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw), + "!esp_vunzipt_8_p $qx, $qy, $qw", + [(int_riscv_esp_vunzipt_8 timm:$qx, timm:$qy, timm:$qw)]>; + +def ESP_VZIP_16: Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vzip.16\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 1; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let 
Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VZIP_16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vzip_16_p $qx, $qy", + [(int_riscv_esp_vzip_16 timm:$qx, timm:$qy)]>; + +def ESP_VZIP_32: Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vzip.32\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 1; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VZIP_32_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vzip_32_p $qx, $qy", + [(int_riscv_esp_vzip_32 timm:$qx, timm:$qy)]>; + +def ESP_VZIP_8: Esp32P4Inst<(outs QR:$qxr, QR:$qyr), (ins QR:$qx, QR:$qy), + "esp.vzip.8\t $qx, $qy", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qxr; + bits<3> qyr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let 
Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VZIP_8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy), + "!esp_vzip_8_p $qx, $qy", + [(int_riscv_esp_vzip_8 timm:$qx, timm:$qy)]>; + +def ESP_VZIPT_16: Esp32P4Inst<(outs QR:$qxr, QR:$qyr, QR:$qwr), (ins QR:$qx, QR:$qy, QR:$qw), + "esp.vzipt.16\t $qx, $qy, $qw", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<3> qxr; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy, $qwr = $qw"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VZIPT_16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw), + "!esp_vzipt_16_p $qx, $qy, $qw", + [(int_riscv_esp_vzipt_16 timm:$qx, timm:$qy, timm:$qw)]>; + +def ESP_VZIPT_8: Esp32P4Inst<(outs QR:$qxr, QR:$qyr, QR:$qwr), (ins QR:$qx, QR:$qy, QR:$qw), + "esp.vzipt.8\t $qx, $qy, $qw", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<3> qxr; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qxr = $qx, $qyr = $qy, $qwr = $qw"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} 
= 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VZIPT_8_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw), + "!esp_vzipt_8_p $qx, $qy, $qw", + [(int_riscv_esp_vzipt_8 timm:$qx, timm:$qy, timm:$qw)]>; + +def ESP_ZERO_Q: Esp32P4Inst<(outs QR:$qz), (ins), + "esp.zero.q\t $qz", []> +{ + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ZERO_Q_P : PseudoESP32P4<(outs), (ins imm8:$qz), + "!esp_zero_q_p $qz", + [(int_riscv_esp_zero_q timm:$qz)]>; + +def ESP_ZERO_QACC: Esp32P4Inst<(outs), (ins), + "esp.zero.qacc\t", []> +{ + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let 
Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ZERO_QACC_P : PseudoESP32P4<(outs), (ins), + "!esp_zero_qacc_p", + [(int_riscv_esp_zero_qacc)]>; + +def ESP_ZERO_XACC: Esp32P4Inst<(outs), (ins), + "esp.zero.xacc\t", []> +{ + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ZERO_XACC_P : PseudoESP32P4<(outs), (ins), + "!esp_zero_xacc_p", + [(int_riscv_esp_zero_xacc)]>; + +def ESP_FFT_AMS_S16_LD_INCP: Esp32P4Inst<(outs QR:$qu, QR:$qz, QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qw, GPRPIE:$rs1, select_2:$sel2), + "esp.fft.ams.s16.ld.incp\t $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + bits<3> qz; + bits<3> qv; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = sel2{0}; + let Inst{22-20} = 
qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_AMS_S16_LD_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), + "!esp_fft_ams_s16_ld_incp_p $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", + [(int_riscv_esp_fft_ams_s16_ld_incp timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, timm:$qu, timm:$qz, timm:$qv)]>; + +def ESP_FFT_AMS_S16_LD_INCP_UAUP: Esp32P4Inst<(outs QR:$qu, QR:$qz, QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qw, GPRPIE:$rs1, select_2:$sel2), + "esp.fft.ams.s16.ld.incp.uaup\t $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + bits<3> qz; + bits<3> qv; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = sel2{0}; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_AMS_S16_LD_INCP_UAUP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), + "!esp_fft_ams_s16_ld_incp_uaup_p $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", + [(int_riscv_esp_fft_ams_s16_ld_incp_uaup timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, 
timm:$qu, timm:$qz, timm:$qv)]>; + +def ESP_FFT_AMS_S16_LD_R32_DECP: Esp32P4Inst<(outs QR:$qu, QR:$qz, QR:$qv, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, QR:$qw, GPRPIE:$rs1, select_2:$sel2), + "esp.fft.ams.s16.ld.r32.decp\t $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + bits<3> qz; + bits<3> qv; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = sel2{0}; + let Inst{22-20} = qv{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_AMS_S16_LD_R32_DECP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw, GPRPIE:$rs1, select_2:$sel2, imm8:$qu, imm8:$qz, imm8:$qv), + "!esp_fft_ams_s16_ld_r32_decp_p $qu, $rs1, $qz, $qv, $qx, $qw, $qy, $sel2", + [(int_riscv_esp_fft_ams_s16_ld_r32_decp timm:$qx, timm:$qy, timm:$qw, GPRPIE:$rs1, timm:$sel2, timm:$qu, timm:$qz, timm:$qv)]>; + +def ESP_FFT_AMS_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r, GPRPIE:$rs2r), (ins QR:$qx, QR:$qy, QR:$qw, QR:$qu, GPRPIE:$rs1, GPRPIE:$rs2, select_2:$sel2), + "esp.fft.ams.s16.st.incp\t $qu, $qz, $rs2, $rs1, $qx, $qw, $qy, $sel2", []> +{ + bits<3> qx; + bits<3> qy; + bits<3> qw; + bits<3> qu; + bits<5> rs1; + bits<5> rs2; + bits<1> sel2; + bits<3> qz; + bits<5> rs1r; + bits<5> rs2r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1, $rs2r = $rs2"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = rs2{4}; + let Inst{22-20} = 
rs2{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = sel2{0}; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_AMS_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, imm8:$qw, imm8:$qu, GPRPIE:$rs1, GPRPIE:$rs2, select_2:$sel2, imm8:$qz), + "!esp_fft_ams_s16_st_incp_p $qu, $qz, $rs2, $rs1, $qx, $qw, $qy, $sel2", + [(int_riscv_esp_fft_ams_s16_st_incp timm:$qx, timm:$qy, timm:$qw, timm:$qu, GPRPIE:$rs1, GPRPIE:$rs2, timm:$sel2, timm:$qz)]>; + +def ESP_FFT_BITREV: Esp32P4Inst<(outs GPRPIE:$rs1r, QR:$qvr), (ins GPRPIE:$rs1, QR:$qv), + "esp.fft.bitrev\t $qv, $rs1", []> +{ + bits<5> rs1; + bits<3> qv; + bits<5> rs1r; + bits<3> qvr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1, $qvr = $qv"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_BITREV_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qv), + "!esp_fft_bitrev_p $qv, $rs1", + [(int_riscv_esp_fft_bitrev GPRPIE:$rs1, timm:$qv)]>; + +def ESP_FFT_CMUL_S16_LD_XP: Esp32P4Inst<(outs QR:$qz, QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, GPRPIE:$rs1, select_8:$sel8), + "esp.fft.cmul.s16.ld.xp\t $qu, $rs1, $rs2, $qz, $qy, 
$qx, $sel8", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<3> sel8; + bits<3> qz; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = sel8{2-1}; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = sel8{0}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_CMUL_S16_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_8:$sel8, imm8:$qz, imm8:$qu), + "!esp_fft_cmul_s16_ld_xp_p $qu, $rs1, $rs2, $qz, $qy, $qx, $sel8", + [(int_riscv_esp_fft_cmul_s16_ld_xp GPRPIE:$rs2, timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel8, timm:$qz, timm:$qu)]>; + +def ESP_FFT_CMUL_S16_ST_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qx, QR:$qy, QR:$qu, GPRPIE:$rs1, select_4:$sel4, select_4:$upd4, select_8:$sel8), + "esp.fft.cmul.s16.st.xp\t $qy, $qx, $qu, $rs1, $rs2, $sel8, $upd4, $sel4", []> +{ + bits<5> rs2; + bits<3> qx; + bits<3> qy; + bits<3> qu; + bits<5> rs1; + bits<2> sel4; + bits<2> upd4; + bits<3> sel8; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = sel4{1-0}; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = upd4{1}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = upd4{0}; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = sel8{2-0}; + let Inst{6} = 1; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + 
let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_CMUL_S16_ST_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qx, imm8:$qy, imm8:$qu, GPRPIE:$rs1, select_4:$sel4, select_4:$upd4, select_8:$sel8), + "!esp_fft_cmul_s16_st_xp_p $qy, $qx, $qu, $rs1, $rs2, $sel8, $upd4, $sel4", + [(int_riscv_esp_fft_cmul_s16_st_xp GPRPIE:$rs2, timm:$qx, timm:$qy, timm:$qu, GPRPIE:$rs1, timm:$sel4, timm:$upd4, timm:$sel8)]>; + +def ESP_FFT_R2BF_S16: Esp32P4Inst<(outs QR:$qz, QR:$qv), (ins QR:$qx, QR:$qy, select_2:$sel2), + "esp.fft.r2bf.s16\t $qz, $qv, $qx, $qy, $sel2", []> +{ + bits<3> qx; + bits<3> qy; + bits<1> sel2; + bits<3> qz; + bits<3> qv; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22-20} = qv{2-0}; + let Inst{19} = 0; + let Inst{18} = sel2{0}; + let Inst{17} = 0; + let Inst{16} = 1; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_R2BF_S16_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, select_2:$sel2, imm8:$qz, imm8:$qv), + "!esp_fft_r2bf_s16_p $qz, $qv, $qx, $qy, $sel2", + [(int_riscv_esp_fft_r2bf_s16 timm:$qx, timm:$qy, timm:$sel2, timm:$qz, timm:$qv)]>; + +def ESP_FFT_R2BF_S16_ST_INCP: Esp32P4Inst<(outs QR:$qz, GPRPIE:$rs1r), (ins QR:$qx, QR:$qy, GPRPIE:$rs1, select_4:$sel4), + "esp.fft.r2bf.s16.st.incp\t $qz, $qx, $qy, $rs1, $sel4", []> +{ + bits<3> qx; + bits<3> qy; + bits<5> rs1; + bits<2> sel4; + bits<3> qz; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = qx{2-0}; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 1; + let Inst{24} = 0; 
+ let Inst{23-22} = sel4{1-0}; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_R2BF_S16_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qx, imm8:$qy, GPRPIE:$rs1, select_4:$sel4, imm8:$qz), + "!esp_fft_r2bf_s16_st_incp_p $qz, $qx, $qy, $rs1, $sel4", + [(int_riscv_esp_fft_r2bf_s16_st_incp timm:$qx, timm:$qy, GPRPIE:$rs1, timm:$sel4, timm:$qz)]>; + +def ESP_FFT_VST_R32_DECP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, select_2:$sel2), + "esp.fft.vst.r32.decp\t $qu, $rs1, $sel2", []> +{ + bits<3> qu; + bits<5> rs1; + bits<1> sel2; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = sel2{0}; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_FFT_VST_R32_DECP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, select_2:$sel2), + "!esp_fft_vst_r32_decp_p $qu, $rs1, $sel2", + [(int_riscv_esp_fft_vst_r32_decp timm:$qu, GPRPIE:$rs1, timm:$sel2)]>; + +def ESP_LD_128_USAR_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + 
"esp.ld.128.usar.ip\t $qu, $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = off25616{7-5}; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off25616{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LD_128_USAR_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616, imm8:$qu), + "!esp_ld_128_usar_ip_p $qu, $rs1, $off25616", + [(int_riscv_esp_ld_128_usar_ip GPRPIE:$rs1, timm:$off25616, timm:$qu)]>; + +def ESP_LD_128_USAR_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.ld.128.usar.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LD_128_USAR_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), + 
"!esp_ld_128_usar_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_ld_128_usar_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_LD_XACC_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "esp.ld.xacc.ip\t $rs1, $off2568", []> +{ + bits<5> rs1; + bits<8> off2568; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = off2568{7}; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off2568{6-3}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = off2568{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LD_XACC_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "!esp_ld_xacc_ip_p $rs1, $off2568", + [(int_riscv_esp_ld_xacc_ip GPRPIE:$rs1, timm:$off2568)]>; + +def ESP_LDQA_S16_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ldqa.s16.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; 
+} + +let usesCustomInserter = 1 in +def ESP_LDQA_S16_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ldqa_s16_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ldqa_s16_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LDQA_S16_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.ldqa.s16.128.xp\t $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_S16_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "!esp_ldqa_s16_128_xp_p $rs1, $rs2", + [(int_riscv_esp_ldqa_s16_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + +def ESP_LDQA_S8_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ldqa.s8.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 
0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_S8_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ldqa_s8_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ldqa_s8_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LDQA_S8_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.ldqa.s8.128.xp\t $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_S8_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "!esp_ldqa_s8_128_xp_p $rs1, $rs2", + [(int_riscv_esp_ldqa_s8_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + +def ESP_LDQA_U16_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ldqa.u16.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} 
= 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_U16_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ldqa_u16_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ldqa_u16_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LDQA_U16_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.ldqa.u16.128.xp\t $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 1; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_U16_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "!esp_ldqa_u16_128_xp_p $rs1, $rs2", + [(int_riscv_esp_ldqa_u16_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + +def ESP_LDQA_U8_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ldqa.u8.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} 
= 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_U8_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ldqa_u8_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ldqa_u8_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LDQA_U8_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.ldqa.u8.128.xp\t $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 1; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDQA_U8_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "!esp_ldqa_u8_128_xp_p $rs1, $rs2", + [(int_riscv_esp_ldqa_u8_128_xp GPRPIE:$rs2, GPRPIE:$rs1)]>; + +def ESP_VLDBC_16_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564), + "esp.vldbc.16.ip\t $qu, $rs1, $off2564", []> +{ + bits<5> rs1; + bits<8> off2564; + bits<3> qu; + bits<5> 
rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30-28} = off2564{7-5}; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23-20} = off2564{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off2564{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDBC_16_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), + "!esp_vldbc_16_ip_p $qu, $rs1, $off2564", + [(int_riscv_esp_vldbc_16_ip GPRPIE:$rs1, timm:$off2564, timm:$qu)]>; + +def ESP_VLDBC_16_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vldbc.16.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDBC_16_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), + "!esp_vldbc_16_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vldbc_16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VLDBC_32_IP: 
Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564), + "esp.vldbc.32.ip\t $qu, $rs1, $off2564", []> +{ + bits<5> rs1; + bits<8> off2564; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31-29} = off2564{7-5}; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23-20} = off2564{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off2564{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDBC_32_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), + "!esp_vldbc_32_ip_p $qu, $rs1, $off2564", + [(int_riscv_esp_vldbc_32_ip GPRPIE:$rs1, timm:$off2564, timm:$qu)]>; + +def ESP_VLDBC_32_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vldbc.32.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDBC_32_XP_P : 
PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), + "!esp_vldbc_32_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vldbc_32_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VLDBC_8_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_4:$off2564), + "esp.vldbc.8.ip\t $qu, $rs1, $off2564", []> +{ + bits<5> rs1; + bits<8> off2564; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30-28} = off2564{7-5}; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23-20} = off2564{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off2564{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDBC_8_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_4:$off2564, imm8:$qu), + "!esp_vldbc_8_ip_p $qu, $rs1, $off2564", + [(int_riscv_esp_vldbc_8_ip GPRPIE:$rs1, timm:$off2564, timm:$qu)]>; + +def ESP_VLDBC_8_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vldbc.8.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + 
let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDBC_8_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), + "!esp_vldbc_8_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vldbc_8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VLDEXT_S16_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vldext.s16.ip\t $qu, $qz, $rs1, $off1616", []> +{ + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<3> qz; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off1616{3-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDEXT_S16_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), + "!esp_vldext_s16_ip_p $qu, $qz, $rs1, $off1616", + [(int_riscv_esp_vldext_s16_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>; + +def ESP_VLDEXT_S16_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vldext.s16.xp\t $qu, $qz, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<3> qz; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 
rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDEXT_S16_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), + "!esp_vldext_s16_xp_p $qu, $qz, $rs1, $rs2", + [(int_riscv_esp_vldext_s16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>; + +def ESP_VLDEXT_S8_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vldext.s8.ip\t $qu, $qz, $rs1, $off1616", []> +{ + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<3> qz; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off1616{3-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDEXT_S8_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), + "!esp_vldext_s8_ip_p $qu, $qz, $rs1, $off1616", + [(int_riscv_esp_vldext_s8_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>; + +def ESP_VLDEXT_S8_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vldext.s8.xp\t $qu, $qz, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<3> qz; + bits<5> rs1r; + let mayStore = 0; + let 
mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDEXT_S8_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), + "!esp_vldext_s8_xp_p $qu, $qz, $rs1, $rs2", + [(int_riscv_esp_vldext_s8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>; + +def ESP_VLDEXT_U16_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vldext.u16.ip\t $qu, $qz, $rs1, $off1616", []> +{ + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<3> qz; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off1616{3-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDEXT_U16_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), + "!esp_vldext_u16_ip_p $qu, $qz, $rs1, $off1616", + [(int_riscv_esp_vldext_u16_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>; + 
+def ESP_VLDEXT_U16_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vldext.u16.xp\t $qu, $qz, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<3> qz; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDEXT_U16_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), + "!esp_vldext_u16_xp_p $qu, $qz, $rs1, $rs2", + [(int_riscv_esp_vldext_u16_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>; + +def ESP_VLDEXT_U8_IP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_16_16:$off1616), + "esp.vldext.u8.ip\t $qu, $qz, $rs1, $off1616", []> +{ + bits<5> rs1; + bits<4> off1616; + bits<3> qu; + bits<3> qz; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off1616{3-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in +def ESP_VLDEXT_U8_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_16_16:$off1616, imm8:$qu, imm8:$qz), + "!esp_vldext_u8_ip_p $qu, $qz, $rs1, $off1616", + [(int_riscv_esp_vldext_u8_ip GPRPIE:$rs1, timm:$off1616, timm:$qu, timm:$qz)]>; + +def ESP_VLDEXT_U8_XP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vldext.u8.xp\t $qu, $qz, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<3> qz; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDEXT_U8_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu, imm8:$qz), + "!esp_vldext_u8_xp_p $qu, $qz, $rs1, $rs2", + [(int_riscv_esp_vldext_u8_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu, timm:$qz)]>; + +def ESP_VLDHBC_16_INCP: Esp32P4Inst<(outs QR:$qu, QR:$qz, GPRPIE:$rs1r), (ins GPRPIE:$rs1), + "esp.vldhbc.16.incp\t $qu, $qz, $rs1", []> +{ + bits<5> rs1; + bits<3> qu; + bits<3> qz; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = 
rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLDHBC_16_INCP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qu, imm8:$qz), + "!esp_vldhbc_16_incp_p $qu, $qz, $rs1", + [(int_riscv_esp_vldhbc_16_incp GPRPIE:$rs1, timm:$qu, timm:$qz)]>; + +def ESP_LD_QACC_H_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ld.qacc.h.h.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LD_QACC_H_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ld_qacc_h_h_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ld_qacc_h_h_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LD_QACC_H_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ld.qacc.h.l.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let 
Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LD_QACC_H_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ld_qacc_h_l_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ld_qacc_h_l_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LD_QACC_L_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ld.qacc.l.h.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LD_QACC_L_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ld_qacc_l_h_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ld_qacc_l_h_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LD_QACC_L_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ld.qacc.l.l.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + 
+ let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LD_QACC_L_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ld_qacc_l_l_128_ip_p $rs1, $off25616", + [(int_riscv_esp_ld_qacc_l_l_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LD_UA_STATE_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.ld.ua.state.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = off25616{7}; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{6-3}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = off25616{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LD_UA_STATE_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_ld_ua_state_ip_p $rs1, $off25616", + [(int_riscv_esp_ld_ua_state_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_LDXQ_32: Esp32P4Inst<(outs QR:$qu), (ins GPRPIE:$rs1, QR:$qw, select_4:$sel4, 
select_8:$sel8), + "esp.ldxq.32\t $qu, $qw, $rs1, $sel4, $sel8", []> +{ + bits<5> rs1; + bits<3> qw; + bits<2> sel4; + bits<3> sel8; + bits<3> qu; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30-29} = sel4{1-0}; + let Inst{28-26} = sel8{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_LDXQ_32_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qw, select_4:$sel4, select_8:$sel8, imm8:$qu), + "!esp_ldxq_32_p $qu, $qw, $rs1, $sel4, $sel8", + [(int_riscv_esp_ldxq_32 GPRPIE:$rs1, timm:$qw, timm:$sel4, timm:$sel8, timm:$qu)]>; + +def ESP_ST_QACC_H_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.st.qacc.h.h.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ST_QACC_H_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + 
"!esp_st_qacc_h_h_128_ip_p $rs1, $off25616", + [(int_riscv_esp_st_qacc_h_h_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_ST_QACC_H_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.st.qacc.h.l.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ST_QACC_H_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_st_qacc_h_l_128_ip_p $rs1, $off25616", + [(int_riscv_esp_st_qacc_h_l_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_ST_QACC_L_H_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.st.qacc.l.h.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let 
Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ST_QACC_L_H_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_st_qacc_l_h_128_ip_p $rs1, $off25616", + [(int_riscv_esp_st_qacc_l_h_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_ST_QACC_L_L_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.st.qacc.l.l.128.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{7-4}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-9} = off25616{3-0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ST_QACC_L_L_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_st_qacc_l_l_128_ip_p $rs1, $off25616", + [(int_riscv_esp_st_qacc_l_l_128_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_ST_UA_STATE_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.st.ua.state.ip\t $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = off25616{7}; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off25616{6-3}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let 
Inst{12-10} = off25616{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ST_UA_STATE_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_st_ua_state_ip_p $rs1, $off25616", + [(int_riscv_esp_st_ua_state_ip GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_STXQ_32: Esp32P4Inst<(outs), (ins GPRPIE:$rs1, QR:$qw, QR:$qu, select_4:$sel4, select_8:$sel8), + "esp.stxq.32\t $qu, $qw, $rs1, $sel4, $sel8", []> +{ + bits<5> rs1; + bits<3> qw; + bits<3> qu; + bits<2> sel4; + bits<3> sel8; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30-29} = sel4{1-0}; + let Inst{28-26} = sel8{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_STXQ_32_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, imm8:$qw, imm8:$qu, select_4:$sel4, select_8:$sel8), + "!esp_stxq_32_p $qu, $qw, $rs1, $sel4, $sel8", + [(int_riscv_esp_stxq_32 GPRPIE:$rs1, timm:$qw, timm:$qu, timm:$sel4, timm:$sel8)]>; + +def ESP_VLD_128_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_16:$off25616), + "esp.vld.128.ip\t $qu, $rs1, $off25616", []> +{ + bits<5> rs1; + bits<8> off25616; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30-28} = off25616{7-5}; + let Inst{27} = 0; + let Inst{26} = 0; + 
let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23-20} = off25616{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off25616{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLD_128_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_16:$off25616, imm8:$qu), + "!esp_vld_128_ip_p $qu, $rs1, $off25616", + [(int_riscv_esp_vld_128_ip GPRPIE:$rs1, timm:$off25616, timm:$qu)]>; + +def ESP_VLD_128_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vld.128.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLD_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), + "!esp_vld_128_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vld_128_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VLD_H_64_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "esp.vld.h.64.ip\t $qu, $rs1, $off2568", []> +{ + bits<5> rs1; + bits<8> off2568; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + 
let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29-27} = off2568{7-5}; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off2568{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off2568{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLD_H_64_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568, imm8:$qu), + "!esp_vld_h_64_ip_p $qu, $rs1, $off2568", + [(int_riscv_esp_vld_h_64_ip GPRPIE:$rs1, timm:$off2568, timm:$qu)]>; + +def ESP_VLD_H_64_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vld.h.64.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLD_H_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, imm8:$qu), + "!esp_vld_h_64_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vld_h_64_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VLD_L_64_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), 
(ins GPRPIE:$rs1, offset_256_8:$off2568), + "esp.vld.l.64.ip\t $qu, $rs1, $off2568", []> +{ + bits<5> rs1; + bits<8> off2568; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29-27} = off2568{7-5}; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off2568{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off2568{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLD_L_64_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568, imm8:$qu), + "!esp_vld_l_64_ip_p $qu, $rs1, $off2568", + [(int_riscv_esp_vld_l_64_ip GPRPIE:$rs1, timm:$off2568, timm:$qu)]>; + +def ESP_VLD_L_64_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r), (ins GPRPIE:$rs2, GPRPIE:$rs1), + "esp.vld.l.64.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<5> rs1; + bits<3> qu; + bits<5> rs1r; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VLD_L_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, GPRPIE:$rs1, 
imm8:$qu), + "!esp_vld_l_64_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vld_l_64_xp GPRPIE:$rs2, GPRPIE:$rs1, timm:$qu)]>; + +def ESP_VST_128_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, offset_256_16:$off25616), + "esp.vst.128.ip\t $qu, $rs1, $off25616", []> +{ + bits<3> qu; + bits<5> rs1; + bits<8> off25616; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30-28} = off25616{7-5}; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23-20} = off25616{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off25616{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VST_128_IP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, offset_256_16:$off25616), + "!esp_vst_128_ip_p $qu, $rs1, $off25616", + [(int_riscv_esp_vst_128_ip timm:$qu, GPRPIE:$rs1, timm:$off25616)]>; + +def ESP_VST_128_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, GPRPIE:$rs1), + "esp.vst.128.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let 
Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VST_128_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1), + "!esp_vst_128_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vst_128_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VST_H_64_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, offset_256_8:$off2568), + "esp.vst.h.64.ip\t $qu, $rs1, $off2568", []> +{ + bits<3> qu; + bits<5> rs1; + bits<8> off2568; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29-27} = off2568{7-5}; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off2568{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off2568{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VST_H_64_IP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, offset_256_8:$off2568), + "!esp_vst_h_64_ip_p $qu, $rs1, $off2568", + [(int_riscv_esp_vst_h_64_ip timm:$qu, GPRPIE:$rs1, timm:$off2568)]>; + +def ESP_VST_H_64_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, GPRPIE:$rs1), + "esp.vst.h.64.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let 
Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VST_H_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1), + "!esp_vst_h_64_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vst_h_64_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_VST_L_64_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qu, GPRPIE:$rs1, offset_256_8:$off2568), + "esp.vst.l.64.ip\t $qu, $rs1, $off2568", []> +{ + bits<3> qu; + bits<5> rs1; + bits<8> off2568; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29-27} = off2568{7-5}; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off2568{4-1}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off2568{0}; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VST_L_64_IP_P : PseudoESP32P4<(outs), (ins imm8:$qu, GPRPIE:$rs1, offset_256_8:$off2568), + "!esp_vst_l_64_ip_p $qu, $rs1, $off2568", + [(int_riscv_esp_vst_l_64_ip timm:$qu, GPRPIE:$rs1, timm:$off2568)]>; + +def ESP_VST_L_64_XP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs2, QR:$qu, GPRPIE:$rs1), + "esp.vst.l.64.xp\t $qu, $rs1, $rs2", []> +{ + bits<5> rs2; + bits<3> qu; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 
1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VST_L_64_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qu, GPRPIE:$rs1), + "!esp_vst_l_64_xp_p $qu, $rs1, $rs2", + [(int_riscv_esp_vst_l_64_xp GPRPIE:$rs2, timm:$qu, GPRPIE:$rs1)]>; + +def ESP_SLCI_2Q: Esp32P4Inst<(outs QR:$qyr, QR:$qwr), (ins QR:$qy, QR:$qw, select_16:$sel16), + "esp.slci.2q\t $qy, $qw, $sel16", []> +{ + bits<3> qy; + bits<3> qw; + bits<4> sel16; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qyr = $qy, $qwr = $qw"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = sel16{3}; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = sel16{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SLCI_2Q_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, select_16:$sel16), + "!esp_slci_2q_p $qy, $qw, $sel16", + [(int_riscv_esp_slci_2q timm:$qy, timm:$qw, timm:$sel16)]>; + +def ESP_SLCXXP_2Q: Esp32P4Inst<(outs QR:$qyr, QR:$qwr), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qy, QR:$qw), + "esp.slcxxp.2q\t $qy, $qw, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qy; + bits<3> 
qw; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qyr = $qy, $qwr = $qw"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SLCXXP_2Q_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qy, imm8:$qw), + "!esp_slcxxp_2q_p $qy, $qw, $rs1, $rs2", + [(int_riscv_esp_slcxxp_2q GPRPIE:$rs1, GPRPIE:$rs2, timm:$qy, timm:$qw)]>; + +def ESP_SRC_Q: Esp32P4Inst<(outs QR:$qz), (ins QR:$qy, QR:$qw), + "esp.src.q\t $qz, $qw, $qy", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qz; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRC_Q_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qz), + "!esp_src_q_p $qz, $qw, $qy", + [(int_riscv_esp_src_q timm:$qy, timm:$qw, timm:$qz)]>; + +def ESP_SRC_Q_LD_IP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r, QR:$qwr), 
(ins QR:$qy, GPRPIE:$rs1, QR:$qw, offset_256_16:$off25616), + "esp.src.q.ld.ip\t $qu, $rs1, $off25616, $qw, $qy", []> +{ + bits<3> qy; + bits<5> rs1; + bits<3> qw; + bits<8> off25616; + bits<3> qu; + bits<5> rs1r; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1, $qwr = $qw"; + + let Inst{31-29} = off25616{7-5}; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23-20} = off25616{4-1}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = off25616{0}; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRC_Q_LD_IP_P : PseudoESP32P4<(outs), (ins imm8:$qy, GPRPIE:$rs1, imm8:$qw, offset_256_16:$off25616, imm8:$qu), + "!esp_src_q_ld_ip_p $qu, $rs1, $off25616, $qw, $qy", + [(int_riscv_esp_src_q_ld_ip timm:$qy, GPRPIE:$rs1, timm:$qw, timm:$off25616, timm:$qu)]>; + +def ESP_SRC_Q_LD_XP: Esp32P4Inst<(outs QR:$qu, GPRPIE:$rs1r, QR:$qwr), (ins GPRPIE:$rs2, QR:$qy, GPRPIE:$rs1, QR:$qw), + "esp.src.q.ld.xp\t $qu, $rs1, $rs2, $qw, $qy", []> +{ + bits<5> rs2; + bits<3> qy; + bits<5> rs1; + bits<3> qw; + bits<3> qu; + bits<5> rs1r; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 1; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1, $qwr = $qw"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + 
let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRC_Q_LD_XP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs2, imm8:$qy, GPRPIE:$rs1, imm8:$qw, imm8:$qu), + "!esp_src_q_ld_xp_p $qu, $rs1, $rs2, $qw, $qy", + [(int_riscv_esp_src_q_ld_xp GPRPIE:$rs2, timm:$qy, GPRPIE:$rs1, timm:$qw, timm:$qu)]>; + +def ESP_SRC_Q_QUP: Esp32P4Inst<(outs QR:$qz, QR:$qwr), (ins QR:$qy, QR:$qw), + "esp.src.q.qup\t $qz, $qw, $qy", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qz; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qwr = $qw"; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 1; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9-7} = qz{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRC_Q_QUP_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qz), + "!esp_src_q_qup_p $qz, $qw, $qy", + [(int_riscv_esp_src_q_qup timm:$qy, timm:$qw, timm:$qz)]>; + +def ESP_SRCI_2Q: Esp32P4Inst<(outs QR:$qyr, QR:$qwr), (ins QR:$qy, QR:$qw, select_16:$sel16), + "esp.srci.2q\t $qy, $qw, $sel16", []> +{ + bits<3> qy; + bits<3> qw; + bits<4> sel16; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qyr = $qy, $qwr = $qw"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 1; + let Inst{22} = sel16{3}; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + 
let Inst{15} = 0; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 1; + let Inst{10} = 0; + let Inst{9-7} = sel16{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCI_2Q_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, select_16:$sel16), + "!esp_srci_2q_p $qy, $qw, $sel16", + [(int_riscv_esp_srci_2q timm:$qy, timm:$qw, timm:$sel16)]>; + +def ESP_SRCMB_S16_Q_QACC: Esp32P4Inst<(outs QR:$qu), (ins QR:$qw, select_2:$sel2), + "esp.srcmb.s16.q.qacc\t $qu, $qw, $sel2", []> +{ + bits<3> qw; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = sel2{0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_S16_Q_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qw, select_2:$sel2, imm8:$qu), + "!esp_srcmb_s16_q_qacc_p $qu, $qw, $sel2", + [(int_riscv_esp_srcmb_s16_q_qacc timm:$qw, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_S16_QACC: Esp32P4Inst<(outs QR:$qu), (ins GPRPIE:$rs1, select_2:$sel2), + "esp.srcmb.s16.qacc\t $qu, $rs1, $sel2", []> +{ + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 1; + let Inst{29} = sel2{0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let 
Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_S16_QACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_2:$sel2, imm8:$qu), + "!esp_srcmb_s16_qacc_p $qu, $rs1, $sel2", + [(int_riscv_esp_srcmb_s16_qacc GPRPIE:$rs1, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_S8_Q_QACC: Esp32P4Inst<(outs QR:$qu), (ins QR:$qw, select_2:$sel2), + "esp.srcmb.s8.q.qacc\t $qu, $qw, $sel2", []> +{ + bits<3> qw; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 1; + let Inst{26} = sel2{0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_S8_Q_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qw, select_2:$sel2, imm8:$qu), + "!esp_srcmb_s8_q_qacc_p $qu, $qw, $sel2", + [(int_riscv_esp_srcmb_s8_q_qacc timm:$qw, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_S8_QACC: Esp32P4Inst<(outs QR:$qu), (ins GPRPIE:$rs1, select_2:$sel2), + "esp.srcmb.s8.qacc\t $qu, $rs1, $sel2", []> +{ + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let 
hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 1; + let Inst{29} = sel2{0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_S8_QACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_2:$sel2, imm8:$qu), + "!esp_srcmb_s8_qacc_p $qu, $rs1, $sel2", + [(int_riscv_esp_srcmb_s8_qacc GPRPIE:$rs1, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_U16_Q_QACC: Esp32P4Inst<(outs QR:$qu), (ins QR:$qw, select_2:$sel2), + "esp.srcmb.u16.q.qacc\t $qu, $qw, $sel2", []> +{ + bits<3> qw; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = sel2{0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_U16_Q_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qw, select_2:$sel2, imm8:$qu), + "!esp_srcmb_u16_q_qacc_p $qu, $qw, $sel2", + [(int_riscv_esp_srcmb_u16_q_qacc timm:$qw, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_U16_QACC: Esp32P4Inst<(outs QR:$qu), (ins 
GPRPIE:$rs1, select_2:$sel2), + "esp.srcmb.u16.qacc\t $qu, $rs1, $sel2", []> +{ + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = sel2{0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_U16_QACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_2:$sel2, imm8:$qu), + "!esp_srcmb_u16_qacc_p $qu, $rs1, $sel2", + [(int_riscv_esp_srcmb_u16_qacc GPRPIE:$rs1, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_U8_Q_QACC: Esp32P4Inst<(outs QR:$qu), (ins QR:$qw, select_2:$sel2), + "esp.srcmb.u8.q.qacc\t $qu, $qw, $sel2", []> +{ + bits<3> qw; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = sel2{0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_U8_Q_QACC_P : PseudoESP32P4<(outs), (ins imm8:$qw, select_2:$sel2, imm8:$qu), + 
"!esp_srcmb_u8_q_qacc_p $qu, $qw, $sel2", + [(int_riscv_esp_srcmb_u8_q_qacc timm:$qw, timm:$sel2, timm:$qu)]>; + +def ESP_SRCMB_U8_QACC: Esp32P4Inst<(outs QR:$qu), (ins GPRPIE:$rs1, select_2:$sel2), + "esp.srcmb.u8.qacc\t $qu, $rs1, $sel2", []> +{ + bits<5> rs1; + bits<1> sel2; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = sel2{0}; + let Inst{28} = 1; + let Inst{27} = 1; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 1; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCMB_U8_QACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, select_2:$sel2, imm8:$qu), + "!esp_srcmb_u8_qacc_p $qu, $rs1, $sel2", + [(int_riscv_esp_srcmb_u8_qacc GPRPIE:$rs1, timm:$sel2, timm:$qu)]>; + +def ESP_SRCQ_128_ST_INCP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins QR:$qy, QR:$qw, GPRPIE:$rs1), + "esp.srcq.128.st.incp\t $qw, $qy, $rs1", []> +{ + bits<3> qy; + bits<3> qw; + bits<5> rs1; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 0; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} 
= 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCQ_128_ST_INCP_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, GPRPIE:$rs1), + "!esp_srcq_128_st_incp_p $qw, $qy, $rs1", + [(int_riscv_esp_srcq_128_st_incp timm:$qy, timm:$qw, GPRPIE:$rs1)]>; + +def ESP_SRCXXP_2Q: Esp32P4Inst<(outs QR:$qyr, QR:$qwr), (ins GPRPIE:$rs1, GPRPIE:$rs2, QR:$qy, QR:$qw), + "esp.srcxxp.2q\t $qy, $qw, $rs1, $rs2", []> +{ + bits<5> rs1; + bits<5> rs2; + bits<3> qy; + bits<3> qw; + bits<3> qyr; + bits<3> qwr; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$qyr = $qy, $qwr = $qw"; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = rs2{4}; + let Inst{22-20} = rs2{2-0}; + let Inst{19} = qw{2}; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = 1; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRCXXP_2Q_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rs2, imm8:$qy, imm8:$qw), + "!esp_srcxxp_2q_p $qy, $qw, $rs1, $rs2", + [(int_riscv_esp_srcxxp_2q GPRPIE:$rs1, GPRPIE:$rs2, timm:$qy, timm:$qw)]>; + +def ESP_SRS_S_XACC: Esp32P4Inst<(outs GPRPIE:$rd), (ins GPRPIE:$rs1), + "esp.srs.s.xacc\t $rd, $rs1", []> +{ + bits<5> rs1; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 1; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 
0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRS_S_XACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rd), + "!esp_srs_s_xacc_p $rd, $rs1", + [(int_riscv_esp_srs_s_xacc GPRPIE:$rs1, GPRPIE:$rd)]>; + +def ESP_SRS_U_XACC: Esp32P4Inst<(outs GPRPIE:$rd), (ins GPRPIE:$rs1), + "esp.srs.u.xacc\t $rd, $rs1", []> +{ + bits<5> rs1; + bits<5> rd; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 1; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 1; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 1; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12} = 0; + let Inst{11} = 0; + let Inst{10} = rd{4}; + let Inst{9-7} = rd{2-0}; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_SRS_U_XACC_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, GPRPIE:$rd), + "!esp_srs_u_xacc_p $rd, $rs1", + [(int_riscv_esp_srs_u_xacc GPRPIE:$rs1, GPRPIE:$rd)]>; + +def ESP_VSL_32: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy), + "esp.vsl.32\t $qu, $qy", []> +{ + bits<3> qy; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = 
qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSL_32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qu), + "!esp_vsl_32_p $qu, $qy", + [(int_riscv_esp_vsl_32 timm:$qy, timm:$qu)]>; + +def ESP_VSLD_16: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsld.16\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSLD_16_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsld_16_p $qu, $qy, $qw", + [(int_riscv_esp_vsld_16 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_VSLD_32: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsld.32\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let 
Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSLD_32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsld_32_p $qu, $qy, $qw", + [(int_riscv_esp_vsld_32 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_VSLD_8: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsld.8\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSLD_8_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsld_8_p $qu, $qy, $qw", + [(int_riscv_esp_vsld_8 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_VSR_S32: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy), + "esp.vsr.s32\t $qu, $qy", []> +{ + bits<3> qy; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 1; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; 
+ let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSR_S32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qu), + "!esp_vsr_s32_p $qu, $qy", + [(int_riscv_esp_vsr_s32 timm:$qy, timm:$qu)]>; + +def ESP_VSR_U32: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy), + "esp.vsr.u32\t $qu, $qy", []> +{ + bits<3> qy; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 1; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23} = 0; + let Inst{22} = 0; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = 0; + let Inst{18} = 1; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSR_U32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qu), + "!esp_vsr_u32_p $qu, $qy", + [(int_riscv_esp_vsr_u32 timm:$qy, timm:$qu)]>; + +def ESP_VSRD_16: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsrd.16\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 1; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; 
+} + +let usesCustomInserter = 1 in +def ESP_VSRD_16_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsrd_16_p $qu, $qy, $qw", + [(int_riscv_esp_vsrd_16 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_VSRD_32: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsrd.32\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 1; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_VSRD_32_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsrd_32_p $qu, $qy, $qw", + [(int_riscv_esp_vsrd_32 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_VSRD_8: Esp32P4Inst<(outs QR:$qu), (ins QR:$qy, QR:$qw), + "esp.vsrd.8\t $qu, $qy, $qw", []> +{ + bits<3> qy; + bits<3> qw; + bits<3> qu; + let mayStore = 0; + let mayLoad = 0; + let hasSideEffects = 1; + + let Inst{31} = 0; + let Inst{30} = 0; + let Inst{29} = 0; + let Inst{28-26} = qy{2-0}; + let Inst{25-24} = qw{1-0}; + let Inst{23} = 0; + let Inst{22} = 1; + let Inst{21} = 0; + let Inst{20} = 0; + let Inst{19} = qw{2}; + let Inst{18} = 0; + let Inst{17} = 0; + let Inst{16} = 0; + let Inst{15} = 0; + let Inst{14} = 0; + let Inst{13} = 0; + let Inst{12-10} = qu{2-0}; + let Inst{9} = 0; + let Inst{8} = 0; + let Inst{7} = 0; + let Inst{6} = 1; + let Inst{5} = 0; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 1; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let 
usesCustomInserter = 1 in +def ESP_VSRD_8_P : PseudoESP32P4<(outs), (ins imm8:$qy, imm8:$qw, imm8:$qu), + "!esp_vsrd_8_p $qu, $qy, $qw", + [(int_riscv_esp_vsrd_8 timm:$qy, timm:$qw, timm:$qu)]>; + +def ESP_ST_S_XACC_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "esp.st.s.xacc.ip\t $rs1, $off2568", []> +{ + bits<5> rs1; + bits<8> off2568; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 1; + let Inst{30} = off2568{7}; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off2568{6-3}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = off2568{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ST_S_XACC_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "!esp_st_s_xacc_ip_p $rs1, $off2568", + [(int_riscv_esp_st_s_xacc_ip GPRPIE:$rs1, timm:$off2568)]>; + +def ESP_ST_U_XACC_IP: Esp32P4Inst<(outs GPRPIE:$rs1r), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "esp.st.u.xacc.ip\t $rs1, $off2568", []> +{ + bits<5> rs1; + bits<8> off2568; + bits<5> rs1r; + let mayStore = 1; + let mayLoad = 0; + let hasSideEffects = 1; + + let Constraints = "$rs1r = $rs1"; + + let Inst{31} = 0; + let Inst{30} = off2568{7}; + let Inst{29} = 1; + let Inst{28} = 0; + let Inst{27} = 0; + let Inst{26} = 0; + let Inst{25} = 0; + let Inst{24} = 0; + let Inst{23-20} = off2568{6-3}; + let Inst{19} = 0; + let Inst{18} = rs1{4}; + let Inst{17-15} = rs1{2-0}; + let Inst{14} = 1; + let Inst{13} = 0; + let Inst{12-10} = off2568{2-0}; + let Inst{9} = 0; + let Inst{8} = 1; + let Inst{7} = 1; + let Inst{6} = 0; + 
let Inst{5} = 1; + let Inst{4} = 1; + let Inst{3} = 1; + let Inst{2} = 0; + let Inst{1} = 1; + let Inst{0} = 1; +} + +let usesCustomInserter = 1 in +def ESP_ST_U_XACC_IP_P : PseudoESP32P4<(outs), (ins GPRPIE:$rs1, offset_256_8:$off2568), + "!esp_st_u_xacc_ip_p $rs1, $off2568", + [(int_riscv_esp_st_u_xacc_ip GPRPIE:$rs1, timm:$off2568)]>; + diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP4HWLP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP4HWLP.td new file mode 100644 index 00000000000000..55eb4326575c28 --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP4HWLP.td @@ -0,0 +1,172 @@ +//===- RISCVInstrInfoP4HWLP.td - RISCV Target Description -*- tablegen -*--===// +// +// The LLVM Compiler Infrastructure +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file describes the RISCV ESP32P4 DSP instructions in TableGen format. 
+//
+// These definitions describe the ESP32-P4 hardware-loop (HWLP) instructions.
+// This file is generated; do not edit manually.
+//
+//===----------------------------------------------------------------------===//
+
+// Set up HW loop `id` with an immediate trip count and a (4-byte-stepped)
+// immediate end-of-loop offset.
+def ESP_LP_SETUPI: Esp32P4Inst<(outs), (ins uimm1:$id, uimm12:$count, uimm10_step4:$offset),
+                               "esp.lp.setupi\t $id, $count, $offset", []>
+{
+  bits<1> id;
+  bits<12> count; // was bits<13>: operand is uimm12 and only count{11-0} is encoded
+  bits<10> offset;
+  let mayStore = 0;
+  let mayLoad = 0;
+  let hasSideEffects = 1;
+
+  let Inst{31-20} = count{11-0};
+  let Inst{19-15} = offset{5-1}; // offset is step-4, so bit 0 is implicitly zero
+  let Inst{14} = 1;
+  let Inst{13} = 0;
+  let Inst{12} = 1;
+  let Inst{11-8} = offset{9-6};
+  let Inst{7} = id;
+  let Inst{6} = 0;
+  let Inst{5} = 1;
+  let Inst{4} = 0;
+  let Inst{3} = 1;
+  let Inst{2} = 0;
+  let Inst{1} = 1;
+  let Inst{0} = 1;
+}
+
+// Set up HW loop `id` with the trip count taken from $rs1.
+def ESP_LP_SETUP: Esp32P4Inst<(outs), (ins uimm1:$id, GPRPIE:$rs1, uimm13_step4:$offset),
+                              "esp.lp.setup\t $id, $rs1, $offset", []>
+{
+  bits<1> id;
+  bits<5> rs1;
+  bits<13> offset;
+  let mayStore = 0;
+  let mayLoad = 0;
+  let hasSideEffects = 1;
+
+  let Inst{31-20} = offset{12-1};
+  let Inst{19-15} = rs1{4-0};
+  let Inst{14} = 1;
+  let Inst{13} = 0;
+  let Inst{12} = 0;
+  let Inst{11-8} = 0b0000; // was 0xb0000: hex typo for a binary literal (value 0)
+  let Inst{7} = id;
+  let Inst{6} = 0;
+  let Inst{5} = 1;
+  let Inst{4} = 0;
+  let Inst{3} = 1;
+  let Inst{2} = 0;
+  let Inst{1} = 1;
+  let Inst{0} = 1;
+}
+
+// Mark the start of HW loop `id` at the given immediate offset.
+def ESP_LP_STARTI: Esp32P4Inst<(outs), (ins uimm1:$id, uimm13_step4:$offset),
+                               "esp.lp.starti\t $id, $offset", []>
+{
+  bits<1> id;
+  bits<5> rs1; // unused: instruction has no GPR operand
+  bits<13> offset;
+  let mayStore = 0;
+  let mayLoad = 0;
+  let hasSideEffects = 1;
+
+  let Inst{31-20} = offset{12-1};
+  let Inst{19-15} = 0b00000; // was 0xb00000: hex typo for a binary literal (value 0)
+  let Inst{14} = 0;
+  let Inst{13} = 0;
+  let Inst{12} = 0;
+  let Inst{11-8} = 0b0000; // was 0xb0000
+  let Inst{7} = id;
+  let Inst{6} = 0;
+  let Inst{5} = 1;
+  let Inst{4} = 0;
+  let Inst{3} = 1;
+  let Inst{2} = 0;
+  let Inst{1} = 1;
+  let Inst{0} = 1;
+}
+
+// Mark the end of HW loop `id` at the given immediate offset.
+def ESP_LP_ENDI: Esp32P4Inst<(outs), (ins uimm1:$id, uimm13_step4:$offset),
+                             "esp.lp.endi\t $id, $offset", []>
+{
+  bits<1> id;
+  bits<5> rs1; // unused: instruction has no GPR operand
+  bits<13> offset;
+  let mayStore = 0;
+  let mayLoad = 0;
+  let hasSideEffects = 1;
+
+  let Inst{31-20} = offset{12-1};
+  let Inst{19-15} = 0b00000; // was 0xb00000
+  let Inst{14} = 0;
+  let Inst{13} = 0;
+  let Inst{12} = 1;
+  let Inst{11-8} = 0b0000; // was 0xb0000
+  let Inst{7} = id;
+  let Inst{6} = 0;
+  let Inst{5} = 1;
+  let Inst{4} = 0;
+  let Inst{3} = 1;
+  let Inst{2} = 0;
+  let Inst{1} = 1;
+  let Inst{0} = 1;
+}
+
+// Set the trip count of HW loop `id` from an immediate.
+def ESP_LP_COUNTI: Esp32P4Inst<(outs), (ins uimm1:$id, uimm12:$count),
+                               "esp.lp.counti\t $id, $count", []>
+{
+  bits<1> id;
+  bits<5> rs1; // unused: instruction has no GPR operand
+  bits<12> count;
+  let mayStore = 0;
+  let mayLoad = 0;
+  let hasSideEffects = 1;
+
+  let Inst{31-20} = count{11-0};
+  let Inst{19-15} = 0b00000; // was 0xb00000
+  let Inst{14} = 0;
+  let Inst{13} = 1;
+  let Inst{12} = 1;
+  let Inst{11-8} = 0b0000; // was 0xb0000
+  let Inst{7} = id;
+  let Inst{6} = 0;
+  let Inst{5} = 1;
+  let Inst{4} = 0;
+  let Inst{3} = 1;
+  let Inst{2} = 0;
+  let Inst{1} = 1;
+  let Inst{0} = 1;
+}
+
+// Set the trip count of HW loop `id` from $rs1.
+def ESP_LP_COUNT: Esp32P4Inst<(outs), (ins uimm1:$id, GPRPIE:$rs1),
+                              "esp.lp.count\t $id, $rs1", []>
+{
+  bits<1> id;
+  bits<5> rs1;
+  bits<13> offset; // unused: instruction has no offset operand
+  let mayStore = 0;
+  let mayLoad = 0;
+  let hasSideEffects = 1;
+
+  let Inst{31-20} = 0b000000000000; // was 0xb000000000000: hex typo for a binary literal (value 0)
+  let Inst{19-15} = rs1{4-0}; // was 0xb00000: $rs1 was never encoded; mirrors esp.lp.setup's rs1 field -- confirm against the P4 HWLP ISA spec
+  let Inst{14} = 0;
+  let Inst{13} = 1;
+  let Inst{12} = 0;
+  let Inst{11-8} = 0b0000; // was 0xb0000
+  let Inst{7} = id;
+  let Inst{6} = 0;
+  let Inst{5} = 1;
+  let Inst{4} = 0;
+  let Inst{3} = 1;
+  let Inst{2} = 0;
+  let Inst{1} = 1;
+  let Inst{0} = 1;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td
index 03ca505d100df4..613957271afaf0 100644
--- a/llvm/lib/Target/RISCV/RISCVProcessors.td
+++ b/llvm/lib/Target/RISCV/RISCVProcessors.td
@@ -348,3 +348,17 @@ def XIANGSHAN_NANHU : RISCVProcessorModel<"xiangshan-nanhu",
                                            FeatureStdExtSvinval,
                                            FeatureStdExtZicbom,
                                            FeatureStdExtZicboz]>;
+
+def ESPRESSIF_ESP32P4 : RISCVProcessorModel<"esp32p4",
+                                            NoSchedModel,
+                                            [Feature32Bit,
+                                             FeatureVendorESP32P4,
+                                             FeatureStdExtZicsr,
+                                             FeatureStdExtZifencei,
+                                             FeatureStdExtM,
+                                             FeatureStdExtA,
+                                             FeatureStdExtF,
+                                             FeatureStdExtC,
+                                             FeatureStdExtZcb,
+                                             FeatureStdExtZcmp,
+                                             FeatureStdExtZcmt]>;
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 5a4d8c4cfece7f..c6da06f83f266d 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -116,6 +116,24 @@ let RegAltNameIndices = [ABIRegAltName] in {
   }
 }
 
+// 128-bit ESP32-P4 DSP vector register (q0-q7). Template parameters were
+// reconstructed from the body (HWEncoding{15-0} implies bits<16> num; AltNames
+// is set explicitly, so the Register parent only needs the name).
+class QRReg<bits<16> num, string n, list<string> alt = []> : Register<n> {
+  let Namespace = "RISCV";
+  let HWEncoding{15-0} = num;
+  let AltNames = alt;
+}
+let RegAltNameIndices = [ABIRegAltName] in {
+  def Q0 : QRReg<0, "q0", ["q0"]>, DwarfRegNum<[0]>; // FIXME: Dwarf numbers 0-7 collide with X0-X7 -- confirm the vendor's DWARF register mapping
+  def Q1 : QRReg<1, "q1", ["q1"]>, DwarfRegNum<[1]>;
+  def Q2 : QRReg<2, "q2", ["q2"]>, DwarfRegNum<[2]>;
+  def Q3 : QRReg<3, "q3", ["q3"]>, DwarfRegNum<[3]>;
+  def Q4 : QRReg<4, "q4", ["q4"]>, DwarfRegNum<[4]>;
+  def Q5 : QRReg<5, "q5", ["q5"]>, DwarfRegNum<[5]>;
+  def Q6 : QRReg<6, "q6", ["q6"]>, DwarfRegNum<[6]>;
+  def Q7 : QRReg<7, "q7", ["q7"]>, DwarfRegNum<[7]>;
+}
+
+def QR : RegisterClass<"RISCV", [v16i8, v4i32], 128, (sequence "Q%u", 0, 7)>;
+
 def XLenVT : ValueTypeByHwMode<[RV32, RV64],
                                [i32, i64]>;
 
 // Allow f64 in GPR for ZDINX on RV64.
@@ -141,6 +159,9 @@ def GPR : GPRRegisterClass<(add (sequence "X%u", 10, 17), (sequence "X%u", 18, 27), (sequence "X%u", 0, 4))>; +def GPRPIE : GPRRegisterClass<(add (sequence "X%u", 8, 15), + (sequence "X%u", 24, 31))>; + def GPRX0 : GPRRegisterClass<(add X0)>; def GPRX1 : GPRRegisterClass<(add X1)>; def GPRX5 : GPRRegisterClass<(add X5)>; diff --git a/llvm/test/CodeGen/RISCV/esp32p4.ll b/llvm/test/CodeGen/RISCV/esp32p4.ll new file mode 100644 index 00000000000000..a30afb429845e8 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/esp32p4.ll @@ -0,0 +1,1289 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -O1 -mtriple=riscv32 -mcpu=esp32p4 -verify-machineinstrs < %s \ +; RUN: | FileCheck %s -check-prefix=CHECK + +define void @test(){ +; CHECK-LABEL: test: +; CHECK: # %bb.0: +; CHECK-NEXT: cm.push {ra, s0-s11}, -64 +; CHECK-NEXT: .cfi_def_cfa_offset 64 +; CHECK-NEXT: .cfi_offset s0, -48 +; CHECK-NEXT: .cfi_offset s1, -44 +; CHECK-NEXT: .cfi_offset s8, -16 +; CHECK-NEXT: .cfi_offset s9, -12 +; CHECK-NEXT: .cfi_offset s10, -8 +; CHECK-NEXT: .cfi_offset s11, -4 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q0, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q1, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q2, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q3, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q4, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q5, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q6, a0, 784 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q7, a0, 784 +; CHECK-NEXT: esp.vcmulas.s16.qacc.h q0, q4 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vcmulas.s16.qacc.h.ld.ip q1, a0, -48, q6, q1 +; CHECK-NEXT: li s9, 12 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vcmulas.s16.qacc.h.ld.xp q1, a0, s9, q2, q7 +; CHECK-NEXT: esp.vcmulas.s16.qacc.l q7, q6 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vcmulas.s16.qacc.l.ld.ip 
q7, a0, 48, q7, q0 +; CHECK-NEXT: li t4, 14 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vcmulas.s16.qacc.l.ld.xp q1, a0, t4, q2, q7 +; CHECK-NEXT: esp.vcmulas.s8.qacc.h q1, q1 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vcmulas.s8.qacc.h.ld.ip q4, a0, 32, q1, q6 +; CHECK-NEXT: li s11, 7 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vcmulas.s8.qacc.h.ld.xp q6, a0, s11, q3, q2 +; CHECK-NEXT: esp.vcmulas.s8.qacc.l q4, q5 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vcmulas.s8.qacc.l.ld.ip q4, a0, -48, q2, q5 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vcmulas.s8.qacc.l.ld.xp q7, a0, s11, q6, q3 +; CHECK-NEXT: esp.vmulas.s16.qacc q4, q2 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vmulas.s16.qacc.ld.ip q1, a0, 96, q5, q7 +; CHECK-NEXT: li t3, 3 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.s16.qacc.ld.xp q6, a0, t3, q4, q2 +; CHECK-NEXT: li a5, 0 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vmulas.s16.qacc.st.ip q1, a0, 80, q7, q6 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vmulas.s16.qacc.st.xp q6, a0, a0, q0, q7 +; CHECK-NEXT: esp.vmulas.s16.xacc q3, q5 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vmulas.s16.xacc.ld.ip q5, a0, 96, q1, q7 +; CHECK-NEXT: li a2, 8 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vmulas.s16.xacc.ld.xp q0, a0, a2, q5, q5 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vmulas.s16.xacc.st.ip q2, a0, 16, q4, q6 +; CHECK-NEXT: li t6, 5 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vmulas.s16.xacc.st.xp q7, a0, t6, q7, q7 +; CHECK-NEXT: esp.vmulas.s8.qacc q6, q1 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.s8.qacc.ld.ip q2, a0, -128, q3, q5 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.s8.qacc.ld.xp q4, a0, t6, q0, q5 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vmulas.s8.qacc.st.ip q7, a0, 16, q6, q0 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vmulas.s8.qacc.st.xp q4, a0, s9, q6, q1 +; CHECK-NEXT: esp.vmulas.s8.xacc q3, q7 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vmulas.s8.xacc.ld.ip q7, a0, -16, q4, q5 +; CHECK-NEXT: li t5, 10 +; CHECK-NEXT: li a0, 7 +; 
CHECK-NEXT: esp.vmulas.s8.xacc.ld.xp q1, a0, t5, q7, q0 +; CHECK-NEXT: li a1, 2 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.s8.xacc.st.ip q6, a0, -128, q6, q1 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vmulas.s8.xacc.st.xp q5, a0, a1, q4, q1 +; CHECK-NEXT: esp.vmulas.u16.qacc q6, q1 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.u16.qacc.ld.ip q7, a0, -32, q0, q0 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vmulas.u16.qacc.ld.xp q2, a0, s11, q6, q7 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmulas.u16.qacc.st.ip q4, a0, 16, q6, q5 +; CHECK-NEXT: li s0, 9 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vmulas.u16.qacc.st.xp q4, a0, s0, q3, q7 +; CHECK-NEXT: esp.vmulas.u16.xacc q6, q1 +; CHECK-NEXT: li a4, 6 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vmulas.u16.xacc.ld.ip q2, a0, -48, q2, q2 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vmulas.u16.xacc.ld.xp q7, a0, a4, q3, q0 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vmulas.u16.xacc.st.ip q0, a0, 96, q1, q4 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vmulas.u16.xacc.st.xp q6, a0, t6, q3, q7 +; CHECK-NEXT: esp.vmulas.u8.qacc q7, q1 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vmulas.u8.qacc.ld.ip q7, a0, -48, q7, q4 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.vmulas.u8.qacc.ld.xp q4, a0, s9, q6, q7 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vmulas.u8.qacc.st.ip q2, a0, 0, q1, q7 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vmulas.u8.qacc.st.xp q4, a0, a0, q0, q0 +; CHECK-NEXT: esp.vmulas.u8.xacc q6, q4 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vmulas.u8.xacc.ld.ip q3, a0, -80, q6, q2 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vmulas.u8.xacc.ld.xp q4, a0, a0, q5, q1 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vmulas.u8.xacc.st.ip q7, a0, -128, q2, q3 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vmulas.u8.xacc.st.xp q2, a0, a4, q7, q2 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vmulas.s16.qacc.ldbc.incp q0, a0, q0, q2 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vmulas.s8.qacc.ldbc.incp q5, a0, q2, q6 +; CHECK-NEXT: li a0, 8 
+; CHECK-NEXT: esp.vmulas.u16.qacc.ldbc.incp q5, a0, q7, q3 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vmulas.u8.qacc.ldbc.incp q3, a0, q4, q4 +; CHECK-NEXT: esp.vsmulas.s16.qacc q1, q5, 14 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vsmulas.s16.qacc.ld.incp q0, a0, q7, q4, 0 +; CHECK-NEXT: esp.vsmulas.s8.qacc q3, q5, 0 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vsmulas.s8.qacc.ld.incp q1, a0, q1, q4, 6 +; CHECK-NEXT: esp.vsmulas.u16.qacc q6, q5, 15 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vsmulas.u16.qacc.ld.incp q7, a0, q7, q1, 10 +; CHECK-NEXT: esp.vsmulas.u8.qacc q0, q7, 2 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vsmulas.u8.qacc.ld.incp q4, a0, q3, q7, 8 +; CHECK-NEXT: esp.cmul.s16 q6, q0, q7, 3 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.cmul.s16.ld.incp q5, a0, q3, q0, q3, 0 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.cmul.s16.st.incp q5, a0, q0, q4, q5, 2 +; CHECK-NEXT: esp.cmul.s8 q1, q1, q0, 3 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.cmul.s8.ld.incp q4, a0, q7, q5, q4, 1 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.cmul.s8.st.incp q5, a0, q0, q6, q0, 3 +; CHECK-NEXT: esp.cmul.u16 q7, q7, q5, 2 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.cmul.u16.ld.incp q0, a0, q0, q0, q1, 1 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.cmul.u16.st.incp q2, a0, q4, q1, q4, 2 +; CHECK-NEXT: esp.cmul.u8 q3, q7, q5, 0 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.cmul.u8.ld.incp q4, a0, q0, q0, q2, 0 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.cmul.u8.st.incp q1, a0, q4, q6, q3, 2 +; CHECK-NEXT: esp.max.s16.a q6, a0 +; CHECK-NEXT: esp.max.s32.a q2, a0 +; CHECK-NEXT: esp.max.s8.a q0, a0 +; CHECK-NEXT: esp.max.u16.a q6, a0 +; CHECK-NEXT: esp.max.u32.a q6, a0 +; CHECK-NEXT: esp.max.u8.a q1, a0 +; CHECK-NEXT: esp.min.s16.a q6, a0 +; CHECK-NEXT: esp.min.s32.a q1, a0 +; CHECK-NEXT: esp.min.s8.a q0, a0 +; CHECK-NEXT: esp.min.u16.a q3, a0 +; CHECK-NEXT: esp.min.u32.a q0, a0 +; CHECK-NEXT: esp.min.u8.a q5, a0 +; CHECK-NEXT: esp.vabs.16 q6, q0 +; CHECK-NEXT: esp.vabs.32 q1, q4 +; 
CHECK-NEXT: esp.vabs.8 q5, q2 +; CHECK-NEXT: esp.vadd.s16 q6, q1, q5 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vadd.s16.ld.incp q0, a0, q1, q0, q6 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.vadd.s16.st.incp q1, a0, q7, q0, q4 +; CHECK-NEXT: esp.vadd.s32 q7, q7, q3 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vadd.s32.ld.incp q4, a0, q4, q7, q2 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vadd.s32.st.incp q2, a0, q7, q1, q7 +; CHECK-NEXT: esp.vadd.s8 q7, q1, q7 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vadd.s8.ld.incp q2, a0, q1, q5, q6 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vadd.s8.st.incp q3, a0, q4, q1, q0 +; CHECK-NEXT: esp.vadd.u16 q0, q7, q7 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vadd.u16.ld.incp q6, a0, q1, q7, q5 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vadd.u16.st.incp q0, a0, q7, q6, q3 +; CHECK-NEXT: esp.vadd.u32 q4, q0, q1 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vadd.u32.ld.incp q1, a0, q4, q5, q0 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vadd.u32.st.incp q4, a0, q6, q0, q1 +; CHECK-NEXT: esp.vadd.u8 q5, q2, q5 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vadd.u8.ld.incp q7, a0, q1, q4, q3 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vadd.u8.st.incp q0, a0, q2, q0, q0 +; CHECK-NEXT: esp.vclamp.s16 q4, q5, 14 +; CHECK-NEXT: esp.vmax.s16 q5, q6, q5 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.vmax.s16.ld.incp q2, a0, q3, q5, q5 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vmax.s16.st.incp q3, a0, q4, q3, q5 +; CHECK-NEXT: esp.vmax.s32 q2, q5, q2 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vmax.s32.ld.incp q0, a0, q6, q0, q1 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vmax.s32.st.incp q6, a0, q1, q7, q6 +; CHECK-NEXT: esp.vmax.s8 q7, q5, q7 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.vmax.s8.ld.incp q6, a0, q1, q5, q1 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vmax.s8.st.incp q5, a0, q7, q1, q3 +; CHECK-NEXT: esp.vmax.u16 q1, q4, q1 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vmax.u16.ld.incp q3, a0, q5, q5, q4 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: 
esp.vmax.u16.st.incp q5, a0, q5, q0, q7 +; CHECK-NEXT: esp.vmax.u32 q4, q0, q2 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vmax.u32.ld.incp q6, a0, q1, q0, q6 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vmax.u32.st.incp q0, a0, q1, q4, q7 +; CHECK-NEXT: esp.vmax.u8 q5, q2, q0 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vmax.u8.ld.incp q0, a0, q5, q6, q1 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vmax.u8.st.incp q7, a0, q1, q6, q7 +; CHECK-NEXT: esp.vmin.s16 q4, q1, q3 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vmin.s16.ld.incp q2, a0, q2, q2, q1 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vmin.s16.st.incp q2, a0, q1, q7, q6 +; CHECK-NEXT: esp.vmin.s32 q2, q0, q3 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vmin.s32.ld.incp q1, a0, q5, q7, q6 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vmin.s32.st.incp q7, a0, q5, q5, q1 +; CHECK-NEXT: esp.vmin.s8 q2, q3, q6 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.vmin.s8.ld.incp q7, a0, q1, q4, q3 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vmin.s8.st.incp q1, a0, q4, q0, q1 +; CHECK-NEXT: esp.vmin.u16 q4, q3, q7 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: esp.vmin.u16.ld.incp q4, a0, q5, q6, q6 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: esp.vmin.u16.st.incp q1, a0, q2, q6, q0 +; CHECK-NEXT: esp.vmin.u32 q5, q0, q7 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vmin.u32.ld.incp q7, a0, q6, q5, q6 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vmin.u32.st.incp q5, a0, q4, q3, q7 +; CHECK-NEXT: esp.vmin.u8 q7, q5, q5 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: esp.vmin.u8.ld.incp q2, a0, q5, q0, q5 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: esp.vmin.u8.st.incp q2, a0, q1, q6, q6 +; CHECK-NEXT: esp.vmul.s16 q6, q2, q1 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.vmul.s16.ld.incp q0, a0, q3, q6, q7 +; CHECK-NEXT: esp.vmul.s16.s8xs8 q7, q0, q3, q5 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vmul.s16.st.incp q2, a0, q0, q7, q1 +; CHECK-NEXT: esp.vmul.s32.s16xs16 q3, q4, q5, q2 +; CHECK-NEXT: esp.vmul.s8 q3, q4, q0 +; CHECK-NEXT: li a0, 0 +; CHECK-NEXT: 
esp.vmul.s8.ld.incp q0, a0, q2, q2, q3 +; CHECK-NEXT: li s1, 4 +; CHECK-NEXT: li s8, 13 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vmul.s8.st.incp q0, a0, q3, q0, q7 +; CHECK-NEXT: esp.vmul.u16 q2, q3, q7 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vmul.u16.ld.incp q5, a0, q5, q6, q6 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vmul.u16.st.incp q3, a0, q2, q4, q4 +; CHECK-NEXT: esp.vmul.u8 q7, q3, q7 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vmul.u8.ld.incp q0, a0, q1, q0, q6 +; CHECK-NEXT: li s10, 1 +; CHECK-NEXT: li a0, 11 +; CHECK-NEXT: li a3, 6 +; CHECK-NEXT: esp.vmul.u8.st.incp q7, a3, q4, q0, q3 +; CHECK-NEXT: esp.vprelu.s16 q1, q4, q3, a2 +; CHECK-NEXT: esp.vprelu.s8 q2, q4, q5, t3 +; CHECK-NEXT: esp.vrelu.s16 q6, s0, a2 +; CHECK-NEXT: esp.vrelu.s8 q5, s10, s9 +; CHECK-NEXT: esp.vsadds.s16 q3, q3, s9 +; CHECK-NEXT: esp.vsadds.s8 q7, q1, s11 +; CHECK-NEXT: esp.vsadds.u16 q3, q2, s1 +; CHECK-NEXT: esp.vsadds.u8 q2, q3, a2 +; CHECK-NEXT: esp.vsat.s16 q5, q0, s0, s10 +; CHECK-NEXT: esp.vsat.s32 q3, q3, s9, s0 +; CHECK-NEXT: esp.vsat.s8 q0, q7, t5, a1 +; CHECK-NEXT: esp.vsat.u16 q3, q7, s11, s11 +; CHECK-NEXT: esp.vsat.u32 q3, q5, a2, a1 +; CHECK-NEXT: esp.vsat.u8 q0, q6, s10, s8 +; CHECK-NEXT: esp.vssubs.s16 q3, q7, t3 +; CHECK-NEXT: esp.vssubs.s8 q7, q0, t4 +; CHECK-NEXT: esp.vssubs.u16 q5, q4, a5 +; CHECK-NEXT: esp.vssubs.u8 q5, q1, a0 +; CHECK-NEXT: esp.vsub.s16 q0, q0, q6 +; CHECK-NEXT: li a3, 6 +; CHECK-NEXT: esp.vsub.s16.ld.incp q2, a3, q2, q3, q7 +; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: esp.vsub.s16.st.incp q7, a3, q0, q0, q3 +; CHECK-NEXT: esp.vsub.s32 q7, q2, q7 +; CHECK-NEXT: li a3, 7 +; CHECK-NEXT: esp.vsub.s32.ld.incp q4, a3, q3, q2, q0 +; CHECK-NEXT: li a3, 5 +; CHECK-NEXT: esp.vsub.s32.st.incp q4, a3, q1, q1, q1 +; CHECK-NEXT: esp.vsub.s8 q7, q5, q6 +; CHECK-NEXT: li a3, 1 +; CHECK-NEXT: esp.vsub.s8.ld.incp q4, a3, q1, q2, q6 +; CHECK-NEXT: li a3, 4 +; CHECK-NEXT: esp.vsub.s8.st.incp q5, a3, q4, q2, q3 +; CHECK-NEXT: esp.vsub.u16 q5, q7, q0 +; 
CHECK-NEXT: li a3, 11 +; CHECK-NEXT: esp.vsub.u16.ld.incp q4, a3, q0, q7, q5 +; CHECK-NEXT: li a3, 11 +; CHECK-NEXT: esp.vsub.u16.st.incp q0, a3, q1, q3, q1 +; CHECK-NEXT: esp.vsub.u32 q5, q4, q2 +; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: esp.vsub.u32.ld.incp q4, a3, q2, q4, q2 +; CHECK-NEXT: li a3, 11 +; CHECK-NEXT: esp.vsub.u32.st.incp q0, a3, q7, q7, q4 +; CHECK-NEXT: esp.vsub.u8 q6, q5, q4 +; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: esp.vsub.u8.ld.incp q4, a3, q6, q2, q4 +; CHECK-NEXT: esp.vsub.u8.st.incp q3, a0, q3, q2, q0 +; CHECK-NEXT: esp.addx2 zero, t6, t4 +; CHECK-NEXT: esp.addx4 zero, t4, t6 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.sat a0, a4, a1 +; CHECK-NEXT: esp.subx2 zero, a1, a1 +; CHECK-NEXT: esp.subx4 zero, a5, s0 +; CHECK-NEXT: esp.andq q7, q6, q3 +; CHECK-NEXT: esp.notq q6, q5 +; CHECK-NEXT: esp.orq q1, q1, q0 +; CHECK-NEXT: esp.xorq q5, q1, q6 +; CHECK-NEXT: esp.vcmp.eq.s16 q1, q0, q2 +; CHECK-NEXT: esp.vcmp.eq.s32 q5, q1, q6 +; CHECK-NEXT: esp.vcmp.eq.s8 q2, q0, q3 +; CHECK-NEXT: esp.vcmp.eq.u16 q7, q7, q1 +; CHECK-NEXT: esp.vcmp.eq.u32 q2, q1, q2 +; CHECK-NEXT: esp.vcmp.eq.u8 q3, q1, q6 +; CHECK-NEXT: esp.vcmp.gt.s16 q4, q5, q6 +; CHECK-NEXT: esp.vcmp.gt.s32 q0, q6, q2 +; CHECK-NEXT: esp.vcmp.gt.s8 q2, q3, q5 +; CHECK-NEXT: esp.vcmp.gt.u16 q7, q7, q4 +; CHECK-NEXT: esp.vcmp.gt.u32 q2, q6, q2 +; CHECK-NEXT: esp.vcmp.gt.u8 q0, q2, q0 +; CHECK-NEXT: esp.vcmp.lt.s16 q7, q2, q1 +; CHECK-NEXT: esp.vcmp.lt.s32 q4, q2, q1 +; CHECK-NEXT: esp.vcmp.lt.s8 q6, q5, q2 +; CHECK-NEXT: esp.vcmp.lt.u16 q4, q1, q5 +; CHECK-NEXT: esp.vcmp.lt.u32 q2, q5, q6 +; CHECK-NEXT: esp.vcmp.lt.u8 q5, q3, q5 +; CHECK-NEXT: esp.mov.s16.qacc q2 +; CHECK-NEXT: esp.mov.s8.qacc q5 +; CHECK-NEXT: esp.mov.u16.qacc q5 +; CHECK-NEXT: esp.mov.u8.qacc q3 +; CHECK-NEXT: esp.movi.16.a q2, a0, 3 +; CHECK-NEXT: esp.movi.16.q q3, s11, 13 +; CHECK-NEXT: esp.movi.32.a q6, a0, 1 +; CHECK-NEXT: esp.movi.32.q q5, s0, 1 +; CHECK-NEXT: esp.movi.8.a q5, a0, 15 +; CHECK-NEXT: esp.movi.8.q q1, a5, 6 +; 
CHECK-NEXT: esp.movx.r.cfg a0 +; CHECK-NEXT: esp.movx.r.fft.bit.width a0 +; CHECK-NEXT: li a0, 33 +; CHECK-NEXT: esp.movx.r.perf a0, a0 +; CHECK-NEXT: esp.movx.r.sar a0 +; CHECK-NEXT: esp.movx.r.sar.bytes a0 +; CHECK-NEXT: esp.movx.r.xacc.h a0 +; CHECK-NEXT: esp.movx.r.xacc.l a0 +; CHECK-NEXT: esp.movx.w.cfg t5 +; CHECK-NEXT: esp.movx.w.fft.bit.width s10 +; CHECK-NEXT: esp.movx.w.perf a2 +; CHECK-NEXT: esp.movx.w.sar t3 +; CHECK-NEXT: esp.movx.w.sar.bytes s1 +; CHECK-NEXT: esp.movx.w.xacc.h a2 +; CHECK-NEXT: esp.movx.w.xacc.l s1 +; CHECK-NEXT: esp.vext.s16 q7, q0, q6 +; CHECK-NEXT: esp.vext.s8 q5, q3, q3 +; CHECK-NEXT: esp.vext.u16 q4, q2, q6 +; CHECK-NEXT: esp.vext.u8 q4, q0, q0 +; CHECK-NEXT: esp.vunzip.16 q1, q0 +; CHECK-NEXT: esp.vunzip.32 q6, q4 +; CHECK-NEXT: esp.vunzip.8 q2, q1 +; CHECK-NEXT: esp.vunzipt.16 q7, q0, q2 +; CHECK-NEXT: esp.vunzipt.8 q0, q6, q2 +; CHECK-NEXT: esp.vzip.16 q1, q6 +; CHECK-NEXT: esp.vzip.32 q4, q6 +; CHECK-NEXT: esp.vzip.8 q4, q0 +; CHECK-NEXT: esp.vzipt.16 q0, q3, q5 +; CHECK-NEXT: esp.vzipt.8 q6, q1, q5 +; CHECK-NEXT: esp.zero.q q5 +; CHECK-NEXT: esp.zero.qacc +; CHECK-NEXT: esp.zero.xacc +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.fft.ams.s16.ld.incp q6, a0, q6, q0, q3, q0, q1, 0 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.fft.ams.s16.ld.incp.uaup q3, a0, q0, q2, q3, q1, q0, 0 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.fft.ams.s16.ld.r32.decp q7, a0, q0, q6, q3, q1, q5, 1 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: li a3, 4 +; CHECK-NEXT: esp.fft.ams.s16.st.incp q5, q7, a0, a3, q5, q3, q6, 0 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.fft.bitrev q7, a0 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.fft.cmul.s16.ld.xp q2, a0, s10, q3, q7, q7, 1 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.fft.cmul.s16.st.xp q7, q0, q4, a0, a1, 4, 3, 1 +; CHECK-NEXT: esp.fft.r2bf.s16 q7, q3, q5, q1, 0 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.fft.r2bf.s16.st.incp q7, q7, q4, a0, 2 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.fft.vst.r32.decp q5, a0, 1 +; 
CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.ld.128.usar.ip q1, a0, 608 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.ld.128.usar.xp q2, a0, a2 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.ld.xacc.ip a0, 400 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.ldqa.s16.128.ip a0, 912 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.ldqa.s16.128.xp a0, t5 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.ldqa.s8.128.ip a0, 1824 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.ldqa.s8.128.xp a0, s1 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.ldqa.u16.128.ip a0, -1904 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.ldqa.u16.128.xp a0, t4 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.ldqa.u8.128.ip a0, 1216 +; CHECK-NEXT: li a0, 2 +; CHECK-NEXT: esp.ldqa.u8.128.xp a0, a4 +; CHECK-NEXT: li a0, 9 +; CHECK-NEXT: esp.vldbc.16.ip q7, a0, -448 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.vldbc.16.xp q3, a0, s0 +; CHECK-NEXT: mv a0, a5 +; CHECK-NEXT: esp.vldbc.32.ip q3, a0, 220 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: esp.vldbc.32.xp q7, a0, a1 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: esp.vldbc.8.ip q2, a0, 396 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vldbc.8.xp q7, a0, s0 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vldext.s16.ip q7, q4, a0, 16 +; CHECK-NEXT: mv a0, a5 +; CHECK-NEXT: esp.vldext.s16.xp q5, q0, a0, a2 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.vldext.s8.ip q3, q6, a0, 80 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vldext.s8.xp q1, q1, a0, a4 +; CHECK-NEXT: li a0, 14 +; CHECK-NEXT: esp.vldext.u16.ip q2, q5, a0, 48 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vldext.u16.xp q2, q0, a0, s9 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vldext.u8.ip q7, q2, a0, 64 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vldext.u8.xp q7, q2, a0, a0 +; CHECK-NEXT: li a0, 1 +; CHECK-NEXT: esp.vldhbc.16.incp q4, q7, a0 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.ld.qacc.h.h.128.ip a0, 512 +; CHECK-NEXT: li a0, 5 +; CHECK-NEXT: esp.ld.qacc.h.l.128.ip a0, -784 +; CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.ld.qacc.l.h.128.ip a0, -800 +; 
CHECK-NEXT: li a0, 10 +; CHECK-NEXT: esp.ld.qacc.l.l.128.ip a0, -1952 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.ld.ua.state.ip a0, -752 +; CHECK-NEXT: esp.ldxq.32 q7, q4, a5, 2, 4 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.st.qacc.h.h.128.ip a0, -336 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.st.qacc.h.l.128.ip a0, 1568 +; CHECK-NEXT: li a0, 4 +; CHECK-NEXT: esp.st.qacc.l.h.128.ip a0, 16 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.st.qacc.l.l.128.ip a0, 416 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.st.ua.state.ip a0, -1360 +; CHECK-NEXT: esp.stxq.32 q0, q6, a4, 2, 5 +; CHECK-NEXT: li a0, 8 +; CHECK-NEXT: esp.vld.128.ip q3, a0, 784 +; CHECK-NEXT: li a0, 7 +; CHECK-NEXT: esp.vld.128.xp q3, a0, s0 +; CHECK-NEXT: mv a0, a5 +; CHECK-NEXT: esp.vld.h.64.ip q0, a0, -352 +; CHECK-NEXT: esp.vld.h.64.xp q2, a1, t4 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vld.l.64.ip q2, a0, 56 +; CHECK-NEXT: esp.vld.l.64.xp q5, s0, s10 +; CHECK-NEXT: li a0, 6 +; CHECK-NEXT: esp.vst.128.ip q5, a0, -960 +; CHECK-NEXT: li a0, 13 +; CHECK-NEXT: esp.vst.128.xp q6, a0, s11 +; CHECK-NEXT: esp.vst.h.64.ip q7, s1, 944 +; CHECK-NEXT: esp.vst.h.64.xp q7, s11, t5 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vst.l.64.ip q5, a0, 984 +; CHECK-NEXT: li a0, 3 +; CHECK-NEXT: esp.vst.l.64.xp q5, a0, s9 +; CHECK-NEXT: esp.slci.2q q1, q5, 12 +; CHECK-NEXT: esp.slcxxp.2q q2, q3, t6, t6 +; CHECK-NEXT: esp.src.q q2, q1, q3 +; CHECK-NEXT: esp.src.q.ld.ip q0, a5, -272, q5, q5 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: esp.src.q.ld.xp q1, a0, t3, q7, q6 +; CHECK-NEXT: esp.src.q.qup q3, q7, q4 +; CHECK-NEXT: esp.srci.2q q2, q3, 7 +; CHECK-NEXT: esp.srcmb.s16.q.qacc q2, q4, 0 +; CHECK-NEXT: esp.srcmb.s16.qacc q5, s8, 1 +; CHECK-NEXT: esp.srcmb.s8.q.qacc q5, q4, 0 +; CHECK-NEXT: esp.srcmb.s8.qacc q1, a2, 1 +; CHECK-NEXT: esp.srcmb.u16.q.qacc q0, q3, 1 +; CHECK-NEXT: esp.srcmb.u16.qacc q7, s8, 0 +; CHECK-NEXT: esp.srcmb.u8.q.qacc q3, q5, 1 +; CHECK-NEXT: esp.srcmb.u8.qacc q0, t6, 0 +; CHECK-NEXT: li a0, 12 +; CHECK-NEXT: 
esp.srcq.128.st.incp q0, q5, a0 +; CHECK-NEXT: esp.srcxxp.2q q4, q6, s9, a4 +; CHECK-NEXT: esp.srs.s.xacc a0, a4 +; CHECK-NEXT: esp.srs.u.xacc a0, t3 +; CHECK-NEXT: esp.vsl.32 q5, q2 +; CHECK-NEXT: esp.vsld.16 q3, q3, q7 +; CHECK-NEXT: esp.vsld.32 q3, q7, q1 +; CHECK-NEXT: esp.vsld.8 q0, q1, q5 +; CHECK-NEXT: esp.vsr.s32 q3, q0 +; CHECK-NEXT: esp.vsr.u32 q1, q2 +; CHECK-NEXT: esp.vsrd.16 q4, q3, q0 +; CHECK-NEXT: esp.vsrd.32 q0, q6, q3 +; CHECK-NEXT: esp.vsrd.8 q5, q4, q1 +; CHECK-NEXT: esp.st.s.xacc.ip a2, 80 +; CHECK-NEXT: esp.st.u.xacc.ip a4, -464 +; CHECK-NEXT: cm.popret {ra, s0-s11}, 64 + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 0) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 1) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 2) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 3) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 4) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 5) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 6) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 7) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.h(i32 0, i32 4) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32 6, i32 1, i32 10, i32 -48, i32 1) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32 12, i32 2, i32 7, i32 2, i32 1) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.l(i32 7, i32 6) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32 7, i32 0, i32 8, i32 48, i32 7) + tail call void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32 14, i32 2, i32 7, i32 7, i32 1) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.h(i32 1, i32 1) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32 1, i32 6, i32 5, i32 32, i32 4) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32 7, i32 3, i32 2, i32 2, i32 6) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.l(i32 4, i32 5) + tail call void 
@llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32 2, i32 5, i32 4, i32 -48, i32 4) + tail call void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32 7, i32 6, i32 3, i32 14, i32 7) + tail call void @llvm.riscv.esp.vmulas.s16.qacc(i32 4, i32 2) + tail call void @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32 5, i32 7, i32 4, i32 96, i32 1) + tail call void @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32 3, i32 4, i32 2, i32 8, i32 6) + tail call void @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32 7, i32 6, i32 1, i32 0, i32 80) + tail call void @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32 5, i32 0, i32 7, i32 6, i32 5) + tail call void @llvm.riscv.esp.vmulas.s16.xacc(i32 3, i32 5) + tail call void @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32 1, i32 7, i32 9, i32 96, i32 5) + tail call void @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32 8, i32 5, i32 5, i32 13, i32 0) + tail call void @llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32 4, i32 6, i32 2, i32 1, i32 16) + tail call void @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32 5, i32 7, i32 7, i32 7, i32 2) + tail call void @llvm.riscv.esp.vmulas.s8.qacc(i32 6, i32 1) + tail call void @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32 3, i32 5, i32 8, i32 -128, i32 2) + tail call void @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32 5, i32 0, i32 5, i32 8, i32 4) + tail call void @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32 6, i32 0, i32 7, i32 1, i32 16) + tail call void @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32 12, i32 6, i32 1, i32 4, i32 10) + tail call void @llvm.riscv.esp.vmulas.s8.xacc(i32 3, i32 7) + tail call void @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32 4, i32 5, i32 1, i32 -16, i32 7) + tail call void @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32 10, i32 7, i32 0, i32 7, i32 1) + tail call void @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32 6, i32 1, i32 6, i32 8, i32 -128) + tail call void @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32 2, i32 4, i32 1, i32 5, i32 4) + tail call void @llvm.riscv.esp.vmulas.u16.qacc(i32 6, i32 1) + tail call void @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32 0, i32 
0, i32 8, i32 -32, i32 7) + tail call void @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32 7, i32 6, i32 7, i32 6, i32 2) + tail call void @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32 6, i32 5, i32 4, i32 8, i32 16) + tail call void @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32 9, i32 3, i32 7, i32 4, i32 2) + tail call void @llvm.riscv.esp.vmulas.u16.xacc(i32 6, i32 1) + tail call void @llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32 2, i32 2, i32 3, i32 -48, i32 2) + tail call void @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32 6, i32 3, i32 0, i32 0, i32 7) + tail call void @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32 1, i32 4, i32 0, i32 9, i32 96) + tail call void @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32 5, i32 3, i32 7, i32 6, i32 2) + tail call void @llvm.riscv.esp.vmulas.u8.qacc(i32 7, i32 1) + tail call void @llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32 7, i32 4, i32 9, i32 -48, i32 7) + tail call void @llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32 12, i32 6, i32 7, i32 11, i32 4) + tail call void @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32 1, i32 7, i32 2, i32 14, i32 0) + tail call void @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32 7, i32 0, i32 0, i32 4, i32 7) + tail call void @llvm.riscv.esp.vmulas.u8.xacc(i32 6, i32 4) + tail call void @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32 6, i32 2, i32 5, i32 -80, i32 3) + tail call void @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32 13, i32 5, i32 1, i32 13, i32 4) + tail call void @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32 2, i32 3, i32 7, i32 5, i32 -128) + tail call void @llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32 6, i32 7, i32 2, i32 2, i32 5) + tail call void @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32 0, i32 2, i32 14, i32 0) + tail call void @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32 2, i32 6, i32 0, i32 5) + tail call void @llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32 7, i32 3, i32 8, i32 5) + tail call void @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32 4, i32 4, i32 6, i32 3) + tail call void @llvm.riscv.esp.vsmulas.s16.qacc(i32 1, i32 5, i32 14) + tail 
call void @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32 7, i32 4, i32 5, i32 0, i32 0) + tail call void @llvm.riscv.esp.vsmulas.s8.qacc(i32 3, i32 5, i32 0) + tail call void @llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32 1, i32 4, i32 8, i32 6, i32 1) + tail call void @llvm.riscv.esp.vsmulas.u16.qacc(i32 6, i32 5, i32 15) + tail call void @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32 7, i32 1, i32 0, i32 10, i32 7) + tail call void @llvm.riscv.esp.vsmulas.u8.qacc(i32 0, i32 7, i32 2) + tail call void @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32 3, i32 7, i32 10, i32 8, i32 4) + tail call void @llvm.riscv.esp.cmul.s16(i32 0, i32 7, i32 3, i32 6) + tail call void @llvm.riscv.esp.cmul.s16.ld.incp(i32 0, i32 3, i32 6, i32 0, i32 3, i32 5) + tail call void @llvm.riscv.esp.cmul.s16.st.incp(i32 4, i32 5, i32 5, i32 0, i32 2, i32 0) + tail call void @llvm.riscv.esp.cmul.s8(i32 1, i32 0, i32 3, i32 1) + tail call void @llvm.riscv.esp.cmul.s8.ld.incp(i32 5, i32 4, i32 5, i32 1, i32 7, i32 4) + tail call void @llvm.riscv.esp.cmul.s8.st.incp(i32 6, i32 0, i32 5, i32 14, i32 3, i32 0) + tail call void @llvm.riscv.esp.cmul.u16(i32 7, i32 5, i32 2, i32 7) + tail call void @llvm.riscv.esp.cmul.u16.ld.incp(i32 0, i32 1, i32 2, i32 1, i32 0, i32 0) + tail call void @llvm.riscv.esp.cmul.u16.st.incp(i32 1, i32 4, i32 2, i32 5, i32 2, i32 4) + tail call void @llvm.riscv.esp.cmul.u8(i32 7, i32 5, i32 0, i32 3) + tail call void @llvm.riscv.esp.cmul.u8.ld.incp(i32 0, i32 2, i32 11, i32 0, i32 0, i32 4) + tail call void @llvm.riscv.esp.cmul.u8.st.incp(i32 6, i32 3, i32 1, i32 10, i32 2, i32 4) + tail call void @llvm.riscv.esp.max.s16.a(i32 6, i32 3) + tail call void @llvm.riscv.esp.max.s32.a(i32 2, i32 0) + tail call void @llvm.riscv.esp.max.s8.a(i32 0, i32 9) + tail call void @llvm.riscv.esp.max.u16.a(i32 6, i32 6) + tail call void @llvm.riscv.esp.max.u32.a(i32 6, i32 1) + tail call void @llvm.riscv.esp.max.u8.a(i32 1, i32 4) + tail call void @llvm.riscv.esp.min.s16.a(i32 6, i32 11) + tail call 
void @llvm.riscv.esp.min.s32.a(i32 1, i32 14) + tail call void @llvm.riscv.esp.min.s8.a(i32 0, i32 1) + tail call void @llvm.riscv.esp.min.u16.a(i32 3, i32 14) + tail call void @llvm.riscv.esp.min.u32.a(i32 0, i32 9) + tail call void @llvm.riscv.esp.min.u8.a(i32 5, i32 8) + tail call void @llvm.riscv.esp.vabs.16(i32 0, i32 6) + tail call void @llvm.riscv.esp.vabs.32(i32 4, i32 1) + tail call void @llvm.riscv.esp.vabs.8(i32 2, i32 5) + tail call void @llvm.riscv.esp.vadd.s16(i32 1, i32 5, i32 6) + tail call void @llvm.riscv.esp.vadd.s16.ld.incp(i32 0, i32 6, i32 10, i32 1, i32 0) + tail call void @llvm.riscv.esp.vadd.s16.st.incp(i32 0, i32 4, i32 1, i32 11, i32 7) + tail call void @llvm.riscv.esp.vadd.s32(i32 7, i32 3, i32 7) + tail call void @llvm.riscv.esp.vadd.s32.ld.incp(i32 7, i32 2, i32 14, i32 4, i32 4) + tail call void @llvm.riscv.esp.vadd.s32.st.incp(i32 1, i32 7, i32 2, i32 13, i32 7) + tail call void @llvm.riscv.esp.vadd.s8(i32 1, i32 7, i32 7) + tail call void @llvm.riscv.esp.vadd.s8.ld.incp(i32 5, i32 6, i32 14, i32 1, i32 2) + tail call void @llvm.riscv.esp.vadd.s8.st.incp(i32 1, i32 0, i32 3, i32 9, i32 4) + tail call void @llvm.riscv.esp.vadd.u16(i32 7, i32 7, i32 0) + tail call void @llvm.riscv.esp.vadd.u16.ld.incp(i32 7, i32 5, i32 8, i32 1, i32 6) + tail call void @llvm.riscv.esp.vadd.u16.st.incp(i32 6, i32 3, i32 0, i32 5, i32 7) + tail call void @llvm.riscv.esp.vadd.u32(i32 0, i32 1, i32 4) + tail call void @llvm.riscv.esp.vadd.u32.ld.incp(i32 5, i32 0, i32 2, i32 4, i32 1) + tail call void @llvm.riscv.esp.vadd.u32.st.incp(i32 0, i32 1, i32 4, i32 1, i32 6) + tail call void @llvm.riscv.esp.vadd.u8(i32 2, i32 5, i32 5) + tail call void @llvm.riscv.esp.vadd.u8.ld.incp(i32 4, i32 3, i32 8, i32 1, i32 7) + tail call void @llvm.riscv.esp.vadd.u8.st.incp(i32 0, i32 0, i32 0, i32 5, i32 2) + tail call void @llvm.riscv.esp.vclamp.s16(i32 5, i32 14, i32 4) + tail call void @llvm.riscv.esp.vmax.s16(i32 6, i32 5, i32 5) + tail call void 
@llvm.riscv.esp.vmax.s16.ld.incp(i32 5, i32 5, i32 11, i32 3, i32 2) + tail call void @llvm.riscv.esp.vmax.s16.st.incp(i32 3, i32 5, i32 3, i32 2, i32 4) + tail call void @llvm.riscv.esp.vmax.s32(i32 5, i32 2, i32 2) + tail call void @llvm.riscv.esp.vmax.s32.ld.incp(i32 0, i32 1, i32 7, i32 6, i32 0) + tail call void @llvm.riscv.esp.vmax.s32.st.incp(i32 7, i32 6, i32 6, i32 14, i32 1) + tail call void @llvm.riscv.esp.vmax.s8(i32 5, i32 7, i32 7) + tail call void @llvm.riscv.esp.vmax.s8.ld.incp(i32 5, i32 1, i32 2, i32 1, i32 6) + tail call void @llvm.riscv.esp.vmax.s8.st.incp(i32 1, i32 3, i32 5, i32 4, i32 7) + tail call void @llvm.riscv.esp.vmax.u16(i32 4, i32 1, i32 1) + tail call void @llvm.riscv.esp.vmax.u16.ld.incp(i32 5, i32 4, i32 0, i32 5, i32 3) + tail call void @llvm.riscv.esp.vmax.u16.st.incp(i32 0, i32 7, i32 5, i32 11, i32 5) + tail call void @llvm.riscv.esp.vmax.u32(i32 0, i32 2, i32 4) + tail call void @llvm.riscv.esp.vmax.u32.ld.incp(i32 0, i32 6, i32 4, i32 1, i32 6) + tail call void @llvm.riscv.esp.vmax.u32.st.incp(i32 4, i32 7, i32 0, i32 6, i32 1) + tail call void @llvm.riscv.esp.vmax.u8(i32 2, i32 0, i32 5) + tail call void @llvm.riscv.esp.vmax.u8.ld.incp(i32 6, i32 1, i32 1, i32 5, i32 0) + tail call void @llvm.riscv.esp.vmax.u8.st.incp(i32 6, i32 7, i32 7, i32 10, i32 1) + tail call void @llvm.riscv.esp.vmin.s16(i32 1, i32 3, i32 4) + tail call void @llvm.riscv.esp.vmin.s16.ld.incp(i32 2, i32 1, i32 3, i32 2, i32 2) + tail call void @llvm.riscv.esp.vmin.s16.st.incp(i32 7, i32 6, i32 2, i32 7, i32 1) + tail call void @llvm.riscv.esp.vmin.s32(i32 0, i32 3, i32 2) + tail call void @llvm.riscv.esp.vmin.s32.ld.incp(i32 7, i32 6, i32 14, i32 5, i32 1) + tail call void @llvm.riscv.esp.vmin.s32.st.incp(i32 5, i32 1, i32 7, i32 6, i32 5) + tail call void @llvm.riscv.esp.vmin.s8(i32 3, i32 6, i32 2) + tail call void @llvm.riscv.esp.vmin.s8.ld.incp(i32 4, i32 3, i32 11, i32 1, i32 7) + tail call void @llvm.riscv.esp.vmin.s8.st.incp(i32 0, i32 1, i32 1, 
i32 6, i32 4) + tail call void @llvm.riscv.esp.vmin.u16(i32 3, i32 7, i32 4) + tail call void @llvm.riscv.esp.vmin.u16.ld.incp(i32 6, i32 6, i32 11, i32 5, i32 4) + tail call void @llvm.riscv.esp.vmin.u16.st.incp(i32 6, i32 0, i32 1, i32 12, i32 2) + tail call void @llvm.riscv.esp.vmin.u32(i32 0, i32 7, i32 5) + tail call void @llvm.riscv.esp.vmin.u32.ld.incp(i32 5, i32 6, i32 13, i32 6, i32 7) + tail call void @llvm.riscv.esp.vmin.u32.st.incp(i32 3, i32 7, i32 5, i32 4, i32 4) + tail call void @llvm.riscv.esp.vmin.u8(i32 5, i32 5, i32 7) + tail call void @llvm.riscv.esp.vmin.u8.ld.incp(i32 0, i32 5, i32 0, i32 5, i32 2) + tail call void @llvm.riscv.esp.vmin.u8.st.incp(i32 6, i32 6, i32 2, i32 12, i32 1) + tail call void @llvm.riscv.esp.vmul.s16(i32 2, i32 1, i32 6) + tail call void @llvm.riscv.esp.vmul.s16.ld.incp(i32 6, i32 7, i32 10, i32 3, i32 0) + tail call void @llvm.riscv.esp.vmul.s16.s8xs8(i32 3, i32 5, i32 7, i32 0) + tail call void @llvm.riscv.esp.vmul.s16.st.incp(i32 7, i32 1, i32 2, i32 3, i32 0) + tail call void @llvm.riscv.esp.vmul.s32.s16xs16(i32 5, i32 2, i32 3, i32 4) + tail call void @llvm.riscv.esp.vmul.s8(i32 4, i32 0, i32 3) + tail call void @llvm.riscv.esp.vmul.s8.ld.incp(i32 2, i32 3, i32 0, i32 2, i32 0) + tail call void @llvm.riscv.esp.vmul.s8.st.incp(i32 0, i32 7, i32 0, i32 8, i32 3) + tail call void @llvm.riscv.esp.vmul.u16(i32 3, i32 7, i32 2) + tail call void @llvm.riscv.esp.vmul.u16.ld.incp(i32 6, i32 6, i32 7, i32 5, i32 5) + tail call void @llvm.riscv.esp.vmul.u16.st.incp(i32 4, i32 4, i32 3, i32 3, i32 2) + tail call void @llvm.riscv.esp.vmul.u8(i32 3, i32 7, i32 7) + tail call void @llvm.riscv.esp.vmul.u8.ld.incp(i32 0, i32 6, i32 9, i32 1, i32 0) + tail call void @llvm.riscv.esp.vmul.u8.st.incp(i32 0, i32 3, i32 7, i32 6, i32 4) + tail call void @llvm.riscv.esp.vprelu.s16(i32 8, i32 3, i32 4, i32 1) + tail call void @llvm.riscv.esp.vprelu.s8(i32 3, i32 5, i32 4, i32 2) + tail call void @llvm.riscv.esp.vrelu.s16(i32 8, i32 9, i32 
6) + tail call void @llvm.riscv.esp.vrelu.s8(i32 12, i32 1, i32 5) + tail call void @llvm.riscv.esp.vsadds.s16(i32 12, i32 3, i32 3) + tail call void @llvm.riscv.esp.vsadds.s8(i32 7, i32 1, i32 7) + tail call void @llvm.riscv.esp.vsadds.u16(i32 4, i32 2, i32 3) + tail call void @llvm.riscv.esp.vsadds.u8(i32 8, i32 3, i32 2) + tail call void @llvm.riscv.esp.vsat.s16(i32 9, i32 1, i32 0, i32 5) + tail call void @llvm.riscv.esp.vsat.s32(i32 12, i32 9, i32 3, i32 3) + tail call void @llvm.riscv.esp.vsat.s8(i32 10, i32 2, i32 7, i32 0) + tail call void @llvm.riscv.esp.vsat.u16(i32 7, i32 7, i32 7, i32 3) + tail call void @llvm.riscv.esp.vsat.u32(i32 8, i32 2, i32 5, i32 3) + tail call void @llvm.riscv.esp.vsat.u8(i32 1, i32 13, i32 6, i32 0) + tail call void @llvm.riscv.esp.vssubs.s16(i32 3, i32 7, i32 3) + tail call void @llvm.riscv.esp.vssubs.s8(i32 14, i32 0, i32 7) + tail call void @llvm.riscv.esp.vssubs.u16(i32 0, i32 4, i32 5) + tail call void @llvm.riscv.esp.vssubs.u8(i32 11, i32 1, i32 5) + tail call void @llvm.riscv.esp.vsub.s16(i32 0, i32 6, i32 0) + tail call void @llvm.riscv.esp.vsub.s16.ld.incp(i32 3, i32 7, i32 6, i32 2, i32 2) + tail call void @llvm.riscv.esp.vsub.s16.st.incp(i32 0, i32 3, i32 7, i32 0, i32 0) + tail call void @llvm.riscv.esp.vsub.s32(i32 2, i32 7, i32 7) + tail call void @llvm.riscv.esp.vsub.s32.ld.incp(i32 2, i32 0, i32 7, i32 3, i32 4) + tail call void @llvm.riscv.esp.vsub.s32.st.incp(i32 1, i32 1, i32 4, i32 5, i32 1) + tail call void @llvm.riscv.esp.vsub.s8(i32 5, i32 6, i32 7) + tail call void @llvm.riscv.esp.vsub.s8.ld.incp(i32 2, i32 6, i32 1, i32 1, i32 4) + tail call void @llvm.riscv.esp.vsub.s8.st.incp(i32 2, i32 3, i32 5, i32 4, i32 4) + tail call void @llvm.riscv.esp.vsub.u16(i32 7, i32 0, i32 5) + tail call void @llvm.riscv.esp.vsub.u16.ld.incp(i32 7, i32 5, i32 11, i32 0, i32 4) + tail call void @llvm.riscv.esp.vsub.u16.st.incp(i32 3, i32 1, i32 0, i32 11, i32 1) + tail call void @llvm.riscv.esp.vsub.u32(i32 4, i32 2, i32 
5) + tail call void @llvm.riscv.esp.vsub.u32.ld.incp(i32 4, i32 2, i32 0, i32 2, i32 4) + tail call void @llvm.riscv.esp.vsub.u32.st.incp(i32 7, i32 4, i32 0, i32 11, i32 7) + tail call void @llvm.riscv.esp.vsub.u8(i32 5, i32 4, i32 6) + tail call void @llvm.riscv.esp.vsub.u8.ld.incp(i32 2, i32 4, i32 0, i32 6, i32 4) + tail call void @llvm.riscv.esp.vsub.u8.st.incp(i32 2, i32 0, i32 3, i32 11, i32 3) + tail call void @llvm.riscv.esp.addx2(i32 5, i32 14, i32 4) + tail call void @llvm.riscv.esp.addx4(i32 14, i32 5, i32 4) + tail call void @llvm.riscv.esp.sat(i32 6, i32 2, i32 4) + tail call void @llvm.riscv.esp.subx2(i32 2, i32 2, i32 9) + tail call void @llvm.riscv.esp.subx4(i32 0, i32 9, i32 3) + tail call void @llvm.riscv.esp.andq(i32 6, i32 3, i32 7) + tail call void @llvm.riscv.esp.notq(i32 5, i32 6) + tail call void @llvm.riscv.esp.orq(i32 1, i32 0, i32 1) + tail call void @llvm.riscv.esp.xorq(i32 1, i32 6, i32 5) + tail call void @llvm.riscv.esp.vcmp.eq.s16(i32 0, i32 2, i32 1) + tail call void @llvm.riscv.esp.vcmp.eq.s32(i32 1, i32 6, i32 5) + tail call void @llvm.riscv.esp.vcmp.eq.s8(i32 0, i32 3, i32 2) + tail call void @llvm.riscv.esp.vcmp.eq.u16(i32 7, i32 1, i32 7) + tail call void @llvm.riscv.esp.vcmp.eq.u32(i32 1, i32 2, i32 2) + tail call void @llvm.riscv.esp.vcmp.eq.u8(i32 1, i32 6, i32 3) + tail call void @llvm.riscv.esp.vcmp.gt.s16(i32 5, i32 6, i32 4) + tail call void @llvm.riscv.esp.vcmp.gt.s32(i32 6, i32 2, i32 0) + tail call void @llvm.riscv.esp.vcmp.gt.s8(i32 3, i32 5, i32 2) + tail call void @llvm.riscv.esp.vcmp.gt.u16(i32 7, i32 4, i32 7) + tail call void @llvm.riscv.esp.vcmp.gt.u32(i32 6, i32 2, i32 2) + tail call void @llvm.riscv.esp.vcmp.gt.u8(i32 2, i32 0, i32 0) + tail call void @llvm.riscv.esp.vcmp.lt.s16(i32 2, i32 1, i32 7) + tail call void @llvm.riscv.esp.vcmp.lt.s32(i32 2, i32 1, i32 4) + tail call void @llvm.riscv.esp.vcmp.lt.s8(i32 5, i32 2, i32 6) + tail call void @llvm.riscv.esp.vcmp.lt.u16(i32 1, i32 5, i32 4) + tail call 
void @llvm.riscv.esp.vcmp.lt.u32(i32 5, i32 6, i32 2) + tail call void @llvm.riscv.esp.vcmp.lt.u8(i32 3, i32 5, i32 5) + tail call void @llvm.riscv.esp.mov.s16.qacc(i32 2) + tail call void @llvm.riscv.esp.mov.s8.qacc(i32 5) + tail call void @llvm.riscv.esp.mov.u16.qacc(i32 5) + tail call void @llvm.riscv.esp.mov.u8.qacc(i32 3) + tail call void @llvm.riscv.esp.movi.16.a(i32 2, i32 3, i32 1) + tail call void @llvm.riscv.esp.movi.16.q(i32 7, i32 13, i32 3) + tail call void @llvm.riscv.esp.movi.32.a(i32 6, i32 1, i32 14) + tail call void @llvm.riscv.esp.movi.32.q(i32 9, i32 1, i32 5) + tail call void @llvm.riscv.esp.movi.8.a(i32 5, i32 15, i32 14) + tail call void @llvm.riscv.esp.movi.8.q(i32 0, i32 6, i32 1) + tail call void @llvm.riscv.esp.movx.r.cfg(i32 5) + tail call void @llvm.riscv.esp.movx.r.fft.bit.width(i32 2) + tail call void @llvm.riscv.esp.movx.r.perf(i32 3, i32 33) + tail call void @llvm.riscv.esp.movx.r.sar(i32 5) + tail call void @llvm.riscv.esp.movx.r.sar.bytes(i32 6) + tail call void @llvm.riscv.esp.movx.r.xacc.h(i32 10) + tail call void @llvm.riscv.esp.movx.r.xacc.l(i32 12) + tail call void @llvm.riscv.esp.movx.w.cfg(i32 10) + tail call void @llvm.riscv.esp.movx.w.fft.bit.width(i32 1) + tail call void @llvm.riscv.esp.movx.w.perf(i32 8) + tail call void @llvm.riscv.esp.movx.w.sar(i32 3) + tail call void @llvm.riscv.esp.movx.w.sar.bytes(i32 4) + tail call void @llvm.riscv.esp.movx.w.xacc.h(i32 8) + tail call void @llvm.riscv.esp.movx.w.xacc.l(i32 4) + tail call void @llvm.riscv.esp.vext.s16(i32 6, i32 7, i32 0) + tail call void @llvm.riscv.esp.vext.s8(i32 3, i32 5, i32 3) + tail call void @llvm.riscv.esp.vext.u16(i32 6, i32 4, i32 2) + tail call void @llvm.riscv.esp.vext.u8(i32 0, i32 4, i32 0) + tail call void @llvm.riscv.esp.vunzip.16(i32 1, i32 0) + tail call void @llvm.riscv.esp.vunzip.32(i32 6, i32 4) + tail call void @llvm.riscv.esp.vunzip.8(i32 2, i32 1) + tail call void @llvm.riscv.esp.vunzipt.16(i32 7, i32 0, i32 2) + tail call void 
@llvm.riscv.esp.vunzipt.8(i32 0, i32 6, i32 2) + tail call void @llvm.riscv.esp.vzip.16(i32 1, i32 6) + tail call void @llvm.riscv.esp.vzip.32(i32 4, i32 6) + tail call void @llvm.riscv.esp.vzip.8(i32 4, i32 0) + tail call void @llvm.riscv.esp.vzipt.16(i32 0, i32 3, i32 5) + tail call void @llvm.riscv.esp.vzipt.8(i32 6, i32 1, i32 5) + tail call void @llvm.riscv.esp.zero.q(i32 5) + tail call void @llvm.riscv.esp.zero.qacc() + tail call void @llvm.riscv.esp.zero.xacc() + tail call void @llvm.riscv.esp.fft.ams.s16.ld.incp(i32 3, i32 1, i32 0, i32 3, i32 0, i32 6, i32 6, i32 0) + tail call void @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32 3, i32 0, i32 1, i32 5, i32 0, i32 3, i32 0, i32 2) + tail call void @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32 3, i32 5, i32 1, i32 3, i32 1, i32 7, i32 0, i32 6) + tail call void @llvm.riscv.esp.fft.ams.s16.st.incp(i32 5, i32 6, i32 3, i32 5, i32 4, i32 2, i32 0, i32 7) + tail call void @llvm.riscv.esp.fft.bitrev(i32 2, i32 7) + tail call void @llvm.riscv.esp.fft.cmul.s16.ld.xp(i32 1, i32 7, i32 7, i32 4, i32 1, i32 3, i32 2) + tail call void @llvm.riscv.esp.fft.cmul.s16.st.xp(i32 2, i32 0, i32 7, i32 4, i32 4, i32 1, i32 3, i32 4) + tail call void @llvm.riscv.esp.fft.r2bf.s16(i32 5, i32 1, i32 0, i32 7, i32 3) + tail call void @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32 7, i32 4, i32 10, i32 2, i32 7) + tail call void @llvm.riscv.esp.fft.vst.r32.decp(i32 5, i32 7, i32 1) + tail call void @llvm.riscv.esp.ld.128.usar.ip(i32 4, i32 608, i32 1) + tail call void @llvm.riscv.esp.ld.128.usar.xp(i32 8, i32 1, i32 2) + tail call void @llvm.riscv.esp.ld.xacc.ip(i32 6, i32 400) + tail call void @llvm.riscv.esp.ldqa.s16.128.ip(i32 13, i32 912) + tail call void @llvm.riscv.esp.ldqa.s16.128.xp(i32 10, i32 2) + tail call void @llvm.riscv.esp.ldqa.s8.128.ip(i32 1, i32 1824) + tail call void @llvm.riscv.esp.ldqa.s8.128.xp(i32 4, i32 9) + tail call void @llvm.riscv.esp.ldqa.u16.128.ip(i32 4, i32 -1904) + tail call void 
@llvm.riscv.esp.ldqa.u16.128.xp(i32 14, i32 6) + tail call void @llvm.riscv.esp.ldqa.u8.128.ip(i32 3, i32 1216) + tail call void @llvm.riscv.esp.ldqa.u8.128.xp(i32 6, i32 2) + tail call void @llvm.riscv.esp.vldbc.16.ip(i32 9, i32 -448, i32 7) + tail call void @llvm.riscv.esp.vldbc.16.xp(i32 9, i32 5, i32 3) + tail call void @llvm.riscv.esp.vldbc.32.ip(i32 0, i32 220, i32 3) + tail call void @llvm.riscv.esp.vldbc.32.xp(i32 2, i32 12, i32 7) + tail call void @llvm.riscv.esp.vldbc.8.ip(i32 12, i32 396, i32 2) + tail call void @llvm.riscv.esp.vldbc.8.xp(i32 9, i32 4, i32 7) + tail call void @llvm.riscv.esp.vldext.s16.ip(i32 13, i32 16, i32 7, i32 4) + tail call void @llvm.riscv.esp.vldext.s16.xp(i32 8, i32 0, i32 5, i32 0) + tail call void @llvm.riscv.esp.vldext.s8.ip(i32 4, i32 80, i32 3, i32 6) + tail call void @llvm.riscv.esp.vldext.s8.xp(i32 6, i32 3, i32 1, i32 1) + tail call void @llvm.riscv.esp.vldext.u16.ip(i32 14, i32 48, i32 2, i32 5) + tail call void @llvm.riscv.esp.vldext.u16.xp(i32 12, i32 7, i32 2, i32 0) + tail call void @llvm.riscv.esp.vldext.u8.ip(i32 13, i32 64, i32 7, i32 2) + tail call void @llvm.riscv.esp.vldext.u8.xp(i32 6, i32 6, i32 7, i32 2) + tail call void @llvm.riscv.esp.vldhbc.16.incp(i32 1, i32 4, i32 7) + tail call void @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32 6, i32 512) + tail call void @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32 5, i32 -784) + tail call void @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32 10, i32 -800) + tail call void @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32 10, i32 -1952) + tail call void @llvm.riscv.esp.ld.ua.state.ip(i32 8, i32 -752) + tail call void @llvm.riscv.esp.ldxq.32(i32 0, i32 4, i32 2, i32 4, i32 7) + tail call void @llvm.riscv.esp.st.qacc.h.h.128.ip(i32 13, i32 -336) + tail call void @llvm.riscv.esp.st.qacc.h.l.128.ip(i32 8, i32 1568) + tail call void @llvm.riscv.esp.st.qacc.l.h.128.ip(i32 4, i32 16) + tail call void @llvm.riscv.esp.st.qacc.l.l.128.ip(i32 8, i32 416) + tail call void @llvm.riscv.esp.st.ua.state.ip(i32 7, 
i32 -1360) + tail call void @llvm.riscv.esp.stxq.32(i32 6, i32 6, i32 0, i32 2, i32 5) + tail call void @llvm.riscv.esp.vld.128.ip(i32 8, i32 784, i32 3) + tail call void @llvm.riscv.esp.vld.128.xp(i32 9, i32 7, i32 3) + tail call void @llvm.riscv.esp.vld.h.64.ip(i32 0, i32 -352, i32 0) + tail call void @llvm.riscv.esp.vld.h.64.xp(i32 14, i32 2, i32 2) + tail call void @llvm.riscv.esp.vld.l.64.ip(i32 6, i32 56, i32 2) + tail call void @llvm.riscv.esp.vld.l.64.xp(i32 1, i32 9, i32 5) + tail call void @llvm.riscv.esp.vst.128.ip(i32 5, i32 6, i32 -960) + tail call void @llvm.riscv.esp.vst.128.xp(i32 7, i32 6, i32 13) + tail call void @llvm.riscv.esp.vst.h.64.ip(i32 7, i32 4, i32 944) + tail call void @llvm.riscv.esp.vst.h.64.xp(i32 10, i32 7, i32 7) + tail call void @llvm.riscv.esp.vst.l.64.ip(i32 5, i32 3, i32 984) + tail call void @llvm.riscv.esp.vst.l.64.xp(i32 12, i32 5, i32 3) + tail call void @llvm.riscv.esp.slci.2q(i32 1, i32 5, i32 12) + tail call void @llvm.riscv.esp.slcxxp.2q(i32 5, i32 5, i32 2, i32 3) + tail call void @llvm.riscv.esp.src.q(i32 3, i32 1, i32 2) + tail call void @llvm.riscv.esp.src.q.ld.ip(i32 5, i32 0, i32 5, i32 -272, i32 0) + tail call void @llvm.riscv.esp.src.q.ld.xp(i32 3, i32 6, i32 12, i32 7, i32 1) + tail call void @llvm.riscv.esp.src.q.qup(i32 4, i32 7, i32 3) + tail call void @llvm.riscv.esp.srci.2q(i32 2, i32 3, i32 7) + tail call void @llvm.riscv.esp.srcmb.s16.q.qacc(i32 4, i32 0, i32 2) + tail call void @llvm.riscv.esp.srcmb.s16.qacc(i32 13, i32 1, i32 5) + tail call void @llvm.riscv.esp.srcmb.s8.q.qacc(i32 4, i32 0, i32 5) + tail call void @llvm.riscv.esp.srcmb.s8.qacc(i32 8, i32 1, i32 1) + tail call void @llvm.riscv.esp.srcmb.u16.q.qacc(i32 3, i32 1, i32 0) + tail call void @llvm.riscv.esp.srcmb.u16.qacc(i32 13, i32 0, i32 7) + tail call void @llvm.riscv.esp.srcmb.u8.q.qacc(i32 5, i32 1, i32 3) + tail call void @llvm.riscv.esp.srcmb.u8.qacc(i32 5, i32 0, i32 0) + tail call void @llvm.riscv.esp.srcq.128.st.incp(i32 5, i32 0, 
i32 12) + tail call void @llvm.riscv.esp.srcxxp.2q(i32 12, i32 6, i32 4, i32 6) + tail call void @llvm.riscv.esp.srs.s.xacc(i32 6, i32 13) + tail call void @llvm.riscv.esp.srs.u.xacc(i32 3, i32 12) + tail call void @llvm.riscv.esp.vsl.32(i32 2, i32 5) + tail call void @llvm.riscv.esp.vsld.16(i32 3, i32 7, i32 3) + tail call void @llvm.riscv.esp.vsld.32(i32 7, i32 1, i32 3) + tail call void @llvm.riscv.esp.vsld.8(i32 1, i32 5, i32 0) + tail call void @llvm.riscv.esp.vsr.s32(i32 0, i32 3) + tail call void @llvm.riscv.esp.vsr.u32(i32 2, i32 1) + tail call void @llvm.riscv.esp.vsrd.16(i32 3, i32 0, i32 4) + tail call void @llvm.riscv.esp.vsrd.32(i32 6, i32 3, i32 0) + tail call void @llvm.riscv.esp.vsrd.8(i32 4, i32 1, i32 5) + tail call void @llvm.riscv.esp.st.s.xacc.ip(i32 8, i32 80) + tail call void @llvm.riscv.esp.st.u.xacc.ip(i32 6, i32 -464) + ret void +} + +declare void @llvm.riscv.esp.vcmulas.s16.qacc.h(i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s16.qacc.h.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s16.qacc.l(i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s16.qacc.l.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.h(i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.h.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.l(i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmulas.s8.qacc.l.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc.ld.ip(i32, i32, i32, i32, i32) 
nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.xacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.xacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.xacc(i32, i32) nounwind +declare void 
@llvm.riscv.esp.vmulas.u16.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.xacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.xacc.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.xacc.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.xacc.st.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.xacc.st.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s16.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.s8.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u16.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmulas.u8.qacc.ldbc.incp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.s16.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.s16.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.s8.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.s8.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.u16.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.u16.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void 
@llvm.riscv.esp.vsmulas.u8.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsmulas.u8.qacc.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s16.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s16.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s8.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.s8.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u16.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u16.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u8.ld.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.cmul.u8.st.incp(i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.max.s16.a(i32, i32) nounwind +declare void @llvm.riscv.esp.max.s32.a(i32, i32) nounwind +declare void @llvm.riscv.esp.max.s8.a(i32, i32) nounwind +declare void @llvm.riscv.esp.max.u16.a(i32, i32) nounwind +declare void @llvm.riscv.esp.max.u32.a(i32, i32) nounwind +declare void @llvm.riscv.esp.max.u8.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.s16.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.s32.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.s8.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.u16.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.u32.a(i32, i32) nounwind +declare void @llvm.riscv.esp.min.u8.a(i32, i32) nounwind +declare void @llvm.riscv.esp.vabs.16(i32, i32) nounwind +declare void @llvm.riscv.esp.vabs.32(i32, i32) nounwind +declare void @llvm.riscv.esp.vabs.8(i32, i32) nounwind +declare void 
@llvm.riscv.esp.vadd.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vadd.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vclamp.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare 
void @llvm.riscv.esp.vmax.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmax.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmin.u8.ld.incp(i32, i32, i32, i32, i32) 
nounwind +declare void @llvm.riscv.esp.vmin.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s16.s8xs8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s32.s16xs16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vmul.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vprelu.s16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vprelu.s8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vrelu.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vrelu.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsadds.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsadds.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsadds.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsadds.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsat.s16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsat.s32(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsat.s8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsat.u16(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsat.u32(i32, i32, i32, i32) nounwind 
+declare void @llvm.riscv.esp.vsat.u8(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vssubs.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vssubs.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vssubs.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vssubs.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.s8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u16.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u32.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u32.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u8.ld.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsub.u8.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.addx2(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.addx4(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.sat(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.subx2(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.subx4(i32, i32, i32) nounwind +declare void 
@llvm.riscv.esp.andq(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.notq(i32, i32) nounwind +declare void @llvm.riscv.esp.orq(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.xorq(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.eq.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.gt.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.s32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.u32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vcmp.lt.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.mov.s16.qacc(i32) nounwind +declare void @llvm.riscv.esp.mov.s8.qacc(i32) nounwind +declare void @llvm.riscv.esp.mov.u16.qacc(i32) nounwind +declare void @llvm.riscv.esp.mov.u8.qacc(i32) nounwind +declare void @llvm.riscv.esp.movi.16.a(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.movi.16.q(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.movi.32.a(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.movi.32.q(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.movi.8.a(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.movi.8.q(i32, i32, i32) 
nounwind +declare void @llvm.riscv.esp.movx.r.cfg(i32) nounwind +declare void @llvm.riscv.esp.movx.r.fft.bit.width(i32) nounwind +declare void @llvm.riscv.esp.movx.r.perf(i32, i32) nounwind +declare void @llvm.riscv.esp.movx.r.sar(i32) nounwind +declare void @llvm.riscv.esp.movx.r.sar.bytes(i32) nounwind +declare void @llvm.riscv.esp.movx.r.xacc.h(i32) nounwind +declare void @llvm.riscv.esp.movx.r.xacc.l(i32) nounwind +declare void @llvm.riscv.esp.movx.w.cfg(i32) nounwind +declare void @llvm.riscv.esp.movx.w.fft.bit.width(i32) nounwind +declare void @llvm.riscv.esp.movx.w.perf(i32) nounwind +declare void @llvm.riscv.esp.movx.w.sar(i32) nounwind +declare void @llvm.riscv.esp.movx.w.sar.bytes(i32) nounwind +declare void @llvm.riscv.esp.movx.w.xacc.h(i32) nounwind +declare void @llvm.riscv.esp.movx.w.xacc.l(i32) nounwind +declare void @llvm.riscv.esp.vext.s16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vext.s8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vext.u16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vext.u8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vunzip.16(i32, i32) nounwind +declare void @llvm.riscv.esp.vunzip.32(i32, i32) nounwind +declare void @llvm.riscv.esp.vunzip.8(i32, i32) nounwind +declare void @llvm.riscv.esp.vunzipt.16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vunzipt.8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vzip.16(i32, i32) nounwind +declare void @llvm.riscv.esp.vzip.32(i32, i32) nounwind +declare void @llvm.riscv.esp.vzip.8(i32, i32) nounwind +declare void @llvm.riscv.esp.vzipt.16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vzipt.8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.zero.q(i32) nounwind +declare void @llvm.riscv.esp.zero.qacc() nounwind +declare void @llvm.riscv.esp.zero.xacc() nounwind +declare void @llvm.riscv.esp.fft.ams.s16.ld.incp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.ams.s16.ld.incp.uaup(i32, i32, 
i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.ams.s16.ld.r32.decp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.ams.s16.st.incp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.bitrev(i32, i32) nounwind +declare void @llvm.riscv.esp.fft.cmul.s16.ld.xp(i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.cmul.s16.st.xp(i32, i32, i32, i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.r2bf.s16(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.r2bf.s16.st.incp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.fft.vst.r32.decp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.ld.128.usar.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.ld.128.usar.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.ld.xacc.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.s16.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.s16.128.xp(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.s8.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.s8.128.xp(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.u16.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.u16.128.xp(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.u8.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldqa.u8.128.xp(i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.16.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.16.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.32.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.32.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.8.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldbc.8.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.s16.ip(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.s16.xp(i32, i32, i32, i32) nounwind +declare void 
@llvm.riscv.esp.vldext.s8.ip(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.s8.xp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.u16.ip(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.u16.xp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.u8.ip(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldext.u8.xp(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vldhbc.16.incp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.ld.qacc.h.h.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ld.qacc.h.l.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ld.qacc.l.h.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ld.qacc.l.l.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ld.ua.state.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.ldxq.32(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.st.qacc.h.h.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.st.qacc.h.l.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.st.qacc.l.h.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.st.qacc.l.l.128.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.st.ua.state.ip(i32, i32) nounwind +declare void @llvm.riscv.esp.stxq.32(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.128.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.128.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.h.64.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.h.64.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.l.64.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vld.l.64.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vst.128.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vst.128.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vst.h.64.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vst.h.64.xp(i32, i32, i32) nounwind +declare void 
@llvm.riscv.esp.vst.l.64.ip(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vst.l.64.xp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.slci.2q(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.slcxxp.2q(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.src.q(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.src.q.ld.ip(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.src.q.ld.xp(i32, i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.src.q.qup(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srci.2q(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.s16.q.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.s16.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.s8.q.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.s8.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.u16.q.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.u16.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.u8.q.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcmb.u8.qacc(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcq.128.st.incp(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srcxxp.2q(i32, i32, i32, i32) nounwind +declare void @llvm.riscv.esp.srs.s.xacc(i32, i32) nounwind +declare void @llvm.riscv.esp.srs.u.xacc(i32, i32) nounwind +declare void @llvm.riscv.esp.vsl.32(i32, i32) nounwind +declare void @llvm.riscv.esp.vsld.16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsld.32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsld.8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsr.s32(i32, i32) nounwind +declare void @llvm.riscv.esp.vsr.u32(i32, i32) nounwind +declare void @llvm.riscv.esp.vsrd.16(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsrd.32(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.vsrd.8(i32, i32, i32) nounwind +declare void @llvm.riscv.esp.st.s.xacc.ip(i32, i32) 
nounwind +declare void @llvm.riscv.esp.st.u.xacc.ip(i32, i32) nounwind diff --git a/llvm/test/MC/RISCV/esp32p4-hwlp-valid.s b/llvm/test/MC/RISCV/esp32p4-hwlp-valid.s new file mode 100644 index 00000000000000..f2d8e11550ec89 --- /dev/null +++ b/llvm/test/MC/RISCV/esp32p4-hwlp-valid.s @@ -0,0 +1,22 @@ +# RUN: llvm-mc %s -triple=riscv32 -mcpu=esp32p4 -show-encoding | FileCheck -check-prefixes=CHECK %s + +dl_hwlp_test: +# CHECK: dl_hwlp_test: + esp.lp.setup 0, a1, loop_last_instruction +# CHECK: esp.lp.setup 0, a1, loop_last_instruction # encoding: [0x2b'A',0xc0'A',0x05'A',A] + esp.lp.starti 0, loop_last_instruction +# CHECK: esp.lp.starti 0, loop_last_instruction # encoding: [0x2b'A',A,A,A] + esp.lp.counti 0, 4000 +# CHECK: esp.lp.counti 0, 4000 # encoding: [0x2b,0x30,0x00,0xfa] + esp.lp.count 0, a1 +# CHECK: esp.lp.count 0, a1 # encoding: [0x2b,0x20,0x00,0x00] + esp.lp.setupi 0, 1234, loop_last_instruction +# CHECK: esp.lp.setupi 0, 1234, loop_last_instruction # encoding: [0x2b'A',0x50'A',0x20'A',0x4d'A'] + # lp.setup 0, a1, loop_last_instruction +# CHECK: # fixup A - offset: 0, value: loop_last_instruction, kind: fixup_riscv_branch + loop_last_instruction: +# CHECK: loop_last_instruction: + addi a0, a0, 1 +# CHECK: addi a0, a0, 1 # encoding: [0x05,0x05] + ret +# CHECK: ret # encoding: [0x82,0x80] diff --git a/llvm/test/MC/RISCV/esp32p4-valid.s b/llvm/test/MC/RISCV/esp32p4-valid.s new file mode 100644 index 00000000000000..d64dadffb322f1 --- /dev/null +++ b/llvm/test/MC/RISCV/esp32p4-valid.s @@ -0,0 +1,710 @@ +# RUN: llvm-mc %s -triple=riscv32 -mcpu=esp32p4 -show-encoding | FileCheck -check-prefixes=CHECK %s + +esp.vcmulas.s16.qacc.h q2, q2 +# CHECK: esp.vcmulas.s16.qacc.h q2, q2 # encoding: [0x5f,0x80,0x87,0x4a] +esp.vcmulas.s16.qacc.h.ld.ip q5, a3, -16, q0, q4 +# CHECK: esp.vcmulas.s16.qacc.h.ld.ip q5, a3, -16, q0, q4 # encoding: [0x3b,0xf7,0xfa,0x13] +esp.vcmulas.s16.qacc.h.ld.xp q0, a4, a5, q2, q3 +# CHECK: esp.vcmulas.s16.qacc.h.ld.xp q0, a4, a5, q2, q3 # 
encoding: [0x7f,0x20,0x73,0x4f] +esp.vcmulas.s16.qacc.l q3, q5 +# CHECK: esp.vcmulas.s16.qacc.l q3, q5 # encoding: [0x5f,0x80,0x83,0x76] +esp.vcmulas.s16.qacc.l.ld.ip q6, a1, -32, q6, q5 +# CHECK: esp.vcmulas.s16.qacc.l.ld.ip q6, a1, -32, q6, q5 # encoding: [0x3b,0xfb,0xf1,0xd5] +esp.vcmulas.s16.qacc.l.ld.xp q6, a2, a0, q4, q5 +# CHECK: esp.vcmulas.s16.qacc.l.ld.xp q6, a2, a0, q4, q5 # encoding: [0x7f,0x38,0x22,0x95] +esp.vcmulas.s8.qacc.h q4, q2 +# CHECK: esp.vcmulas.s8.qacc.h q4, q2 # encoding: [0x5f,0x80,0x85,0x8a] +esp.vcmulas.s8.qacc.h.ld.ip q5, a1, 96, q3, q4 +# CHECK: esp.vcmulas.s8.qacc.h.ld.ip q5, a1, 96, q3, q4 # encoding: [0x3b,0xf7,0xb1,0x72] +esp.vcmulas.s8.qacc.h.ld.xp q6, a1, a3, q1, q6 +# CHECK: esp.vcmulas.s8.qacc.h.ld.xp q6, a1, a3, q1, q6 # encoding: [0x7f,0xb8,0x51,0x3a] +esp.vcmulas.s8.qacc.l q0, q2 +# CHECK: esp.vcmulas.s8.qacc.l q0, q2 # encoding: [0x5f,0x80,0x81,0x0a] +esp.vcmulas.s8.qacc.l.ld.ip q4, a2, -128, q0, q2 +# CHECK: esp.vcmulas.s8.qacc.l.ld.ip q4, a2, -128, q0, q2 # encoding: [0x3b,0x73,0xc2,0x08] +esp.vcmulas.s8.qacc.l.ld.xp q0, a5, a1, q2, q2 +# CHECK: esp.vcmulas.s8.qacc.l.ld.xp q0, a5, a1, q2, q2 # encoding: [0x7f,0xa0,0x33,0x48] +esp.vmulas.s16.qacc q2, q3 +# CHECK: esp.vmulas.s16.qacc q2, q3 # encoding: [0x5f,0x00,0xc7,0x4e] +esp.vmulas.s16.qacc.ld.ip q1, a5, -112, q1, q4 +# CHECK: esp.vmulas.s16.qacc.ld.ip q1, a5, -112, q1, q4 # encoding: [0xbb,0xe6,0xe3,0x32] +esp.vmulas.s16.qacc.ld.xp q4, a4, a1, q6, q3 +# CHECK: esp.vmulas.s16.qacc.ld.xp q4, a4, a1, q6, q3 # encoding: [0xff,0x32,0x3b,0xce] +esp.vmulas.s16.qacc.st.ip q6, a5, -96, q5, q2 +# CHECK: esp.vmulas.s16.qacc.st.ip q6, a5, -96, q5, q2 # encoding: [0xbb,0xf8,0xeb,0xab] +esp.vmulas.s16.qacc.st.xp q4, a2, a5, q3, q1 +# CHECK: esp.vmulas.s16.qacc.st.xp q4, a2, a5, q3, q1 # encoding: [0xff,0x32,0x7a,0x67] +esp.vmulas.s16.xacc q5, q2 +# CHECK: esp.vmulas.s16.xacc q5, q2 # encoding: [0x5f,0x00,0xc3,0xaa] +esp.vmulas.s16.xacc.ld.ip q4, a3, 48, q3, q6 +# CHECK: 
esp.vmulas.s16.xacc.ld.ip q4, a3, 48, q3, q6 # encoding: [0xbb,0xf2,0xca,0x78] +esp.vmulas.s16.xacc.ld.xp q6, a2, a5, q0, q0 +# CHECK: esp.vmulas.s16.xacc.ld.xp q6, a2, a5, q0, q0 # encoding: [0xff,0x3a,0x7a,0x00] +esp.vmulas.s16.xacc.st.ip q1, a1, 16, q4, q3 +# CHECK: esp.vmulas.s16.xacc.st.ip q1, a1, 16, q4, q3 # encoding: [0xbb,0xe6,0xc1,0x8d] +esp.vmulas.s16.xacc.st.xp q2, a4, a0, q5, q0 +# CHECK: esp.vmulas.s16.xacc.st.xp q2, a4, a0, q5, q0 # encoding: [0xff,0x2a,0x2b,0xa1] +esp.vmulas.s8.qacc q4, q6 +# CHECK: esp.vmulas.s8.qacc q4, q6 # encoding: [0x5f,0x00,0xc5,0x9a] +esp.vmulas.s8.qacc.ld.ip q5, a2, 64, q5, q0 +# CHECK: esp.vmulas.s8.qacc.ld.ip q5, a2, 64, q5, q0 # encoding: [0xbb,0x74,0x52,0xa2] +esp.vmulas.s8.qacc.ld.xp q1, a1, a3, q5, q0 +# CHECK: esp.vmulas.s8.qacc.ld.xp q1, a1, a3, q5, q0 # encoding: [0xff,0xa6,0x51,0xa2] +esp.vmulas.s8.qacc.st.ip q5, a3, 16, q5, q1 +# CHECK: esp.vmulas.s8.qacc.st.ip q5, a3, 16, q5, q1 # encoding: [0xbb,0xf6,0x42,0xa7] +esp.vmulas.s8.qacc.st.xp q5, a1, a4, q4, q0 +# CHECK: esp.vmulas.s8.qacc.st.xp q5, a1, a4, q4, q0 # encoding: [0xff,0xb6,0x61,0x83] +esp.vmulas.s8.xacc q1, q0 +# CHECK: esp.vmulas.s8.xacc q1, q0 # encoding: [0x5f,0x00,0xc1,0x22] +esp.vmulas.s8.xacc.ld.ip q0, a4, 16, q4, q4 +# CHECK: esp.vmulas.s8.xacc.ld.ip q0, a4, 16, q4, q4 # encoding: [0xbb,0x62,0x43,0x90] +esp.vmulas.s8.xacc.ld.xp q0, a5, a2, q4, q2 +# CHECK: esp.vmulas.s8.xacc.ld.xp q0, a5, a2, q4, q2 # encoding: [0xff,0xa2,0x43,0x88] +esp.vmulas.s8.xacc.st.ip q3, a3, -32, q0, q5 +# CHECK: esp.vmulas.s8.xacc.st.ip q3, a3, -32, q0, q5 # encoding: [0xbb,0xec,0x7a,0x15] +esp.vmulas.s8.xacc.st.xp q0, a5, a4, q3, q3 +# CHECK: esp.vmulas.s8.xacc.st.xp q0, a5, a4, q3, q3 # encoding: [0xff,0xa2,0x63,0x6d] +esp.vmulas.u16.qacc q2, q0 +# CHECK: esp.vmulas.u16.qacc q2, q0 # encoding: [0x5f,0x00,0xc6,0x42] +esp.vmulas.u16.qacc.ld.ip q5, a4, 80, q1, q3 +# CHECK: esp.vmulas.u16.qacc.ld.ip q5, a4, 80, q1, q3 # encoding: [0xbb,0x76,0x93,0x2e] 
+esp.vmulas.u16.qacc.ld.xp q5, a5, a3, q6, q3 +# CHECK: esp.vmulas.u16.qacc.ld.xp q5, a5, a3, q6, q3 # encoding: [0xff,0xb4,0x5b,0xce] +esp.vmulas.u16.qacc.st.ip q5, a3, -80, q2, q0 +# CHECK: esp.vmulas.u16.qacc.st.ip q5, a3, -80, q2, q0 # encoding: [0xbb,0xf6,0xaa,0x43] +esp.vmulas.u16.qacc.st.xp q0, a2, a4, q5, q5 +# CHECK: esp.vmulas.u16.qacc.st.xp q0, a2, a4, q5, q5 # encoding: [0xff,0x20,0x6a,0xb7] +esp.vmulas.u16.xacc q2, q6 +# CHECK: esp.vmulas.u16.xacc q2, q6 # encoding: [0x5f,0x00,0xc2,0x5a] +esp.vmulas.u16.xacc.ld.ip q1, a3, 16, q5, q2 +# CHECK: esp.vmulas.u16.xacc.ld.ip q1, a3, 16, q5, q2 # encoding: [0xbb,0xe6,0x82,0xa8] +esp.vmulas.u16.xacc.ld.xp q2, a3, a4, q1, q3 +# CHECK: esp.vmulas.u16.xacc.ld.xp q2, a3, a4, q1, q3 # encoding: [0xff,0xa8,0x6a,0x2c] +esp.vmulas.u16.xacc.st.ip q3, a4, -112, q1, q3 +# CHECK: esp.vmulas.u16.xacc.st.ip q3, a4, -112, q1, q3 # encoding: [0xbb,0x6e,0xa3,0x2d] +esp.vmulas.u16.xacc.st.xp q4, a0, a2, q0, q3 +# CHECK: esp.vmulas.u16.xacc.st.xp q4, a0, a2, q0, q3 # encoding: [0xff,0x30,0x49,0x0d] +esp.vmulas.u8.qacc q6, q1 +# CHECK: esp.vmulas.u8.qacc q6, q1 # encoding: [0x5f,0x00,0xc4,0xc6] +esp.vmulas.u8.qacc.ld.ip q0, a4, -80, q0, q3 +# CHECK: esp.vmulas.u8.qacc.ld.ip q0, a4, -80, q0, q3 # encoding: [0xbb,0x62,0x2b,0x0e] +esp.vmulas.u8.qacc.ld.xp q0, a3, a0, q1, q3 +# CHECK: esp.vmulas.u8.qacc.ld.xp q0, a3, a0, q1, q3 # encoding: [0xff,0xa0,0x22,0x2e] +esp.vmulas.u8.qacc.st.ip q2, a3, 64, q0, q0 +# CHECK: esp.vmulas.u8.qacc.st.ip q2, a3, 64, q0, q0 # encoding: [0xbb,0xe8,0x12,0x03] +esp.vmulas.u8.qacc.st.xp q6, a2, a2, q3, q1 +# CHECK: esp.vmulas.u8.qacc.st.xp q6, a2, a2, q3, q1 # encoding: [0xff,0x38,0x42,0x67] +esp.vmulas.u8.xacc q3, q3 +# CHECK: esp.vmulas.u8.xacc q3, q3 # encoding: [0x5f,0x00,0xc0,0x6e] +esp.vmulas.u8.xacc.ld.ip q0, a5, 16, q0, q1 +# CHECK: esp.vmulas.u8.xacc.ld.ip q0, a5, 16, q0, q1 # encoding: [0xbb,0xe2,0x03,0x04] +esp.vmulas.u8.xacc.ld.xp q4, a0, a2, q1, q5 +# CHECK: esp.vmulas.u8.xacc.ld.xp q4, a0, 
a2, q1, q5 # encoding: [0xff,0x30,0x41,0x34] +esp.vmulas.u8.xacc.st.ip q4, a1, -48, q4, q6 +# CHECK: esp.vmulas.u8.xacc.st.ip q4, a1, -48, q4, q6 # encoding: [0xbb,0xf2,0x31,0x99] +esp.vmulas.u8.xacc.st.xp q4, a2, a3, q0, q2 +# CHECK: esp.vmulas.u8.xacc.st.xp q4, a2, a3, q0, q2 # encoding: [0xff,0x30,0x52,0x09] +esp.vmulas.s16.qacc.ldbc.incp q5, a2, q0, q6 +# CHECK: esp.vmulas.s16.qacc.ldbc.incp q5, a2, q0, q6 # encoding: [0xbb,0x75,0x62,0x18] +esp.vmulas.s8.qacc.ldbc.incp q3, a5, q4, q2 +# CHECK: esp.vmulas.s8.qacc.ldbc.incp q3, a5, q4, q2 # encoding: [0xbb,0xed,0x23,0x88] +esp.vmulas.u16.qacc.ldbc.incp q2, a1, q1, q3 +# CHECK: esp.vmulas.u16.qacc.ldbc.incp q2, a1, q1, q3 # encoding: [0xbb,0xe9,0x41,0x2c] +esp.vmulas.u8.qacc.ldbc.incp q0, a1, q2, q0 +# CHECK: esp.vmulas.u8.qacc.ldbc.incp q0, a1, q2, q0 # encoding: [0xbb,0xe1,0x01,0x40] +esp.vsmulas.s16.qacc q0, q5, 5 +# CHECK: esp.vsmulas.s16.qacc q0, q5, 5 # encoding: [0x5f,0x80,0xf2,0x16] +esp.vsmulas.s16.qacc.ld.incp q1, a2, q4, q2, 4 +# CHECK: esp.vsmulas.s16.qacc.ld.incp q1, a2, q4, q2, 4 # encoding: [0xbb,0x67,0xa2,0x8b] +esp.vsmulas.s8.qacc q2, q1, 13 +# CHECK: esp.vsmulas.s8.qacc q2, q1, 13 # encoding: [0x5f,0x80,0xb6,0x46] +esp.vsmulas.s8.qacc.ld.incp q2, a5, q1, q3, 0 +# CHECK: esp.vsmulas.s8.qacc.ld.incp q2, a5, q1, q3, 0 # encoding: [0xbb,0xeb,0x83,0x2d] +esp.vsmulas.u16.qacc q6, q1, 5 +# CHECK: esp.vsmulas.u16.qacc q6, q1, 5 # encoding: [0x5f,0x80,0xd2,0xc6] +esp.vsmulas.u16.qacc.ld.incp q0, a0, q6, q6, 0 +# CHECK: esp.vsmulas.u16.qacc.ld.incp q0, a0, q6, q6, 0 # encoding: [0xbb,0x63,0x81,0xda] +esp.vsmulas.u8.qacc q0, q3, 7 +# CHECK: esp.vsmulas.u8.qacc q0, q3, 7 # encoding: [0x5f,0x80,0x93,0x0e] +esp.vsmulas.u8.qacc.ld.incp q6, a0, q6, q5, 8 +# CHECK: esp.vsmulas.u8.qacc.ld.incp q6, a0, q6, q5, 8 # encoding: [0xbb,0x7b,0xc1,0xd4] +esp.cmul.s16 q0, q2, q4, 3 +# CHECK: esp.cmul.s16 q0, q2, q4, 3 # encoding: [0x5f,0xa4,0x07,0x50] +esp.cmul.s16.ld.incp q6, a4, q1, q1, q5, 0 +# CHECK: 
esp.cmul.s16.ld.incp q6, a4, q1, q1, q5, 0 # encoding: [0xbf,0x58,0xc3,0x34] +esp.cmul.s16.st.incp q4, a0, q0, q5, q0, 0 +# CHECK: esp.cmul.s16.st.incp q4, a0, q0, q5, q0, 0 # encoding: [0x3f,0x50,0xc1,0xa2] +esp.cmul.s8 q6, q1, q6, 2 +# CHECK: esp.cmul.s8 q6, q1, q6, 2 # encoding: [0x5f,0x27,0x03,0x38] +esp.cmul.s8.ld.incp q4, a3, q0, q4, q2, 3 +# CHECK: esp.cmul.s8.ld.incp q4, a3, q0, q4, q2, 3 # encoding: [0x3f,0xd0,0x72,0x88] +esp.cmul.s8.st.incp q5, a1, q5, q2, q0, 3 +# CHECK: esp.cmul.s8.st.incp q5, a1, q5, q2, q0, 3 # encoding: [0xbf,0xd6,0x71,0x42] +esp.cmul.u16 q2, q3, q5, 1 +# CHECK: esp.cmul.u16 q2, q3, q5, 1 # encoding: [0x5f,0xa5,0x04,0x74] +esp.cmul.u16.ld.incp q1, a1, q6, q5, q1, 0 +# CHECK: esp.cmul.u16.ld.incp q1, a1, q6, q5, q1, 0 # encoding: [0x3f,0xc7,0x81,0xa4] +esp.cmul.u16.st.incp q6, a1, q2, q2, q4, 0 +# CHECK: esp.cmul.u16.st.incp q6, a1, q2, q2, q4, 0 # encoding: [0x3f,0xd9,0x81,0x52] +esp.cmul.u8 q1, q4, q3, 3 +# CHECK: esp.cmul.u8 q1, q4, q3, 3 # encoding: [0xdf,0xa4,0x01,0x8c] +esp.cmul.u8.ld.incp q2, a5, q3, q0, q5, 1 +# CHECK: esp.cmul.u8.ld.incp q2, a5, q3, q0, q5, 1 # encoding: [0xbf,0xc9,0x13,0x14] +esp.cmul.u8.st.incp q4, a2, q0, q4, q4, 0 +# CHECK: esp.cmul.u8.st.incp q4, a2, q0, q4, q4, 0 # encoding: [0x3f,0x50,0x02,0x92] +esp.max.s16.a q5, a2 +# CHECK: esp.max.s16.a q5, a2 # encoding: [0x5b,0x52,0xc8,0x91] +esp.max.s32.a q5, a5 +# CHECK: esp.max.s32.a q5, a5 # encoding: [0xdb,0x53,0xa8,0x91] +esp.max.s8.a q2, a0 +# CHECK: esp.max.s8.a q2, a0 # encoding: [0x5b,0x51,0x40,0x92] +esp.max.u16.a q6, a3 +# CHECK: esp.max.u16.a q6, a3 # encoding: [0xdb,0x52,0x88,0x92] +esp.max.u32.a q0, a2 +# CHECK: esp.max.u32.a q0, a2 # encoding: [0x5b,0x52,0x20,0x90] +esp.max.u8.a q5, a0 +# CHECK: esp.max.u8.a q5, a0 # encoding: [0x5b,0x51,0x08,0x91] +esp.min.s16.a q0, a3 +# CHECK: esp.min.s16.a q0, a3 # encoding: [0xdb,0x52,0xd0,0x90] +esp.min.s32.a q1, a1 +# CHECK: esp.min.s32.a q1, a1 # encoding: [0xdb,0x51,0xb0,0x91] +esp.min.s8.a q3, a4 +# 
CHECK: esp.min.s8.a q3, a4 # encoding: [0x5b,0x53,0x50,0x93] +esp.min.u16.a q6, a0 +# CHECK: esp.min.u16.a q6, a0 # encoding: [0x5b,0x51,0x98,0x92] +esp.min.u32.a q2, a3 +# CHECK: esp.min.u32.a q2, a3 # encoding: [0xdb,0x52,0x30,0x92] +esp.min.u8.a q1, a3 +# CHECK: esp.min.u8.a q1, a3 # encoding: [0xdb,0x52,0x10,0x91] +esp.vabs.16 q5, q2 +# CHECK: esp.vabs.16 q5, q2 # encoding: [0x5b,0x10,0x50,0x88] +esp.vabs.32 q0, q3 +# CHECK: esp.vabs.32 q0, q3 # encoding: [0x5b,0x08,0x00,0x8c] +esp.vabs.8 q6, q1 +# CHECK: esp.vabs.8 q6, q1 # encoding: [0x5b,0x00,0x60,0x84] +esp.vadd.s16 q1, q0, q3 +# CHECK: esp.vadd.s16 q1, q0, q3 # encoding: [0x5f,0x06,0x94,0x0e] +esp.vadd.s16.ld.incp q2, a3, q4, q3, q1 +# CHECK: esp.vadd.s16.ld.incp q2, a3, q4, q3, q1 # encoding: [0x3b,0xe8,0x4a,0x65] +esp.vadd.s16.st.incp q6, a2, q1, q3, q1 +# CHECK: esp.vadd.s16.st.incp q6, a2, q1, q3, q1 # encoding: [0x3b,0x78,0x1a,0x67] +esp.vadd.s32 q2, q5, q3 +# CHECK: esp.vadd.s32 q2, q5, q3 # encoding: [0x5f,0x05,0xa4,0xae] +esp.vadd.s32.ld.incp q5, a0, q0, q6, q2 +# CHECK: esp.vadd.s32.ld.incp q5, a0, q0, q6, q2 # encoding: [0x3b,0x75,0x01,0xc9] +esp.vadd.s32.st.incp q5, a0, q2, q6, q1 +# CHECK: esp.vadd.s32.st.incp q5, a0, q2, q6, q1 # encoding: [0x3b,0x75,0x21,0xc7] +esp.vadd.s8 q6, q4, q1 +# CHECK: esp.vadd.s8 q6, q4, q1 # encoding: [0x5f,0x06,0xe0,0x86] +esp.vadd.s8.ld.incp q6, a0, q3, q0, q4 +# CHECK: esp.vadd.s8.ld.incp q6, a0, q3, q0, q4 # encoding: [0x3b,0x78,0x39,0x10] +esp.vadd.s8.st.incp q0, a5, q0, q0, q4 +# CHECK: esp.vadd.s8.st.incp q0, a5, q0, q0, q4 # encoding: [0x3b,0xe0,0x0b,0x12] +esp.vadd.u16 q3, q5, q0 +# CHECK: esp.vadd.u16 q3, q5, q0 # encoding: [0x5f,0x04,0xb4,0xa2] +esp.vadd.u16.ld.incp q6, a1, q4, q6, q4 +# CHECK: esp.vadd.u16.ld.incp q6, a1, q4, q6, q4 # encoding: [0x3b,0xf8,0x41,0xd1] +esp.vadd.u16.st.incp q5, a2, q4, q3, q5 +# CHECK: esp.vadd.u16.st.incp q5, a2, q4, q3, q5 # encoding: [0x3b,0x74,0x42,0x77] +esp.vadd.u32 q5, q2, q2 +# CHECK: esp.vadd.u32 q5, q2, q2 # 
encoding: [0x5f,0x05,0xd0,0x4a] +esp.vadd.u32.ld.incp q3, a0, q6, q1, q2 +# CHECK: esp.vadd.u32.ld.incp q3, a0, q6, q1, q2 # encoding: [0x3b,0x6d,0x61,0x28] +esp.vadd.u32.st.incp q1, a1, q2, q6, q6 +# CHECK: esp.vadd.u32.st.incp q1, a1, q2, q6, q6 # encoding: [0x3b,0xe5,0x21,0xda] +esp.vadd.u8 q0, q0, q3 +# CHECK: esp.vadd.u8 q0, q0, q3 # encoding: [0x5f,0x04,0x80,0x0e] +esp.vadd.u8.ld.incp q3, a1, q0, q0, q1 +# CHECK: esp.vadd.u8.ld.incp q3, a1, q0, q0, q1 # encoding: [0x3b,0xec,0x01,0x04] +esp.vadd.u8.st.incp q1, a1, q0, q4, q6 +# CHECK: esp.vadd.u8.st.incp q1, a1, q0, q4, q6 # encoding: [0x3b,0xe4,0x01,0x9a] +esp.vclamp.s16 q0, q5, 4 +# CHECK: esp.vclamp.s16 q0, q5, 4 # encoding: [0x5b,0x50,0x00,0xa1] +esp.vmax.s16 q4, q1, q2 +# CHECK: esp.vmax.s16 q4, q1, q2 # encoding: [0x5f,0xae,0x06,0x28] +esp.vmax.s16.ld.incp q3, a5, q3, q4, q4 +# CHECK: esp.vmax.s16.ld.incp q3, a5, q3, q4, q4 # encoding: [0xbf,0xcd,0x6b,0x90] +esp.vmax.s16.st.incp q4, a4, q0, q5, q6 +# CHECK: esp.vmax.s16.st.incp q4, a4, q0, q5, q6 # encoding: [0x3f,0x50,0xeb,0xb8] +esp.vmax.s32 q4, q0, q4 +# CHECK: esp.vmax.s32 q4, q0, q4 # encoding: [0x5f,0xae,0x05,0x10] +esp.vmax.s32.ld.incp q2, a0, q2, q2, q1 +# CHECK: esp.vmax.s32.ld.incp q2, a0, q2, q2, q1 # encoding: [0x3f,0x49,0x59,0x44] +esp.vmax.s32.st.incp q4, a1, q1, q4, q2 +# CHECK: esp.vmax.s32.st.incp q4, a1, q1, q4, q2 # encoding: [0xbf,0xd0,0xd9,0x88] +esp.vmax.s8 q3, q1, q0 +# CHECK: esp.vmax.s8 q3, q1, q0 # encoding: [0xdf,0xad,0x02,0x20] +esp.vmax.s8.ld.incp q4, a3, q5, q2, q5 +# CHECK: esp.vmax.s8.ld.incp q4, a3, q5, q2, q5 # encoding: [0xbf,0xd2,0x2a,0x54] +esp.vmax.s8.st.incp q0, a3, q3, q4, q5 +# CHECK: esp.vmax.s8.st.incp q0, a3, q3, q4, q5 # encoding: [0xbf,0xc1,0xaa,0x94] +esp.vmax.u16 q6, q3, q2 +# CHECK: esp.vmax.u16 q6, q3, q2 # encoding: [0x5f,0xaf,0x04,0x68] +esp.vmax.u16.ld.incp q5, a1, q5, q5, q3 +# CHECK: esp.vmax.u16.ld.incp q5, a1, q5, q5, q3 # encoding: [0xbf,0xd6,0x49,0xac] +esp.vmax.u16.st.incp q2, a4, q3, q2, q5 +# 
CHECK: esp.vmax.u16.st.incp q2, a4, q3, q2, q5 # encoding: [0xbf,0x49,0xcb,0x54] +esp.vmax.u32 q1, q0, q3 +# CHECK: esp.vmax.u32 q1, q0, q3 # encoding: [0xdf,0xac,0x01,0x0c] +esp.vmax.u32.ld.incp q3, a1, q5, q0, q4 +# CHECK: esp.vmax.u32.ld.incp q3, a1, q5, q0, q4 # encoding: [0xbf,0xce,0x19,0x10] +esp.vmax.u32.st.incp q1, a3, q2, q5, q5 +# CHECK: esp.vmax.u32.st.incp q1, a3, q2, q5, q5 # encoding: [0x3f,0xc5,0x9a,0xb4] +esp.vmax.u8 q1, q4, q0 +# CHECK: esp.vmax.u8 q1, q4, q0 # encoding: [0xdf,0xac,0x00,0x80] +esp.vmax.u8.ld.incp q1, a2, q3, q4, q6 +# CHECK: esp.vmax.u8.ld.incp q1, a2, q3, q4, q6 # encoding: [0xbf,0x45,0x0a,0x98] +esp.vmax.u8.st.incp q2, a3, q5, q3, q5 +# CHECK: esp.vmax.u8.st.incp q2, a3, q5, q3, q5 # encoding: [0xbf,0xca,0x8a,0x74] +esp.vmin.s16 q5, q3, q2 +# CHECK: esp.vmin.s16 q5, q3, q2 # encoding: [0xdf,0x3e,0x06,0x68] +esp.vmin.s16.ld.incp q6, a3, q1, q4, q5 +# CHECK: esp.vmin.s16.ld.incp q6, a3, q1, q4, q5 # encoding: [0xbf,0xd8,0x6a,0x95] +esp.vmin.s16.st.incp q0, a1, q5, q0, q3 +# CHECK: esp.vmin.s16.st.incp q0, a1, q5, q0, q3 # encoding: [0xbf,0xc2,0xe9,0x0d] +esp.vmin.s32 q3, q1, q4 +# CHECK: esp.vmin.s32 q3, q1, q4 # encoding: [0xdf,0x3d,0x05,0x30] +esp.vmin.s32.ld.incp q4, a0, q1, q6, q3 +# CHECK: esp.vmin.s32.ld.incp q4, a0, q1, q6, q3 # encoding: [0xbf,0x50,0x59,0xcd] +esp.vmin.s32.st.incp q6, a5, q4, q6, q2 +# CHECK: esp.vmin.s32.st.incp q6, a5, q4, q6, q2 # encoding: [0x3f,0xda,0xdb,0xc9] +esp.vmin.s8 q3, q2, q1 +# CHECK: esp.vmin.s8 q3, q2, q1 # encoding: [0xdf,0x3d,0x02,0x44] +esp.vmin.s8.ld.incp q1, a4, q5, q5, q6 +# CHECK: esp.vmin.s8.ld.incp q1, a4, q5, q5, q6 # encoding: [0xbf,0x46,0x2b,0xb9] +esp.vmin.s8.st.incp q5, a1, q6, q2, q0 +# CHECK: esp.vmin.s8.st.incp q5, a1, q6, q2, q0 # encoding: [0x3f,0xd7,0xa9,0x41] +esp.vmin.u16 q5, q0, q1 +# CHECK: esp.vmin.u16 q5, q0, q1 # encoding: [0xdf,0x3e,0x04,0x04] +esp.vmin.u16.ld.incp q3, a5, q5, q3, q5 +# CHECK: esp.vmin.u16.ld.incp q3, a5, q5, q3, q5 # encoding: 
[0xbf,0xce,0x4b,0x75] +esp.vmin.u16.st.incp q2, a3, q5, q6, q3 +# CHECK: esp.vmin.u16.st.incp q2, a3, q5, q6, q3 # encoding: [0xbf,0xca,0xca,0xcd] +esp.vmin.u32 q5, q0, q2 +# CHECK: esp.vmin.u32 q5, q0, q2 # encoding: [0xdf,0x3e,0x01,0x08] +esp.vmin.u32.ld.incp q4, a5, q4, q4, q2 +# CHECK: esp.vmin.u32.ld.incp q4, a5, q4, q4, q2 # encoding: [0x3f,0xd2,0x1b,0x89] +esp.vmin.u32.st.incp q4, a2, q1, q6, q3 +# CHECK: esp.vmin.u32.st.incp q4, a2, q1, q6, q3 # encoding: [0xbf,0x50,0x9a,0xcd] +esp.vmin.u8 q0, q0, q0 +# CHECK: esp.vmin.u8 q0, q0, q0 # encoding: [0x5f,0x3c,0x00,0x00] +esp.vmin.u8.ld.incp q2, a5, q1, q0, q6 +# CHECK: esp.vmin.u8.ld.incp q2, a5, q1, q0, q6 # encoding: [0xbf,0xc8,0x0b,0x19] +esp.vmin.u8.st.incp q1, a2, q0, q1, q1 +# CHECK: esp.vmin.u8.st.incp q1, a2, q0, q1, q1 # encoding: [0x3f,0x44,0x8a,0x25] +esp.vmul.s16 q1, q2, q1 +# CHECK: esp.vmul.s16 q1, q2, q1 # encoding: [0xdf,0xbc,0x06,0x44] +esp.vmul.s16.ld.incp q4, a3, q1, q4, q2 +# CHECK: esp.vmul.s16.ld.incp q4, a3, q1, q4, q2 # encoding: [0xbf,0xd0,0x6a,0x8b] +esp.vmul.s16.s8xs8 q4, q5, q1, q1 +# CHECK: esp.vmul.s16.s8xs8 q4, q5, q1, q1 # encoding: [0x5f,0x06,0xd3,0x26] +esp.vmul.s16.st.incp q2, a3, q1, q4, q0 +# CHECK: esp.vmul.s16.st.incp q2, a3, q1, q4, q0 # encoding: [0xbf,0xc8,0xea,0x83] +esp.vmul.s32.s16xs16 q4, q5, q2, q1 +# CHECK: esp.vmul.s32.s16xs16 q4, q5, q2, q1 # encoding: [0x5f,0x06,0xd7,0x46] +esp.vmul.s8 q1, q4, q1 +# CHECK: esp.vmul.s8 q1, q4, q1 # encoding: [0xdf,0xbc,0x02,0x84] +esp.vmul.s8.ld.incp q0, a0, q4, q0, q4 +# CHECK: esp.vmul.s8.ld.incp q0, a0, q4, q0, q4 # encoding: [0x3f,0x42,0x29,0x13] +esp.vmul.s8.st.incp q4, a3, q5, q6, q6 +# CHECK: esp.vmul.s8.st.incp q4, a3, q5, q6, q6 # encoding: [0xbf,0xd2,0xaa,0xdb] +esp.vmul.u16 q6, q6, q1 +# CHECK: esp.vmul.u16 q6, q6, q1 # encoding: [0x5f,0xbf,0x04,0xc4] +esp.vmul.u16.ld.incp q1, a5, q5, q4, q6 +# CHECK: esp.vmul.u16.ld.incp q1, a5, q5, q4, q6 # encoding: [0xbf,0xc6,0x4b,0x9b] +esp.vmul.u16.st.incp q3, a4, q5, q3, q3 +# 
CHECK: esp.vmul.u16.st.incp q3, a4, q5, q3, q3 # encoding: [0xbf,0x4e,0xcb,0x6f] +esp.vmul.u8 q0, q1, q5 +# CHECK: esp.vmul.u8 q0, q1, q5 # encoding: [0x5f,0xbc,0x00,0x34] +esp.vmul.u8.ld.incp q5, a3, q0, q4, q5 +# CHECK: esp.vmul.u8.ld.incp q5, a3, q0, q4, q5 # encoding: [0x3f,0xd4,0x0a,0x97] +esp.vmul.u8.st.incp q5, a0, q2, q4, q5 +# CHECK: esp.vmul.u8.st.incp q5, a0, q2, q4, q5 # encoding: [0x3f,0x55,0x89,0x97] +esp.vprelu.s16 q0, q3, q2, a4 +# CHECK: esp.vprelu.s16 q0, q3, q2, a4 # encoding: [0x5f,0x60,0xa3,0x4e] +esp.vprelu.s8 q4, q5, q5, a3 +# CHECK: esp.vprelu.s8 q4, q5, q5, a3 # encoding: [0x5f,0xe2,0x22,0xb6] +esp.vrelu.s16 q1, a1, a4 +# CHECK: esp.vrelu.s16 q1, a1, a4 # encoding: [0x5b,0x5c,0x33,0x86] +esp.vrelu.s8 q6, a1, a5 +# CHECK: esp.vrelu.s8 q6, a1, a5 # encoding: [0x5b,0xd8,0x33,0x9a] +esp.vsadds.s16 q5, q6, a4 +# CHECK: esp.vsadds.s16 q5, q6, a4 # encoding: [0x5f,0x02,0xd3,0xda] +esp.vsadds.s8 q6, q1, a4 +# CHECK: esp.vsadds.s8 q6, q1, a4 # encoding: [0x5f,0x02,0xe3,0x2a] +esp.vsadds.u16 q0, q6, a2 +# CHECK: esp.vsadds.u16 q0, q6, a2 # encoding: [0x5f,0x02,0x82,0xd2] +esp.vsadds.u8 q3, q3, a1 +# CHECK: esp.vsadds.u8 q3, q3, a1 # encoding: [0x5f,0x82,0xb1,0x62] +esp.vsat.s16 q3, q6, a0, a4 +# CHECK: esp.vsat.s16 q3, q6, a0, a4 # encoding: [0xbb,0x59,0x61,0xd8] +esp.vsat.s32 q6, q4, a3, a2 +# CHECK: esp.vsat.s32 q6, q4, a3, a2 # encoding: [0x3b,0xd7,0x42,0x98] +esp.vsat.s8 q3, q2, a2, a1 +# CHECK: esp.vsat.s8 q3, q2, a2, a1 # encoding: [0xbb,0x49,0x32,0x58] +esp.vsat.u16 q5, q5, a0, a1 +# CHECK: esp.vsat.u16 q5, q5, a0, a1 # encoding: [0xbb,0x52,0x31,0xb8] +esp.vsat.u32 q6, q5, a3, a3 +# CHECK: esp.vsat.u32 q6, q5, a3, a3 # encoding: [0x3b,0xc7,0x52,0xb8] +esp.vsat.u8 q4, q1, a0, a3 +# CHECK: esp.vsat.u8 q4, q1, a0, a3 # encoding: [0x3b,0x42,0x51,0x38] +esp.vssubs.s16 q1, q6, a1 +# CHECK: esp.vssubs.s16 q1, q6, a1 # encoding: [0x5f,0x82,0x91,0xde] +esp.vssubs.s8 q6, q0, a3 +# CHECK: esp.vssubs.s8 q6, q0, a3 # encoding: [0x5f,0x82,0xe2,0x0e] 
+esp.vssubs.u16 q6, q1, a2 +# CHECK: esp.vssubs.u16 q6, q1, a2 # encoding: [0x5f,0x02,0xe2,0x36] +esp.vssubs.u8 q2, q4, a1 +# CHECK: esp.vssubs.u8 q2, q4, a1 # encoding: [0x5f,0x82,0xa1,0x86] +esp.vsub.s16 q3, q1, q2 +# CHECK: esp.vsub.s16 q3, q1, q2 # encoding: [0xdf,0x06,0xb4,0x2a] +esp.vsub.s16.ld.incp q5, a2, q6, q3, q5 +# CHECK: esp.vsub.s16.ld.incp q5, a2, q6, q3, q5 # encoding: [0x3b,0x75,0xea,0x75] +esp.vsub.s16.st.incp q4, a0, q1, q1, q1 +# CHECK: esp.vsub.s16.st.incp q4, a0, q1, q1, q1 # encoding: [0x3b,0x71,0x99,0x27] +esp.vsub.s32 q4, q1, q0 +# CHECK: esp.vsub.s32 q4, q1, q0 # encoding: [0xdf,0x05,0xc4,0x22] +esp.vsub.s32.ld.incp q2, a2, q5, q0, q5 +# CHECK: esp.vsub.s32.ld.incp q2, a2, q5, q0, q5 # encoding: [0x3b,0x6b,0x52,0x15] +esp.vsub.s32.st.incp q5, a5, q4, q1, q6 +# CHECK: esp.vsub.s32.st.incp q5, a5, q4, q1, q6 # encoding: [0x3b,0xf7,0x43,0x3b] +esp.vsub.s8 q2, q1, q3 +# CHECK: esp.vsub.s8 q2, q1, q3 # encoding: [0xdf,0x06,0xa0,0x2e] +esp.vsub.s8.ld.incp q4, a4, q5, q4, q0 +# CHECK: esp.vsub.s8.ld.incp q4, a4, q5, q4, q0 # encoding: [0x3b,0x71,0xdb,0x80] +esp.vsub.s8.st.incp q1, a5, q0, q1, q1 +# CHECK: esp.vsub.s8.st.incp q1, a5, q0, q1, q1 # encoding: [0x3b,0xe5,0x8b,0x26] +esp.vsub.u16 q1, q2, q1 +# CHECK: esp.vsub.u16 q1, q2, q1 # encoding: [0xdf,0x04,0x94,0x46] +esp.vsub.u16.ld.incp q6, a4, q1, q6, q1 +# CHECK: esp.vsub.u16.ld.incp q6, a4, q1, q6, q1 # encoding: [0x3b,0x79,0x93,0xc5] +esp.vsub.u16.st.incp q1, a0, q0, q1, q6 +# CHECK: esp.vsub.u16.st.incp q1, a0, q0, q1, q6 # encoding: [0x3b,0x65,0x81,0x3b] +esp.vsub.u32 q2, q2, q5 +# CHECK: esp.vsub.u32 q2, q2, q5 # encoding: [0xdf,0x05,0xa0,0x56] +esp.vsub.u32.ld.incp q4, a5, q2, q4, q3 +# CHECK: esp.vsub.u32.ld.incp q4, a5, q2, q4, q3 # encoding: [0x3b,0xf3,0x23,0x8c] +esp.vsub.u32.st.incp q0, a0, q3, q3, q1 +# CHECK: esp.vsub.u32.st.incp q0, a0, q3, q3, q1 # encoding: [0x3b,0x63,0x31,0x66] +esp.vsub.u8 q1, q4, q1 +# CHECK: esp.vsub.u8 q1, q4, q1 # encoding: [0xdf,0x04,0x90,0x86] 
+esp.vsub.u8.ld.incp q4, a1, q4, q5, q2 +# CHECK: esp.vsub.u8.ld.incp q4, a1, q4, q5, q2 # encoding: [0x3b,0xf1,0xc1,0xa8] +esp.vsub.u8.st.incp q6, a4, q6, q5, q5 +# CHECK: esp.vsub.u8.st.incp q6, a4, q6, q5, q5 # encoding: [0x3b,0x79,0xe3,0xb6] +esp.addx2 a1, a4, a2 +# CHECK: esp.addx2 a1, a4, a2 # encoding: [0xb3,0x05,0xc7,0x04] +esp.addx4 a1, a3, a3 +# CHECK: esp.addx4 a1, a3, a3 # encoding: [0xb3,0x85,0xd6,0x08] +esp.sat a5, a1, a2 +# CHECK: esp.sat a5, a1, a2 # encoding: [0xb3,0x25,0xf6,0x40] +esp.subx2 a0, a3, a5 +# CHECK: esp.subx2 a0, a3, a5 # encoding: [0x33,0x85,0xf6,0x44] +esp.subx4 a5, a4, a0 +# CHECK: esp.subx4 a5, a4, a0 # encoding: [0xb3,0x07,0xa7,0x48] +esp.andq q4, q2, q2 +# CHECK: esp.andq q4, q2, q2 # encoding: [0x5f,0x22,0x04,0x48] +esp.notq q0, q1 +# CHECK: esp.notq q0, q1 # encoding: [0x5f,0x20,0x06,0x20] +esp.orq q4, q3, q1 +# CHECK: esp.orq q4, q3, q1 # encoding: [0x5f,0x22,0x00,0x64] +esp.xorq q2, q1, q1 +# CHECK: esp.xorq q2, q1, q1 # encoding: [0x5f,0x21,0x02,0x24] +esp.vcmp.eq.s16 q1, q5, q1 +# CHECK: esp.vcmp.eq.s16 q1, q5, q1 # encoding: [0xdf,0xb4,0x01,0xa4] +esp.vcmp.eq.s32 q3, q3, q2 +# CHECK: esp.vcmp.eq.s32 q3, q3, q2 # encoding: [0xdf,0x2d,0x01,0x68] +esp.vcmp.eq.s8 q3, q6, q6 +# CHECK: esp.vcmp.eq.s8 q3, q6, q6 # encoding: [0xdf,0xb5,0x00,0xd8] +esp.vcmp.eq.u16 q6, q2, q5 +# CHECK: esp.vcmp.eq.u16 q6, q2, q5 # encoding: [0x5f,0x37,0x01,0x54] +esp.vcmp.eq.u32 q0, q6, q6 +# CHECK: esp.vcmp.eq.u32 q0, q6, q6 # encoding: [0x5f,0x2c,0x00,0xd8] +esp.vcmp.eq.u8 q4, q2, q4 +# CHECK: esp.vcmp.eq.u8 q4, q2, q4 # encoding: [0x5f,0x36,0x00,0x50] +esp.vcmp.gt.s16 q4, q0, q2 +# CHECK: esp.vcmp.gt.s16 q4, q0, q2 # encoding: [0x5f,0xb6,0x05,0x08] +esp.vcmp.gt.s32 q1, q1, q0 +# CHECK: esp.vcmp.gt.s32 q1, q1, q0 # encoding: [0xdf,0x2c,0x05,0x20] +esp.vcmp.gt.s8 q4, q0, q2 +# CHECK: esp.vcmp.gt.s8 q4, q0, q2 # encoding: [0x5f,0xb6,0x04,0x08] +esp.vcmp.gt.u16 q5, q6, q2 +# CHECK: esp.vcmp.gt.u16 q5, q6, q2 # encoding: [0xdf,0x36,0x05,0xc8] 
+esp.vcmp.gt.u32 q1, q5, q2 +# CHECK: esp.vcmp.gt.u32 q1, q5, q2 # encoding: [0xdf,0x2c,0x04,0xa8] +esp.vcmp.gt.u8 q1, q4, q4 +# CHECK: esp.vcmp.gt.u8 q1, q4, q4 # encoding: [0xdf,0x34,0x04,0x90] +esp.vcmp.lt.s16 q6, q2, q5 +# CHECK: esp.vcmp.lt.s16 q6, q2, q5 # encoding: [0x5f,0xb7,0x03,0x54] +esp.vcmp.lt.s32 q2, q3, q2 +# CHECK: esp.vcmp.lt.s32 q2, q3, q2 # encoding: [0x5f,0x2d,0x03,0x68] +esp.vcmp.lt.s8 q0, q6, q2 +# CHECK: esp.vcmp.lt.s8 q0, q6, q2 # encoding: [0x5f,0xb4,0x02,0xc8] +esp.vcmp.lt.u16 q0, q2, q5 +# CHECK: esp.vcmp.lt.u16 q0, q2, q5 # encoding: [0x5f,0x34,0x03,0x54] +esp.vcmp.lt.u32 q1, q0, q3 +# CHECK: esp.vcmp.lt.u32 q1, q0, q3 # encoding: [0xdf,0x2c,0x02,0x0c] +esp.vcmp.lt.u8 q1, q1, q4 +# CHECK: esp.vcmp.lt.u8 q1, q1, q4 # encoding: [0xdf,0x34,0x02,0x30] +esp.mov.s16.qacc q5 +# CHECK: esp.mov.s16.qacc q5 # encoding: [0x5b,0x14,0x60,0x10] +esp.mov.s8.qacc q3 +# CHECK: esp.mov.s8.qacc q3 # encoding: [0x5b,0x0c,0x20,0x10] +esp.mov.u16.qacc q6 +# CHECK: esp.mov.u16.qacc q6 # encoding: [0x5b,0x18,0x40,0x10] +esp.mov.u8.qacc q3 +# CHECK: esp.mov.u8.qacc q3 # encoding: [0x5b,0x0c,0x00,0x10] +esp.movi.16.a q5, a4, 2 +# CHECK: esp.movi.16.a q5, a4, 2 # encoding: [0x5f,0x03,0xc1,0x14] +esp.movi.16.q q1, a3, 7 +# CHECK: esp.movi.16.q q1, a3, 7 # encoding: [0xdf,0x83,0xe2,0x84] +esp.movi.32.a q6, a0, 2 +# CHECK: esp.movi.32.a q6, a0, 2 # encoding: [0x5f,0x01,0xc0,0x98] +esp.movi.32.q q6, a4, 2 +# CHECK: esp.movi.32.q q6, a4, 2 # encoding: [0x5f,0x04,0x93,0x98] +esp.movi.8.a q5, a5, 6 +# CHECK: esp.movi.8.a q5, a5, 6 # encoding: [0xdf,0x03,0x83,0x14] +esp.movi.8.q q2, a1, 15 +# CHECK: esp.movi.8.q q2, a1, 15 # encoding: [0xdf,0x87,0xa1,0x88] +esp.movx.r.cfg a4 +# CHECK: esp.movx.r.cfg a4 # encoding: [0x5f,0x03,0xd0,0x80] +esp.movx.r.fft.bit.width a2 +# CHECK: esp.movx.r.fft.bit.width a2 # encoding: [0x5f,0x02,0xd0,0x84] +esp.movx.r.perf a4, a1 +# CHECK: esp.movx.r.perf a4, a1 # encoding: [0x5f,0x83,0xd1,0x8c] +esp.movx.r.sar a5 +# CHECK: esp.movx.r.sar a5 # 
encoding: [0xdf,0x03,0xb0,0x80] +esp.movx.r.sar.bytes a4 +# CHECK: esp.movx.r.sar.bytes a4 # encoding: [0x5f,0x03,0xb0,0x88] +esp.movx.r.xacc.h a5 +# CHECK: esp.movx.r.xacc.h a5 # encoding: [0xdf,0x03,0xb0,0x8c] +esp.movx.r.xacc.l a3 +# CHECK: esp.movx.r.xacc.l a3 # encoding: [0xdf,0x02,0xb0,0x84] +esp.movx.w.cfg a1 +# CHECK: esp.movx.w.cfg a1 # encoding: [0x5f,0x80,0xd1,0x90] +esp.movx.w.fft.bit.width a1 +# CHECK: esp.movx.w.fft.bit.width a1 # encoding: [0x5f,0x80,0xd1,0x94] +esp.movx.w.perf a1 +# CHECK: esp.movx.w.perf a1 # encoding: [0x5f,0x80,0xd1,0x9c] +esp.movx.w.sar a2 +# CHECK: esp.movx.w.sar a2 # encoding: [0x5f,0x00,0xb2,0x90] +esp.movx.w.sar.bytes a1 +# CHECK: esp.movx.w.sar.bytes a1 # encoding: [0x5f,0x80,0xb1,0x98] +esp.movx.w.xacc.h a5 +# CHECK: esp.movx.w.xacc.h a5 # encoding: [0x5f,0x80,0xb3,0x9c] +esp.movx.w.xacc.l a2 +# CHECK: esp.movx.w.xacc.l a2 # encoding: [0x5f,0x00,0xb2,0x94] +esp.vext.s16 q3, q1, q5 +# CHECK: esp.vext.s16 q3, q1, q5 # encoding: [0xdb,0x59,0x18,0x19] +esp.vext.s8 q0, q0, q6 +# CHECK: esp.vext.s8 q0, q0, q6 # encoding: [0x5b,0x58,0x08,0x0a] +esp.vext.u16 q4, q2, q6 +# CHECK: esp.vext.u16 q4, q2, q6 # encoding: [0x5b,0x5a,0x28,0x12] +esp.vext.u8 q1, q2, q5 +# CHECK: esp.vext.u8 q1, q2, q5 # encoding: [0xdb,0x58,0x28,0x01] +esp.vunzip.16 q2, q3 +# CHECK: esp.vunzip.16 q2, q3 # encoding: [0x5f,0x00,0x86,0x4e] +esp.vunzip.32 q2, q1 +# CHECK: esp.vunzip.32 q2, q1 # encoding: [0x5f,0x80,0x84,0x46] +esp.vunzip.8 q3, q5 +# CHECK: esp.vunzip.8 q3, q5 # encoding: [0x5f,0x00,0x84,0x76] +esp.vunzipt.16 q5, q4, q5 +# CHECK: esp.vunzipt.16 q5, q4, q5 # encoding: [0x5b,0x4c,0xc8,0xb1] +esp.vunzipt.8 q3, q6, q4 +# CHECK: esp.vunzipt.8 q3, q6, q4 # encoding: [0x5b,0x4c,0x88,0x78] +esp.vzip.16 q1, q0 +# CHECK: esp.vzip.16 q1, q0 # encoding: [0x5f,0x00,0x82,0x22] +esp.vzip.32 q1, q1 +# CHECK: esp.vzip.32 q1, q1 # encoding: [0x5f,0x80,0x80,0x26] +esp.vzip.8 q2, q2 +# CHECK: esp.vzip.8 q2, q2 # encoding: [0x5f,0x00,0x80,0x4a] +esp.vzipt.16 q1, q2, 
q1 +# CHECK: esp.vzipt.16 q1, q2, q1 # encoding: [0x5b,0x4c,0x40,0x29] +esp.vzipt.8 q3, q3, q5 +# CHECK: esp.vzipt.8 q3, q3, q5 # encoding: [0x5b,0x4c,0x08,0x6d] +esp.zero.q q0 +# CHECK: esp.zero.q q0 # encoding: [0x5b,0x00,0x40,0x00] +esp.zero.qacc +# CHECK: esp.zero.qacc # encoding: [0x5b,0x02,0x00,0x00] +esp.zero.xacc +# CHECK: esp.zero.xacc # encoding: [0x5b,0x00,0x00,0x00] +esp.fft.ams.s16.ld.incp q5, a4, q6, q2, q3, q3, q0, 1 +# CHECK: esp.fft.ams.s16.ld.incp q5, a4, q6, q2, q3, q3, q0, 1 # encoding: [0x7b,0x17,0xa3,0x63] +esp.fft.ams.s16.ld.incp.uaup q1, a5, q3, q5, q6, q2, q0, 0 +# CHECK: esp.fft.ams.s16.ld.incp.uaup q1, a5, q3, q5, q6, q2, q0, 0 # encoding: [0xfb,0xa5,0x53,0xc2] +esp.fft.ams.s16.ld.r32.decp q4, a5, q3, q6, q5, q6, q6, 1 +# CHECK: esp.fft.ams.s16.ld.r32.decp q4, a5, q3, q6, q5, q6, q6, 1 # encoding: [0xfb,0xf1,0xeb,0xba] +esp.fft.ams.s16.st.incp q5, q3, a3, a1, q6, q2, q3, 0 +# CHECK: esp.fft.ams.s16.st.incp q5, q3, a3, a1, q6, q2, q3, 0 # encoding: [0xbf,0xb5,0x51,0xce] +esp.fft.bitrev q3, a5 +# CHECK: esp.fft.bitrev q3, a5 # encoding: [0x5b,0x82,0x33,0x10] +esp.fft.cmul.s16.ld.xp q3, a3, a4, q1, q3, q2, 4 +# CHECK: esp.fft.cmul.s16.ld.xp q3, a3, a4, q1, q3, q2, 4 # encoding: [0xbf,0x8c,0x62,0x4e] +esp.fft.cmul.s16.st.xp q1, q2, q3, a4, a0, 7, 1, 1 +# CHECK: esp.fft.cmul.s16.st.xp q1, q2, q3, a4, a0, 7, 1, 1 # encoding: [0xff,0x4f,0x23,0x45] +esp.fft.r2bf.s16 q3, q2, q5, q1, 1 +# CHECK: esp.fft.r2bf.s16 q3, q2, q5, q1, 1 # encoding: [0xdf,0x05,0xa5,0xa6] +esp.fft.r2bf.s16.st.incp q5, q4, q4, a5, 1 +# CHECK: esp.fft.r2bf.s16.st.incp q5, q4, q4, a5, 1 # encoding: [0xdf,0xe2,0x43,0x92] +esp.fft.vst.r32.decp q1, a4, 1 +# CHECK: esp.fft.vst.r32.decp q1, a4, 1 # encoding: [0x3b,0x24,0x03,0x80] +esp.ld.128.usar.ip q5, a4, -896 +# CHECK: esp.ld.128.usar.ip q5, a4, -896 # encoding: [0x3b,0x34,0x43,0xc8] +esp.ld.128.usar.xp q5, a2, a5 +# CHECK: esp.ld.128.usar.xp q5, a2, a5 # encoding: [0x5f,0x54,0x72,0x80] +esp.ld.xacc.ip a1, 616 +# CHECK: 
esp.ld.xacc.ip a1, 616 # encoding: [0x3b,0xd7,0x91,0x20] +esp.ldqa.s16.128.ip a3, -672 +# CHECK: esp.ldqa.s16.128.ip a3, -672 # encoding: [0xbb,0xcc,0xd2,0xe0] +esp.ldqa.s16.128.xp a4, a1 +# CHECK: esp.ldqa.s16.128.xp a4, a1 # encoding: [0x5b,0x53,0x33,0x13] +esp.ldqa.s8.128.ip a4, 1808 +# CHECK: esp.ldqa.s8.128.ip a4, 1808 # encoding: [0xbb,0x42,0x73,0x60] +esp.ldqa.s8.128.xp a0, a4 +# CHECK: esp.ldqa.s8.128.xp a0, a4 # encoding: [0x5b,0x51,0x61,0x13] +esp.ldqa.u16.128.ip a3, -496 +# CHECK: esp.ldqa.u16.128.ip a3, -496 # encoding: [0xbb,0xc2,0xe2,0xa0] +esp.ldqa.u16.128.xp a2, a4 +# CHECK: esp.ldqa.u16.128.xp a2, a4 # encoding: [0x5b,0x52,0x62,0x13] +esp.ldqa.u8.128.ip a2, 1200 +# CHECK: esp.ldqa.u8.128.ip a2, 1200 # encoding: [0xbb,0x56,0x42,0x20] +esp.ldqa.u8.128.xp a2, a5 +# CHECK: esp.ldqa.u8.128.xp a2, a5 # encoding: [0x5b,0x50,0x72,0x13] +esp.vldbc.16.ip q4, a3, 408 +# CHECK: esp.vldbc.16.ip q4, a3, 408 # encoding: [0x3b,0xb0,0x32,0xb6] +esp.vldbc.16.xp q5, a2, a1 +# CHECK: esp.vldbc.16.xp q5, a2, a1 # encoding: [0x5f,0x54,0x32,0x96] +esp.vldbc.32.ip q6, a2, -176 +# CHECK: esp.vldbc.32.ip q6, a2, -176 # encoding: [0x3b,0x38,0xa2,0xce] +esp.vldbc.32.xp q0, a1, a2 +# CHECK: esp.vldbc.32.xp q0, a1, a2 # encoding: [0x5f,0xc0,0x41,0x8e] +esp.vldbc.8.ip q6, a0, 200 +# CHECK: esp.vldbc.8.ip q6, a0, 200 # encoding: [0x3b,0x38,0x91,0x16] +esp.vldbc.8.xp q5, a4, a2 +# CHECK: esp.vldbc.8.xp q5, a4, a2 # encoding: [0x5f,0x54,0x43,0x86] +esp.vldext.s16.ip q4, q1, a3, -112 +# CHECK: esp.vldext.s16.ip q4, q1, a3, -112 # encoding: [0xbb,0xd0,0x92,0xc8] +esp.vldext.s16.xp q2, q4, a1, a0 +# CHECK: esp.vldext.s16.xp q2, q4, a1, a0 # encoding: [0x5f,0xea,0x21,0xf0] +esp.vldext.s8.ip q3, q2, a4, 0 +# CHECK: esp.vldext.s8.ip q3, q2, a4, 0 # encoding: [0x3b,0x4d,0x03,0x48] +esp.vldext.s8.xp q3, q6, a4, a1 +# CHECK: esp.vldext.s8.xp q3, q6, a4, a1 # encoding: [0x5f,0x6f,0x33,0x70] +esp.vldext.u16.ip q2, q1, a1, 48 +# CHECK: esp.vldext.u16.ip q2, q1, a1, 48 # encoding: 
[0xbb,0xc8,0x31,0x88] +esp.vldext.u16.xp q1, q2, a3, a1 +# CHECK: esp.vldext.u16.xp q1, q2, a3, a1 # encoding: [0x5f,0xe5,0x32,0xb0] +esp.vldext.u8.ip q5, q6, a0, -48 +# CHECK: esp.vldext.u8.ip q5, q6, a0, -48 # encoding: [0x3b,0x57,0xd1,0x08] +esp.vldext.u8.xp q0, q6, a3, a1 +# CHECK: esp.vldext.u8.xp q0, q6, a3, a1 # encoding: [0x5f,0xe3,0x32,0x30] +esp.vldhbc.16.incp q4, q2, a2 +# CHECK: esp.vldhbc.16.incp q4, q2, a2 # encoding: [0x3b,0x51,0x02,0x28] +esp.ld.qacc.h.h.128.ip a0, 816 +# CHECK: esp.ld.qacc.h.h.128.ip a0, 816 # encoding: [0x3b,0x46,0x31,0x40] +esp.ld.qacc.h.l.128.ip a2, -496 +# CHECK: esp.ld.qacc.h.l.128.ip a2, -496 # encoding: [0x3b,0x42,0xe2,0x60] +esp.ld.qacc.l.h.128.ip a1, -432 +# CHECK: esp.ld.qacc.l.h.128.ip a1, -432 # encoding: [0x3b,0xca,0xe1,0x00] +esp.ld.qacc.l.l.128.ip a0, 1840 +# CHECK: esp.ld.qacc.l.l.128.ip a0, 1840 # encoding: [0x3b,0x46,0x71,0x20] +esp.ld.ua.state.ip a4, -1392 +# CHECK: esp.ld.ua.state.ip a4, -1392 # encoding: [0x3b,0x45,0x53,0x60] +esp.ldxq.32 q5, q4, a4, 3, 6 +# CHECK: esp.ldxq.32 q5, q4, a4, 3, 6 # encoding: [0x5f,0x34,0x8b,0x78] +esp.st.qacc.h.h.128.ip a4, 656 +# CHECK: esp.st.qacc.h.h.128.ip a4, 656 # encoding: [0x3b,0x52,0x23,0xc0] +esp.st.qacc.h.l.128.ip a3, -1072 +# CHECK: esp.st.qacc.h.l.128.ip a3, -1072 # encoding: [0x3b,0xda,0xb2,0xe0] +esp.st.qacc.l.h.128.ip a2, 784 +# CHECK: esp.st.qacc.l.h.128.ip a2, 784 # encoding: [0x3b,0x42,0x32,0x80] +esp.st.qacc.l.l.128.ip a3, -736 +# CHECK: esp.st.qacc.l.l.128.ip a3, -736 # encoding: [0x3b,0xc4,0xd2,0xa0] +esp.st.ua.state.ip a5, 1376 +# CHECK: esp.st.ua.state.ip a5, 1376 # encoding: [0x3b,0xd9,0xa3,0xa0] +esp.stxq.32 q6, q5, a5, 3, 4 +# CHECK: esp.stxq.32 q6, q5, a5, 3, 4 # encoding: [0x5f,0xb8,0x8b,0xf1] +esp.vld.128.ip q1, a3, 560 +# CHECK: esp.vld.128.ip q1, a3, 560 # encoding: [0x3b,0xa6,0x12,0x12] +esp.vld.128.xp q1, a5, a3 +# CHECK: esp.vld.128.xp q1, a5, a3 # encoding: [0x5f,0xc4,0x53,0x82] +esp.vld.h.64.ip q4, a5, -568 +# CHECK: esp.vld.h.64.ip q4, a5, 
-568 # encoding: [0x3b,0xb2,0xc3,0x6c] +esp.vld.h.64.xp q1, a1, a2 +# CHECK: esp.vld.h.64.xp q1, a1, a2 # encoding: [0x5f,0xc4,0x41,0x8c] +esp.vld.l.64.ip q2, a3, -696 +# CHECK: esp.vld.l.64.ip q2, a3, -696 # encoding: [0x3b,0xaa,0x42,0x2c] +esp.vld.l.64.xp q5, a4, a4 +# CHECK: esp.vld.l.64.xp q5, a4, a4 # encoding: [0x5f,0x54,0x63,0x84] +esp.vst.128.ip q3, a4, 1088 +# CHECK: esp.vst.128.ip q3, a4, 1088 # encoding: [0x3b,0x2c,0x23,0xa2] +esp.vst.128.xp q1, a4, a0 +# CHECK: esp.vst.128.xp q1, a4, a0 # encoding: [0x5f,0x44,0x23,0x92] +esp.vst.h.64.ip q5, a4, -136 +# CHECK: esp.vst.h.64.ip q5, a4, -136 # encoding: [0x3b,0x36,0x73,0xfc] +esp.vst.h.64.xp q1, a0, a2 +# CHECK: esp.vst.h.64.xp q1, a0, a2 # encoding: [0x5f,0x44,0x41,0x9c] +esp.vst.l.64.ip q5, a0, -440 +# CHECK: esp.vst.l.64.ip q5, a0, -440 # encoding: [0x3b,0x36,0x41,0xb4] +esp.vst.l.64.xp q2, a5, a3 +# CHECK: esp.vst.l.64.xp q2, a5, a3 # encoding: [0x5f,0xc8,0x53,0x94] +esp.slci.2q q2, q6, 10 +# CHECK: esp.slci.2q q2, q6, 10 # encoding: [0x5b,0x49,0x48,0x0a] +esp.slcxxp.2q q2, q1, a0, a3 +# CHECK: esp.slcxxp.2q q2, q1, a0, a3 # encoding: [0x5f,0x40,0x51,0x09] +esp.src.q q5, q6, q2 +# CHECK: esp.src.q q5, q6, q2 # encoding: [0xdb,0x02,0x2c,0x8a] +esp.src.q.ld.ip q3, a4, 48, q6, q6 +# CHECK: esp.src.q.ld.ip q3, a4, 48, q6, q6 # encoding: [0x3b,0x2f,0x1b,0x1a] +esp.src.q.ld.xp q6, a5, a0, q6, q6 +# CHECK: esp.src.q.ld.xp q6, a5, a0, q6, q6 # encoding: [0x3b,0x98,0x2b,0x1a] +esp.src.q.qup q1, q5, q0 +# CHECK: esp.src.q.qup q1, q5, q0 # encoding: [0xdb,0x10,0x2c,0x81] +esp.srci.2q q5, q5, 11 +# CHECK: esp.srci.2q q5, q5, 11 # encoding: [0xdb,0x49,0xc8,0x15] +esp.srcmb.s16.q.qacc q6, q2, 0 +# CHECK: esp.srcmb.s16.q.qacc q6, q2, 0 # encoding: [0x5b,0x18,0x64,0x9a] +esp.srcmb.s16.qacc q2, a2, 0 +# CHECK: esp.srcmb.s16.qacc q2, a2, 0 # encoding: [0x3b,0x28,0x02,0xd8] +esp.srcmb.s8.q.qacc q5, q3, 0 +# CHECK: esp.srcmb.s8.q.qacc q5, q3, 0 # encoding: [0x5b,0x14,0x64,0x8b] +esp.srcmb.s8.qacc q1, a3, 1 +# CHECK: 
esp.srcmb.s8.qacc q1, a3, 1 # encoding: [0x3b,0xa4,0x02,0x78] +esp.srcmb.u16.q.qacc q3, q4, 0 +# CHECK: esp.srcmb.u16.q.qacc q3, q4, 0 # encoding: [0x5b,0x0c,0x6c,0x90] +esp.srcmb.u16.qacc q1, a0, 1 +# CHECK: esp.srcmb.u16.qacc q1, a0, 1 # encoding: [0x3b,0x24,0x01,0xb8] +esp.srcmb.u8.q.qacc q0, q5, 1 +# CHECK: esp.srcmb.u8.q.qacc q0, q5, 1 # encoding: [0x5b,0x00,0x6c,0x85] +esp.srcmb.u8.qacc q1, a3, 1 +# CHECK: esp.srcmb.u8.qacc q1, a3, 1 # encoding: [0x3b,0xa4,0x02,0x38] +esp.srcq.128.st.incp q1, q2, a0 +# CHECK: esp.srcq.128.st.incp q1, q2, a0 # encoding: [0x5b,0x40,0x01,0x09] +esp.srcxxp.2q q3, q3, a4, a4 +# CHECK: esp.srcxxp.2q q3, q3, a4, a4 # encoding: [0x5f,0x44,0x63,0x0f] +esp.srs.s.xacc a0, a0 +# CHECK: esp.srs.s.xacc a0, a0 # encoding: [0x5f,0x01,0xf1,0x94] +esp.srs.u.xacc a0, a3 +# CHECK: esp.srs.u.xacc a0, a3 # encoding: [0x5f,0x81,0xf2,0x84] +esp.vsl.32 q6, q4 +# CHECK: esp.vsl.32 q6, q4 # encoding: [0x5b,0x18,0x04,0x90] +esp.vsld.16 q6, q5, q1 +# CHECK: esp.vsld.16 q6, q5, q1 # encoding: [0x5f,0x18,0x20,0x15] +esp.vsld.32 q2, q4, q3 +# CHECK: esp.vsld.32 q2, q4, q3 # encoding: [0x5f,0x08,0x10,0x13] +esp.vsld.8 q1, q1, q4 +# CHECK: esp.vsld.8 q1, q1, q4 # encoding: [0x5f,0x04,0x08,0x04] +esp.vsr.s32 q1, q3 +# CHECK: esp.vsr.s32 q1, q3 # encoding: [0x5b,0x07,0x04,0x8c] +esp.vsr.u32 q5, q2 +# CHECK: esp.vsr.u32 q5, q2 # encoding: [0x5b,0x15,0x04,0x88] +esp.vsrd.16 q1, q5, q5 +# CHECK: esp.vsrd.16 q1, q5, q5 # encoding: [0x5f,0x04,0x68,0x15] +esp.vsrd.32 q1, q6, q3 +# CHECK: esp.vsrd.32 q1, q6, q3 # encoding: [0x5f,0x04,0x50,0x1b] +esp.vsrd.8 q0, q3, q1 +# CHECK: esp.vsrd.8 q0, q3, q1 # encoding: [0x5f,0x00,0x40,0x0d] +esp.st.s.xacc.ip a1, 304 +# CHECK: esp.st.s.xacc.ip a1, 304 # encoding: [0xbb,0xd9,0x41,0xa0] +esp.st.u.xacc.ip a2, 976 +# CHECK: esp.st.u.xacc.ip a2, 976 # encoding: [0xbb,0x49,0xf2,0x20] diff --git a/llvm/test/MC/RISCV/rv64xtheadmemidx-invalid.s b/llvm/test/MC/RISCV/rv64xtheadmemidx-invalid.s index fe6d0de0a4b001..e45c43a50048ad 100644 
--- a/llvm/test/MC/RISCV/rv64xtheadmemidx-invalid.s +++ b/llvm/test/MC/RISCV/rv64xtheadmemidx-invalid.s @@ -1,7 +1,7 @@ # RUN: not llvm-mc -triple riscv32 -mattr=+xtheadmemidx < %s 2>&1 | FileCheck %s # RUN: not llvm-mc -triple riscv64 -mattr=+xtheadmemidx < %s 2>&1 | FileCheck %s -th.ldia 0(a0), (a1), 0, 0 # CHECK: :[[@LINE]]:23: error: invalid operand for instruction +th.ldia 0(a0), (a1), 0, 0 # CHECK: :[[@LINE]]:26: error: invalid operand for instruction th.ldib a0, 2(a1), 15, 1 # CHECK: :[[@LINE]]:14: error: invalid operand for instruction th.lwia a0, (a1), 30, 2 # CHECK: :[[@LINE]]:20: error: immediate must be an integer in the range [-16, 15] th.lwib a0, (a1), -16, 43 # CHECK: :[[@LINE]]:25: error: immediate must be an integer in the range [0, 3] diff --git a/llvm/unittests/Support/RISCVISAInfoTest.cpp b/llvm/unittests/Support/RISCVISAInfoTest.cpp index 24ed3e2e4b0561..5dff9c66bbb523 100644 --- a/llvm/unittests/Support/RISCVISAInfoTest.cpp +++ b/llvm/unittests/Support/RISCVISAInfoTest.cpp @@ -769,6 +769,7 @@ R"(All available -march extensions for RISC-V xcvmac 1.0 xcvmem 1.0 xcvsimd 1.0 + xesppie 1.0 xsfvcp 1.0 xsfvfnrclipxfqf 1.0 xsfvfwmaccqqq 1.0 diff --git a/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/llvm/utils/TableGen/AsmMatcherEmitter.cpp index 73724e662f9e87..3ee0d9b39d8756 100644 --- a/llvm/utils/TableGen/AsmMatcherEmitter.cpp +++ b/llvm/utils/TableGen/AsmMatcherEmitter.cpp @@ -2008,10 +2008,11 @@ emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName, << " &>(*Operands[OpIdx]).addRegOperands(Inst, 1);\n"; CvtOS << " break;\n"; CvtOS << " case CVT_Tied: {\n"; - CvtOS << " assert(OpIdx < (size_t)(std::end(TiedAsmOperandTable) -\n"; + CvtOS << " unsigned OpIdxTied = *(p + 1);\n"; + CvtOS << " assert(OpIdxTied < (size_t)(std::end(TiedAsmOperandTable) -\n"; CvtOS << " std::begin(TiedAsmOperandTable)) &&\n"; CvtOS << " \"Tied operand not found\");\n"; - CvtOS << " unsigned TiedResOpnd = TiedAsmOperandTable[OpIdx][0];\n"; + CvtOS << " unsigned 
TiedResOpnd = TiedAsmOperandTable[OpIdxTied][0];\n"; CvtOS << " if (TiedResOpnd != (uint8_t)-1)\n"; CvtOS << " Inst.addOperand(Inst.getOperand(TiedResOpnd));\n"; CvtOS << " break;\n";