[PATCH v5 57/60] target/riscv: vector slide instructions
From: LIU Zhiwei
Subject: [PATCH v5 57/60] target/riscv: vector slide instructions
Date: Thu, 12 Mar 2020 22:58:57 +0800
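
This patch adds the vector slide instructions: vslideup.vx, vslideup.vi,
vslide1up.vx, vslidedown.vx, vslidedown.vi and vslide1down.vx.

As a reading aid (editorial, not part of the patch), here is a minimal
scalar model of the slideup/slidedown semantics the helpers below
implement; masking and tail clearing are omitted, the ref_* names are
invented for this sketch, and the element type is fixed to uint32_t for
brevity:

    #include <stdint.h>

    /* vslideup.vx vd, vs2, rs1 # vd[i + offset] = vs2[i] */
    static void ref_vslideup(uint32_t *vd, const uint32_t *vs2,
                             uint32_t offset, uint32_t vl)
    {
        for (uint32_t i = offset; i < vl; i++) {
            vd[i] = vs2[i - offset];
        }
    }

    /* vslidedown.vx vd, vs2, rs1 # vd[i] = vs2[i + offset];
     * elements sourced at or beyond vlmax read as zero. */
    static void ref_vslidedown(uint32_t *vd, const uint32_t *vs2,
                               uint32_t offset, uint32_t vl,
                               uint32_t vlmax)
    {
        for (uint32_t i = 0; i < vl; i++) {
            vd[i] = (i + offset < vlmax) ? vs2[i + offset] : 0;
        }
    }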
Signed-off-by: LIU Zhiwei <address@hidden>
---
target/riscv/helper.h | 17 +++
target/riscv/insn32.decode | 7 ++
target/riscv/insn_trans/trans_rvv.inc.c | 17 +++
target/riscv/vector_helper.c | 132 ++++++++++++++++++++++++
4 files changed, 173 insertions(+)
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 7a689a5c07..e86df5b9e4 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1120,3 +1120,20 @@ DEF_HELPER_3(vfmv_s_f_b, void, ptr, i64, env)
DEF_HELPER_3(vfmv_s_f_h, void, ptr, i64, env)
DEF_HELPER_3(vfmv_s_f_w, void, ptr, i64, env)
DEF_HELPER_3(vfmv_s_f_d, void, ptr, i64, env)
+
+DEF_HELPER_6(vslideup_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslideup_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslideup_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslideup_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslidedown_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1up_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index bfdce0979c..e6ade9c68e 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -72,6 +72,7 @@
@r2_vm ...... vm:1 ..... ..... ... ..... ....... &rmr %rs2 %rd
@r1_vm ...... vm:1 ..... ..... ... ..... ....... %rd
@r_nfvm ... ... vm:1 ..... ..... ... ..... ....... &rnfvm %nf %rs2 %rs1 %rd
+@r2rd ....... ..... ..... ... ..... ....... %rs2 %rd
@r_vm ...... vm:1 ..... ..... ... ..... ....... &rmrr %rs2 %rs1 %rd
@r_wdvm ..... wd:1 vm:1 ..... ..... ... ..... ....... &rwdvm %rs2 %rs1 %rd
@r2_zimm . zimm:11 ..... ... ..... ....... %rs1 %rd
@@ -559,6 +560,12 @@ vext_x_v 001100 1 ..... ..... 010 ..... 1010111 @r
vmv_s_x 001101 1 00000 ..... 110 ..... 1010111 @r2
vfmv_f_s 001100 1 ..... 00000 001 ..... 1010111 @r2rd
vfmv_s_f 001101 1 00000 ..... 101 ..... 1010111 @r2
+vslideup_vx 001110 . ..... ..... 100 ..... 1010111 @r_vm
+vslideup_vi 001110 . ..... ..... 011 ..... 1010111 @r_vm
+vslide1up_vx 001110 . ..... ..... 110 ..... 1010111 @r_vm
+vslidedown_vx 001111 . ..... ..... 100 ..... 1010111 @r_vm
+vslidedown_vi 001111 . ..... ..... 011 ..... 1010111 @r_vm
+vslide1down_vx 001111 . ..... ..... 110 ..... 1010111 @r_vm
vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm
vsetvl 1000000 ..... ..... 111 ..... 1010111 @r
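
A note on the new encodings (editorial, not part of the patch): funct6
001110 selects the slide-up group and 001111 the slide-down group, with
funct3 100 for the .vx forms, 011 for the .vi forms, and 110 for the
scalar-operand vslide1up/vslide1down forms. As a hand-assembled sanity
check (illustrative only; the register numbers are arbitrary and the
result is worth re-verifying with an assembler):

    /* vslideup.vx v2, v4, a0, with vm=1 (unmasked):
     *   funct6=001110  vm=1  vs2=00100 (v4)  rs1=01010 (x10/a0)
     *   funct3=100     rd=00010 (v2)   opcode=1010111
     * => 0011 1010 0100 0101 0100 0001 0101 0111 = 0x3a454157
     */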
diff --git a/target/riscv/insn_trans/trans_rvv.inc.c b/target/riscv/insn_trans/trans_rvv.inc.c
index 99cd45b0aa..ef5960ba39 100644
--- a/target/riscv/insn_trans/trans_rvv.inc.c
+++ b/target/riscv/insn_trans/trans_rvv.inc.c
@@ -2316,3 +2316,20 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
}
return false;
}
+
+/* Vector Slide Instructions */
+static bool slideup_check(DisasContext *s, arg_rmrr *a)
+{
+    return (vext_check_isa_ill(s, RVV) &&
+            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+            vext_check_reg(s, a->rd, false) &&
+            vext_check_reg(s, a->rs2, false) &&
+            (a->rd != a->rs2));
+}
+GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
+GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
+GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check)
+
+GEN_OPIVX_TRANS(vslidedown_vx, opivx_check)
+GEN_OPIVX_TRANS(vslide1down_vx, opivx_check)
+GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, opivx_check)
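
A note on slideup_check (editorial, not part of the patch): the extra
(a->rd != a->rs2) test enforces the source/destination overlap
restriction for the slide-up direction. The helper loop below writes
vd[i] from vs2[i - offset] in increasing i order, so an in-place slide
would read elements it has already overwritten. A concrete illustration
of the hazard, with element values chosen only for this example:

    uint32_t v[4] = {1, 2, 3, 4};
    /* In-place slide up by 1, processed in increasing i:
     *   v[1] = v[0];   -> {1, 1, 3, 4}
     *   v[2] = v[1];   -> {1, 1, 1, 4}   reads the overwritten value
     *   v[3] = v[2];   -> {1, 1, 1, 1}   instead of {1, 1, 2, 3}
     */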
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 3235c3fbe1..2219fdd6c5 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -4511,3 +4511,135 @@ uint64_t HELPER(vfmv_f_s_d)(void *vs2, CPURISCVState *env)
return deposit64(*((uint64_t *)vs2), 32, 32, 0xffffffff);
}
}
+
+/* Vector Slide Instructions */
+/*
+ * The spec does not specify the behavior when the offset is larger
+ * than vl, so just truncate the offset to vl here.
+ */
+#define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H, CLEAR_FN)                   \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
+                  CPURISCVState *env, uint32_t desc)                     \
+{                                                                        \
+    uint32_t mlen = vext_mlen(desc);                                     \
+    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                  \
+    uint32_t vm = vext_vm(desc);                                         \
+    uint32_t vl = env->vl;                                               \
+    target_ulong offset = s1, i;                                         \
+                                                                         \
+    if (offset > vl) {                                                   \
+        offset = vl;                                                     \
+    }                                                                    \
+    for (i = 0; i < vl; i++) {                                           \
+        if (i < offset || (!vm && !vext_elem_mask(v0, mlen, i))) {       \
+            continue;                                                    \
+        }                                                                \
+        *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset));         \
+    }                                                                    \
+    if (i == 0) {                                                        \
+        return;                                                          \
+    }                                                                    \
+    /* CLEAR_FN zeroes the whole tail [vl, vlmax) in a single call. */   \
+    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));         \
+}
+/* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d, uint64_t, H8, clearq)
+
+#define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H, CLEAR_FN)                 \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
+                  CPURISCVState *env, uint32_t desc)                     \
+{                                                                        \
+    uint32_t mlen = vext_mlen(desc);                                     \
+    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                  \
+    uint32_t vm = vext_vm(desc);                                         \
+    uint32_t vl = env->vl;                                               \
+    target_ulong offset = s1, i;                                         \
+                                                                         \
+    for (i = 0; i < vl; i++) {                                           \
+        if (!vm && !vext_elem_mask(v0, mlen, i)) {                       \
+            continue;                                                    \
+        }                                                                \
+        if (i + offset < vlmax) {                                        \
+            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + offset));     \
+        } else {                                                         \
+            *((ETYPE *)vd + H(i)) = 0;                                   \
+        }                                                                \
+    }                                                                    \
+    if (i == 0) {                                                        \
+        return;                                                          \
+    }                                                                    \
+    /* CLEAR_FN zeroes the whole tail [vl, vlmax) in a single call. */   \
+    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));         \
+}
+/* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d, uint64_t, H8, clearq)
+
+#define GEN_VEXT_VSLIDE1UP_VX(NAME, ETYPE, H, CLEAR_FN)                  \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
+                  CPURISCVState *env, uint32_t desc)                     \
+{                                                                        \
+    uint32_t mlen = vext_mlen(desc);                                     \
+    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                  \
+    uint32_t vm = vext_vm(desc);                                         \
+    uint32_t vl = env->vl;                                               \
+    uint32_t i;                                                          \
+                                                                         \
+    for (i = 0; i < vl; i++) {                                           \
+        if (!vm && !vext_elem_mask(v0, mlen, i)) {                       \
+            continue;                                                    \
+        }                                                                \
+        if (i == 0) {                                                    \
+            *((ETYPE *)vd + H(i)) = s1;                                  \
+        } else {                                                         \
+            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1));          \
+        }                                                                \
+    }                                                                    \
+    if (i == 0) {                                                        \
+        return;                                                          \
+    }                                                                    \
+    /* CLEAR_FN zeroes the whole tail [vl, vlmax) in a single call. */   \
+    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));         \
+}
+/* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d, uint64_t, H8, clearq)
+
+#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ETYPE, H, CLEAR_FN)                \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2,        \
+                  CPURISCVState *env, uint32_t desc)                     \
+{                                                                        \
+    uint32_t mlen = vext_mlen(desc);                                     \
+    uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen;                  \
+    uint32_t vm = vext_vm(desc);                                         \
+    uint32_t vl = env->vl;                                               \
+    uint32_t i;                                                          \
+                                                                         \
+    for (i = 0; i < vl; i++) {                                           \
+        if (!vm && !vext_elem_mask(v0, mlen, i)) {                       \
+            continue;                                                    \
+        }                                                                \
+        if (i == vl - 1) {                                                \
+            *((ETYPE *)vd + H(i)) = s1;                                  \
+        } else {                                                         \
+            *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1));          \
+        }                                                                \
+    }                                                                    \
+    if (i == 0) {                                                        \
+        return;                                                          \
+    }                                                                    \
+    /* CLEAR_FN zeroes the whole tail [vl, vlmax) in a single call. */   \
+    CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE));         \
+}
+/* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8, clearq)
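
For completeness (editorial, not part of the patch), a scalar sketch of
the vslide1up/vslide1down semantics implemented above; masking and tail
clearing are omitted, the ref_* names are invented for this sketch, vd
and vs2 are assumed not to alias, and elements are fixed to uint32_t:

    #include <stdint.h>

    /* vslide1up.vx:   vd[0] = s1, vd[i] = vs2[i - 1] for 0 < i < vl */
    static void ref_vslide1up(uint32_t *vd, const uint32_t *vs2,
                              uint32_t s1, uint32_t vl)
    {
        for (uint32_t i = 0; i < vl; i++) {
            vd[i] = (i == 0) ? s1 : vs2[i - 1];
        }
    }

    /* vslide1down.vx: vd[i] = vs2[i + 1] for i < vl - 1, vd[vl-1] = s1 */
    static void ref_vslide1down(uint32_t *vd, const uint32_t *vs2,
                                uint32_t s1, uint32_t vl)
    {
        for (uint32_t i = 0; i < vl; i++) {
            vd[i] = (i == vl - 1) ? s1 : vs2[i + 1];
        }
    }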
--
2.23.0