[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [PATCH qemu v14 14/15] target/riscv: rvv: Add tail agnostic for vector permutation instructions
From: Alistair Francis
Subject: Re: [PATCH qemu v14 14/15] target/riscv: rvv: Add tail agnostic for vector permutation instructions
Date: Tue, 10 May 2022 12:11:56 +0200
On Tue, May 3, 2022 at 9:48 AM ~eopxd <eopxd@git.sr.ht> wrote:
>
> From: eopXD <eop.chen@sifive.com>
>
> Signed-off-by: eop Chen <eop.chen@sifive.com>
> Reviewed-by: Frank Chang <frank.chang@sifive.com>
> Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> target/riscv/insn_trans/trans_rvv.c.inc | 23 ++++++++++++++
> target/riscv/vector_helper.c | 40 +++++++++++++++++++++++++
> 2 files changed, 63 insertions(+)
>
> diff --git a/target/riscv/insn_trans/trans_rvv.c.inc
> b/target/riscv/insn_trans/trans_rvv.c.inc
> index a27433b324..a12b1a23f1 100644
> --- a/target/riscv/insn_trans/trans_rvv.c.inc
> +++ b/target/riscv/insn_trans/trans_rvv.c.inc
> @@ -3746,6 +3746,16 @@ static bool trans_vrgather_vx(DisasContext *s,
> arg_rmrr *a)
> }
>
> if (a->vm && s->vl_eq_vlmax) {
> + if (s->vta && s->lmul < 0) {
> + /*
> + * tail elements may pass vlmax when lmul < 0
> + * set tail elements to 1s
> + */
> + uint32_t vlenb = s->cfg_ptr->vlen >> 3;
> + tcg_gen_gvec_ori(s->sew, vreg_ofs(s, a->rd),
> + vreg_ofs(s, a->rd), -1,
> + vlenb, vlenb);
> + }
> int scale = s->lmul - (s->sew + 3);
> int vlmax = scale < 0 ?
> s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen <<
> scale;
> @@ -3779,6 +3789,16 @@ static bool trans_vrgather_vi(DisasContext *s,
> arg_rmrr *a)
> }
>
> if (a->vm && s->vl_eq_vlmax) {
> + if (s->vta && s->lmul < 0) {
> + /*
> + * tail elements may pass vlmax when lmul < 0
> + * set tail elements to 1s
> + */
> + uint32_t vlenb = s->cfg_ptr->vlen >> 3;
> + tcg_gen_gvec_ori(s->sew, vreg_ofs(s, a->rd),
> + vreg_ofs(s, a->rd), -1,
> + vlenb, vlenb);
> + }
> int scale = s->lmul - (s->sew + 3);
> int vlmax = scale < 0 ?
> s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen <<
> scale;
> @@ -3831,6 +3851,7 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r
> *a)
> tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
>
> data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
> + data = FIELD_DP32(data, VDATA, VTA, s->vta);
> tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
> vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
> cpu_env, s->cfg_ptr->vlen / 8,
> @@ -3936,6 +3957,8 @@ static bool int_ext_op(DisasContext *s, arg_rmr *a,
> uint8_t seq)
> }
>
> data = FIELD_DP32(data, VDATA, VM, a->vm);
> + data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
> + data = FIELD_DP32(data, VDATA, VTA, s->vta);
>
> tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
> vreg_ofs(s, a->rs2), cpu_env,
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index dcb6d3538c..8b3833b299 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -4933,6 +4933,9 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,
> void *vs2, \
> { \
> uint32_t vm = vext_vm(desc); \
> uint32_t vl = env->vl; \
> + uint32_t esz = sizeof(ETYPE); \
> + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> + uint32_t vta = vext_vta(desc); \
> target_ulong offset = s1, i_min, i; \
> \
> i_min = MAX(env->vstart, offset); \
> @@ -4942,6 +4945,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,
> void *vs2, \
> } \
> *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
> } \
> + /* set tail elements to 1s */ \
> + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> }
>
> /* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
> @@ -4957,6 +4962,9 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,
> void *vs2, \
> uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
> uint32_t vm = vext_vm(desc); \
> uint32_t vl = env->vl; \
> + uint32_t esz = sizeof(ETYPE); \
> + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> + uint32_t vta = vext_vta(desc); \
> target_ulong i_max, i; \
> \
> i_max = MAX(MIN(s1 < vlmax ? vlmax - s1 : 0, vl), env->vstart); \
> @@ -4973,6 +4981,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,
> void *vs2, \
> } \
> \
> env->vstart = 0; \
> + /* set tail elements to 1s */ \
> + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> }
>
> /* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
> @@ -4988,6 +4998,9 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0,
> target_ulong s1, \
> typedef uint##BITWIDTH##_t ETYPE; \
> uint32_t vm = vext_vm(desc); \
> uint32_t vl = env->vl; \
> + uint32_t esz = sizeof(ETYPE); \
> + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> + uint32_t vta = vext_vta(desc); \
> uint32_t i; \
> \
> for (i = env->vstart; i < vl; i++) { \
> @@ -5001,6 +5014,8 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0,
> target_ulong s1, \
> } \
> } \
> env->vstart = 0; \
> + /* set tail elements to 1s */ \
> + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_VSLIE1UP(8, H1)
> @@ -5028,6 +5043,9 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0,
> target_ulong s1, \
> typedef uint##BITWIDTH##_t ETYPE;
> \
> uint32_t vm = vext_vm(desc);
> \
> uint32_t vl = env->vl;
> \
> + uint32_t esz = sizeof(ETYPE);
> \
> + uint32_t total_elems = vext_get_total_elems(env, desc, esz);
> \
> + uint32_t vta = vext_vta(desc);
> \
> uint32_t i;
> \
>
> \
> for (i = env->vstart; i < vl; i++) {
> \
> @@ -5041,6 +5059,8 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0,
> target_ulong s1, \
> }
> \
> }
> \
> env->vstart = 0;
> \
> + /* set tail elements to 1s */
> \
> + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
> \
> }
>
> GEN_VEXT_VSLIDE1DOWN(8, H1)
> @@ -5094,6 +5114,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void
> *vs2, \
> uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(TS2))); \
> uint32_t vm = vext_vm(desc); \
> uint32_t vl = env->vl; \
> + uint32_t esz = sizeof(TS2); \
> + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> + uint32_t vta = vext_vta(desc); \
> uint64_t index; \
> uint32_t i; \
> \
> @@ -5109,6 +5132,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void
> *vs2, \
> } \
> } \
> env->vstart = 0; \
> + /* set tail elements to 1s */ \
> + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> }
>
> /* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
> @@ -5129,6 +5154,9 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,
> void *vs2, \
> uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
> uint32_t vm = vext_vm(desc); \
> uint32_t vl = env->vl; \
> + uint32_t esz = sizeof(ETYPE); \
> + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> + uint32_t vta = vext_vta(desc); \
> uint64_t index = s1; \
> uint32_t i; \
> \
> @@ -5143,6 +5171,8 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1,
> void *vs2, \
> } \
> } \
> env->vstart = 0; \
> + /* set tail elements to 1s */ \
> + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> }
>
> /* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
> @@ -5157,6 +5187,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void
> *vs2, \
> CPURISCVState *env, uint32_t desc) \
> { \
> uint32_t vl = env->vl; \
> + uint32_t esz = sizeof(ETYPE); \
> + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> + uint32_t vta = vext_vta(desc); \
> uint32_t num = 0, i; \
> \
> for (i = env->vstart; i < vl; i++) { \
> @@ -5167,6 +5200,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void
> *vs2, \
> num++; \
> } \
> env->vstart = 0; \
> + /* set tail elements to 1s */ \
> + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> }
>
> /* Compress into vd elements of vs2 where vs1 is enabled */
> @@ -5203,6 +5238,9 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2,
> \
> { \
> uint32_t vl = env->vl; \
> uint32_t vm = vext_vm(desc); \
> + uint32_t esz = sizeof(ETYPE); \
> + uint32_t total_elems = vext_get_total_elems(env, desc, esz); \
> + uint32_t vta = vext_vta(desc); \
> uint32_t i; \
> \
> for (i = env->vstart; i < vl; i++) { \
> @@ -5212,6 +5250,8 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2,
> \
> *((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i)); \
> } \
> env->vstart = 0; \
> + /* set tail elements to 1s */ \
> + vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t, H2, H1)
> --
> 2.34.2
>
>
- Re: [PATCH qemu v14 05/15] target/riscv: rvv: Add tail agnostic for vector load / store instructions, (continued)
- [PATCH qemu v14 08/15] target/riscv: rvv: Add tail agnostic for vector integer comparison instructions, ~eopxd, 2022/05/03
- [PATCH qemu v14 06/15] target/riscv: rvv: Add tail agnostic for vx, vvm, vxm instructions, ~eopxd, 2022/05/03
- [PATCH qemu v14 12/15] target/riscv: rvv: Add tail agnostic for vector reduction instructions, ~eopxd, 2022/05/03
- [PATCH qemu v14 10/15] target/riscv: rvv: Add tail agnostic for vector fix-point arithmetic instructions, ~eopxd, 2022/05/03
- [PATCH qemu v14 14/15] target/riscv: rvv: Add tail agnostic for vector permutation instructions, ~eopxd, 2022/05/03
- Re: [PATCH qemu v14 14/15] target/riscv: rvv: Add tail agnostic for vector permutation instructions,
Alistair Francis <=
- [PATCH qemu v14 15/15] target/riscv: rvv: Add option 'rvv_ta_all_1s' to enable optional tail agnostic behavior, ~eopxd, 2022/05/03
- [PATCH qemu v14 13/15] target/riscv: rvv: Add tail agnostic for vector mask instructions, ~eopxd, 2022/05/03
- [PATCH qemu v14 09/15] target/riscv: rvv: Add tail agnostic for vector integer merge and move instructions, ~eopxd, 2022/05/03
- [PATCH qemu v14 11/15] target/riscv: rvv: Add tail agnostic for vector floating-point instructions, ~eopxd, 2022/05/03
- Re: [PATCH qemu v14 00/15] Add tail agnostic behavior for rvv instructions, Alistair Francis, 2022/05/10