qemu-riscv
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH 2/2] target/riscv: Check 'A' and split extensions for atomic


From: Rob Bradford
Subject: Re: [PATCH 2/2] target/riscv: Check 'A' and split extensions for atomic instructions
Date: Fri, 19 Jan 2024 11:10:19 +0000
User-agent: Evolution 3.48.4 (3.48.4-1.module_f38+17164+63eeee4a)

On Thu, 2024-01-18 at 16:49 -0300, Daniel Henrique Barboza wrote:
> 
> 
> On 1/15/24 13:25, Rob Bradford wrote:
> > Following the pattern for 'M' and Zmmul check if either the 'A'
> > extension is enabled or the appropriate split extension for the
> > instruction.
> > 
> > Also remove the assumption that only checking for 64-bit systems is
> > required for the double word variants.
> 
> Code LGTM but I don't understand what you mean in this sentence. The
> patch
> is replacing REQUIRE_EXT(ctx, RVA) for either REQUIRE_A_OR_ZALRSC()
> or
> REQUIRE_A_OR_ZAAMO(). There's no removal or change in any 64-bit
> line,
> IIUC.
> 
> 

Thank you Daniel, this is a commit message artefact from before I posted
the patch adding the RVA check to the 64-bit atomic versions. In the end I
decided to split the work and forgot to update this patch message - I
will respin this series with that fixed.

Cheers,

Rob

> Thanks,
> 
> Daniel
> 
> > 
> > Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
> > ---
> >   target/riscv/insn_trans/trans_rva.c.inc | 56 +++++++++++++++-----
> > -----
> >   1 file changed, 34 insertions(+), 22 deletions(-)
> > 
> > diff --git a/target/riscv/insn_trans/trans_rva.c.inc
> > b/target/riscv/insn_trans/trans_rva.c.inc
> > index f0368de3e4..267930e5bc 100644
> > --- a/target/riscv/insn_trans/trans_rva.c.inc
> > +++ b/target/riscv/insn_trans/trans_rva.c.inc
> > @@ -18,6 +18,18 @@
> >    * this program.  If not, see <http://www.gnu.org/licenses/>.
> >    */
> >   
> > +#define REQUIRE_A_OR_ZAAMO(ctx) do {                      \
> > +    if (!ctx->cfg_ptr->ext_zaamo && !has_ext(ctx, RVA)) { \
> > +        return false;                                     \
> > +    }                                                     \
> > +} while (0)
> > +
> > +#define REQUIRE_A_OR_ZALRSC(ctx) do {                      \
> > +    if (!ctx->cfg_ptr->ext_zalrsc && !has_ext(ctx, RVA)) { \
> > +        return false;                                     \
> > +    }                                                     \
> > +} while (0)
> > +
> >   static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
> >   {
> >       TCGv src1;
> > @@ -96,143 +108,143 @@ static bool gen_amo(DisasContext *ctx,
> > arg_atomic *a,
> >   
> >   static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a)
> >   {
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZALRSC(ctx);
> >       return gen_lr(ctx, a, (MO_ALIGN | MO_TESL));
> >   }
> >   
> >   static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
> >   {
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZALRSC(ctx);
> >       return gen_sc(ctx, a, (MO_ALIGN | MO_TESL));
> >   }
> >   
> >   static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
> >   {
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN |
> > MO_TESL));
> >   }
> >   
> >   static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
> >   {
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl,
> > (MO_ALIGN | MO_TESL));
> >   }
> >   
> >   static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
> >   {
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl,
> > (MO_ALIGN | MO_TESL));
> >   }
> >   
> >   static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
> >   {
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl,
> > (MO_ALIGN | MO_TESL));
> >   }
> >   
> >   static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
> >   {
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN
> > | MO_TESL));
> >   }
> >   
> >   static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
> >   {
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl,
> > (MO_ALIGN | MO_TESL));
> >   }
> >   
> >   static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
> >   {
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl,
> > (MO_ALIGN | MO_TESL));
> >   }
> >   
> >   static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
> >   {
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl,
> > (MO_ALIGN | MO_TESL));
> >   }
> >   
> >   static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
> >   {
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl,
> > (MO_ALIGN | MO_TESL));
> >   }
> >   
> >   static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
> >   {
> >       REQUIRE_64BIT(ctx);
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZALRSC(ctx);
> >       return gen_lr(ctx, a, MO_ALIGN | MO_TEUQ);
> >   }
> >   
> >   static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
> >   {
> >       REQUIRE_64BIT(ctx);
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZALRSC(ctx);
> >       return gen_sc(ctx, a, (MO_ALIGN | MO_TEUQ));
> >   }
> >   
> >   static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
> >   {
> >       REQUIRE_64BIT(ctx);
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN |
> > MO_TEUQ));
> >   }
> >   
> >   static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
> >   {
> >       REQUIRE_64BIT(ctx);
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl,
> > (MO_ALIGN | MO_TEUQ));
> >   }
> >   
> >   static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
> >   {
> >       REQUIRE_64BIT(ctx);
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl,
> > (MO_ALIGN | MO_TEUQ));
> >   }
> >   
> >   static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
> >   {
> >       REQUIRE_64BIT(ctx);
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl,
> > (MO_ALIGN | MO_TEUQ));
> >   }
> >   
> >   static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
> >   {
> >       REQUIRE_64BIT(ctx);
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN
> > | MO_TEUQ));
> >   }
> >   
> >   static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
> >   {
> >       REQUIRE_64BIT(ctx);
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl,
> > (MO_ALIGN | MO_TEUQ));
> >   }
> >   
> >   static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
> >   {
> >       REQUIRE_64BIT(ctx);
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl,
> > (MO_ALIGN | MO_TEUQ));
> >   }
> >   
> >   static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
> >   {
> >       REQUIRE_64BIT(ctx);
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl,
> > (MO_ALIGN | MO_TEUQ));
> >   }
> >   
> >   static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
> >   {
> >       REQUIRE_64BIT(ctx);
> > -    REQUIRE_EXT(ctx, RVA);
> > +    REQUIRE_A_OR_ZAAMO(ctx);
> >       return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl,
> > (MO_ALIGN | MO_TEUQ));
> >   }




reply via email to

[Prev in Thread] Current Thread [Next in Thread]