[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL v2 52/91] target/ppc: Split out gen_vx_vmul10
From: Richard Henderson
Subject: [PULL v2 52/91] target/ppc: Split out gen_vx_vmul10
Date: Thu, 9 Mar 2023 12:05:11 -0800
Move the body out of this large macro.
Use tcg_constant_i64.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/ppc/translate/vmx-impl.c.inc | 95 +++++++++++++++--------------
1 file changed, 49 insertions(+), 46 deletions(-)
diff --git a/target/ppc/translate/vmx-impl.c.inc
b/target/ppc/translate/vmx-impl.c.inc
index 05ba9c9492..ee656d6a44 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -171,53 +171,56 @@ static void gen_mtvscr(DisasContext *ctx)
gen_helper_mtvscr(cpu_env, val);
}
+static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry)
+{
+ TCGv_i64 t0;
+ TCGv_i64 t1;
+ TCGv_i64 t2;
+ TCGv_i64 avr;
+ TCGv_i64 ten, z;
+
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ avr = tcg_temp_new_i64();
+ ten = tcg_constant_i64(10);
+ z = tcg_constant_i64(0);
+
+ if (add_cin) {
+ get_avr64(avr, rA(ctx->opcode), false);
+ tcg_gen_mulu2_i64(t0, t1, avr, ten);
+ get_avr64(avr, rB(ctx->opcode), false);
+ tcg_gen_andi_i64(t2, avr, 0xF);
+ tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);
+ set_avr64(rD(ctx->opcode), avr, false);
+ } else {
+ get_avr64(avr, rA(ctx->opcode), false);
+ tcg_gen_mulu2_i64(avr, t2, avr, ten);
+ set_avr64(rD(ctx->opcode), avr, false);
+ }
+
+ if (ret_carry) {
+ get_avr64(avr, rA(ctx->opcode), true);
+ tcg_gen_mulu2_i64(t0, t1, avr, ten);
+ tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);
+ set_avr64(rD(ctx->opcode), avr, false);
+ set_avr64(rD(ctx->opcode), z, true);
+ } else {
+ get_avr64(avr, rA(ctx->opcode), true);
+ tcg_gen_mul_i64(t0, avr, ten);
+ tcg_gen_add_i64(avr, t0, t2);
+ set_avr64(rD(ctx->opcode), avr, true);
+ }
+}
+
#define GEN_VX_VMUL10(name, add_cin, ret_carry) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
- TCGv_i64 t0; \
- TCGv_i64 t1; \
- TCGv_i64 t2; \
- TCGv_i64 avr; \
- TCGv_i64 ten, z; \
- \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- \
- t0 = tcg_temp_new_i64(); \
- t1 = tcg_temp_new_i64(); \
- t2 = tcg_temp_new_i64(); \
- avr = tcg_temp_new_i64(); \
- ten = tcg_const_i64(10); \
- z = tcg_const_i64(0); \
- \
- if (add_cin) { \
- get_avr64(avr, rA(ctx->opcode), false); \
- tcg_gen_mulu2_i64(t0, t1, avr, ten); \
- get_avr64(avr, rB(ctx->opcode), false); \
- tcg_gen_andi_i64(t2, avr, 0xF); \
- tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \
- set_avr64(rD(ctx->opcode), avr, false); \
- } else { \
- get_avr64(avr, rA(ctx->opcode), false); \
- tcg_gen_mulu2_i64(avr, t2, avr, ten); \
- set_avr64(rD(ctx->opcode), avr, false); \
- } \
- \
- if (ret_carry) { \
- get_avr64(avr, rA(ctx->opcode), true); \
- tcg_gen_mulu2_i64(t0, t1, avr, ten); \
- tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \
- set_avr64(rD(ctx->opcode), avr, false); \
- set_avr64(rD(ctx->opcode), z, true); \
- } else { \
- get_avr64(avr, rA(ctx->opcode), true); \
- tcg_gen_mul_i64(t0, avr, ten); \
- tcg_gen_add_i64(avr, t0, t2); \
- set_avr64(rD(ctx->opcode), avr, true); \
- } \
-} \
+ static void glue(gen_, name)(DisasContext *ctx) \
+ { gen_vx_vmul10(ctx, add_cin, ret_carry); }
GEN_VX_VMUL10(vmul10uq, 0, 0);
GEN_VX_VMUL10(vmul10euq, 1, 0);
--
2.34.1
- [PULL v2 40/91] target/avr: Avoid use of tcg_const_i32 throughout, (continued)
- [PULL v2 40/91] target/avr: Avoid use of tcg_const_i32 throughout, Richard Henderson, 2023/03/09
- [PULL v2 41/91] target/cris: Avoid use of tcg_const_i32 throughout, Richard Henderson, 2023/03/09
- [PULL v2 42/91] target/hppa: Avoid tcg_const_i64 in trans_fid_f, Richard Henderson, 2023/03/09
- [PULL v2 43/91] target/hppa: Avoid use of tcg_const_i32 throughout, Richard Henderson, 2023/03/09
- [PULL v2 45/91] target/m68k: Avoid tcg_const_i32 when modified, Richard Henderson, 2023/03/09
- [PULL v2 46/91] target/m68k: Avoid tcg_const_i32 in bfop_reg, Richard Henderson, 2023/03/09
- [PULL v2 44/91] target/i386: Avoid use of tcg_const_* throughout, Richard Henderson, 2023/03/09
- [PULL v2 48/91] target/mips: Split out gen_lxl, Richard Henderson, 2023/03/09
- [PULL v2 47/91] target/m68k: Avoid tcg_const_* throughout, Richard Henderson, 2023/03/09
- [PULL v2 49/91] target/mips: Split out gen_lxr, Richard Henderson, 2023/03/09
- [PULL v2 52/91] target/ppc: Split out gen_vx_vmul10,
Richard Henderson <=
- [PULL v2 50/91] target/mips: Avoid tcg_const_tl in gen_r6_ld, Richard Henderson, 2023/03/09
- [PULL v2 54/91] target/rx: Use tcg_gen_abs_i32, Richard Henderson, 2023/03/09
- [PULL v2 55/91] target/rx: Use cpu_psw_z as temp in flags computation, Richard Henderson, 2023/03/09
- [PULL v2 51/91] target/mips: Avoid tcg_const_* throughout, Richard Henderson, 2023/03/09
- [PULL v2 53/91] target/ppc: Avoid tcg_const_i64 in do_vector_shift_quad, Richard Henderson, 2023/03/09
- [PULL v2 56/91] target/rx: Avoid tcg_const_i32 when new temp needed, Richard Henderson, 2023/03/09
- [PULL v2 57/91] target/rx: Avoid tcg_const_i32, Richard Henderson, 2023/03/09
- [PULL v2 58/91] target/s390x: Avoid tcg_const_i64, Richard Henderson, 2023/03/09
- [PULL v2 60/91] target/sh4: Avoid tcg_const_i32, Richard Henderson, 2023/03/09
- [PULL v2 59/91] target/sh4: Avoid tcg_const_i32 for TAS.B, Richard Henderson, 2023/03/09