[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 12/24] tcg: Convert tcg_gen_dupi_vec to TCG_CONST
From: Richard Henderson
Subject: [PULL 12/24] tcg: Convert tcg_gen_dupi_vec to TCG_CONST
Date: Wed, 13 Jan 2021 16:16:42 -1000
Because we now store uint64_t in TCGTemp, we can now always
store the full 64-bit duplicate immediate. So remove the
difference between 32- and 64-bit hosts.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 9 ++++-----
tcg/tcg-op-vec.c | 39 ++++++++++-----------------------------
tcg/tcg.c | 7 +------
3 files changed, 15 insertions(+), 40 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index bda727d5ed..dbb03ef96b 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1120,11 +1120,10 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_dup2_vec:
assert(TCG_TARGET_REG_BITS == 32);
if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
- tmp = arg_info(op->args[1])->val;
- if (tmp == arg_info(op->args[2])->val) {
- tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp);
- break;
- }
+ tcg_opt_gen_movi(s, &temps_used, op, op->args[0],
+ deposit64(arg_info(op->args[1])->val, 32, 32,
+ arg_info(op->args[2])->val));
+ break;
} else if (args_are_copies(op->args[1], op->args[2])) {
op->opc = INDEX_op_dup_vec;
TCGOP_VECE(op) = MO_32;
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index cdbf11c573..9fbed1366c 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -216,25 +216,17 @@ void tcg_gen_mov_vec(TCGv_vec r, TCGv_vec a)
}
}
-#define MO_REG (TCG_TARGET_REG_BITS == 64 ? MO_64 : MO_32)
-
-static void do_dupi_vec(TCGv_vec r, unsigned vece, TCGArg a)
-{
- TCGTemp *rt = tcgv_vec_temp(r);
- vec_gen_2(INDEX_op_dupi_vec, rt->base_type, vece, temp_arg(rt), a);
-}
-
TCGv_vec tcg_const_zeros_vec(TCGType type)
{
TCGv_vec ret = tcg_temp_new_vec(type);
- do_dupi_vec(ret, MO_REG, 0);
+ tcg_gen_dupi_vec(MO_64, ret, 0);
return ret;
}
TCGv_vec tcg_const_ones_vec(TCGType type)
{
TCGv_vec ret = tcg_temp_new_vec(type);
- do_dupi_vec(ret, MO_REG, -1);
+ tcg_gen_dupi_vec(MO_64, ret, -1);
return ret;
}
@@ -252,39 +244,28 @@ TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m)
void tcg_gen_dup64i_vec(TCGv_vec r, uint64_t a)
{
- if (TCG_TARGET_REG_BITS == 64) {
- do_dupi_vec(r, MO_64, a);
- } else if (a == dup_const(MO_32, a)) {
- do_dupi_vec(r, MO_32, a);
- } else {
- TCGv_i64 c = tcg_const_i64(a);
- tcg_gen_dup_i64_vec(MO_64, r, c);
- tcg_temp_free_i64(c);
- }
+ tcg_gen_dupi_vec(MO_64, r, a);
}
void tcg_gen_dup32i_vec(TCGv_vec r, uint32_t a)
{
- do_dupi_vec(r, MO_REG, dup_const(MO_32, a));
+ tcg_gen_dupi_vec(MO_32, r, a);
}
void tcg_gen_dup16i_vec(TCGv_vec r, uint32_t a)
{
- do_dupi_vec(r, MO_REG, dup_const(MO_16, a));
+ tcg_gen_dupi_vec(MO_16, r, a);
}
void tcg_gen_dup8i_vec(TCGv_vec r, uint32_t a)
{
- do_dupi_vec(r, MO_REG, dup_const(MO_8, a));
+ tcg_gen_dupi_vec(MO_8, r, a);
}
void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a)
{
- if (vece == MO_64) {
- tcg_gen_dup64i_vec(r, a);
- } else {
- do_dupi_vec(r, MO_REG, dup_const(vece, a));
- }
+ TCGTemp *rt = tcgv_vec_temp(r);
+ tcg_gen_mov_vec(r, tcg_constant_vec(rt->base_type, vece, a));
}
void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec r, TCGv_i64 a)
@@ -489,8 +470,8 @@ void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
if (tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0) {
tcg_gen_sari_vec(vece, t, a, (8 << vece) - 1);
} else {
- do_dupi_vec(t, MO_REG, 0);
- tcg_gen_cmp_vec(TCG_COND_LT, vece, t, a, t);
+ tcg_gen_cmp_vec(TCG_COND_LT, vece, t, a,
+ tcg_constant_vec(type, vece, 0));
}
tcg_gen_xor_vec(vece, r, a, t);
tcg_gen_sub_vec(vece, r, r, t);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 802f0b8a32..ad1348d811 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -3539,16 +3539,11 @@ static void temp_load(TCGContext *s, TCGTemp *ts,
TCGRegSet desired_regs,
* The targets will, in general, have to do this search anyway,
* do this generically.
*/
- if (TCG_TARGET_REG_BITS == 32) {
- val = dup_const(MO_32, val);
- vece = MO_32;
- }
if (val == dup_const(MO_8, val)) {
vece = MO_8;
} else if (val == dup_const(MO_16, val)) {
vece = MO_16;
- } else if (TCG_TARGET_REG_BITS == 64 &&
- val == dup_const(MO_32, val)) {
+ } else if (val == dup_const(MO_32, val)) {
vece = MO_32;
}
--
2.25.1
- [PULL 04/24] tcg: Add temp_readonly, (continued)
- [PULL 04/24] tcg: Add temp_readonly, Richard Henderson, 2021/01/13
- [PULL 02/24] tcg: Increase tcg_out_dupi_vec immediate to int64_t, Richard Henderson, 2021/01/13
- [PULL 03/24] tcg: Consolidate 3 bits into enum TCGTempKind, Richard Henderson, 2021/01/13
- [PULL 05/24] tcg: Expand TCGTemp.val to 64-bits, Richard Henderson, 2021/01/13
- [PULL 06/24] tcg: Rename struct tcg_temp_info to TempOptInfo, Richard Henderson, 2021/01/13
- [PULL 07/24] tcg: Expand TempOptInfo to 64-bits, Richard Henderson, 2021/01/13
- [PULL 08/24] tcg: Introduce TYPE_CONST temporaries, Richard Henderson, 2021/01/13
- [PULL 09/24] tcg/optimize: Improve find_better_copy, Richard Henderson, 2021/01/13
- [PULL 10/24] tcg/optimize: Adjust TempOptInfo allocation, Richard Henderson, 2021/01/13
- [PULL 13/24] tcg: Use tcg_constant_i32 with icount expander, Richard Henderson, 2021/01/13
- [PULL 12/24] tcg: Convert tcg_gen_dupi_vec to TCG_CONST, Richard Henderson <= (current message)
- [PULL 19/24] tcg: Add tcg_reg_alloc_dup2, Richard Henderson, 2021/01/13
- [PULL 11/24] tcg/optimize: Use tcg_constant_internal with constant folding, Richard Henderson, 2021/01/13
[PULL 14/24] tcg: Use tcg_constant_{i32,i64} with tcg int expanders, Richard Henderson, 2021/01/13
[PULL 15/24] tcg: Use tcg_constant_{i32,i64} with tcg plugins, Richard Henderson, 2021/01/13
[PULL 18/24] tcg: Remove movi and dupi opcodes, Richard Henderson, 2021/01/13
[PULL 16/24] tcg: Use tcg_constant_{i32,i64,vec} with gvec expanders, Richard Henderson, 2021/01/13