
[PATCH v2] target/riscv: Call probe_write() before atomic operations


From: Alistair Francis
Subject: [PATCH v2] target/riscv: Call probe_write() before atomic operations
Date: Fri, 1 Apr 2022 11:49:20 +1000

From: Alistair Francis <alistair.francis@wdc.com>

If an atomic operation fails on RISC-V, we want to generate a store/AMO
fault and not a load fault.

Currently, if we have no permission to access the memory location, the
atomic operation will sometimes fail with a load fault (depending on the
path taken in tcg/tcg-op.c), because the atomic helpers perform a load
and then a store.
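
As a rough illustration (this is not the actual tcg/tcg-op.c or accel/tcg
code, and the helper name below is made up), the serial fallback for an
AMO boils down to a load followed by a store, so the MMU sees the load
first and an inaccessible page surfaces as a load fault:

    /* Sketch only: the load-then-store shape of a serial AMO fallback. */
    static uint32_t pseudo_amo_add_w(CPURISCVState *env, target_ulong addr,
                                     uint32_t val, int mmu_idx, uintptr_t ra)
    {
        /* The load is issued first, so a page without permissions faults
         * here and is reported as a load fault... */
        uint32_t old = cpu_ldl_mmuidx_ra(env, addr, mmu_idx, ra);
        /* ...while the store, the access an AMO fault should reflect,
         * only happens afterwards. */
        cpu_stl_mmuidx_ra(env, addr, old + val, mmu_idx, ra);
        return old;
    }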

By performing a probe_write() on the memory first, we can ensure we have
permission to perform the atomic operation. As RISC-V doesn't have
write-only pages, this should be fairly robust (PMP might be the
exception).
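
For reference, here is a minimal sketch of probing for write permission
before the atomic access (the wrapper name is hypothetical, and it calls
probe_write() directly, whereas the patch below uses probe_access_flags()
so it can also detect MMIO pages via TLB_MMIO):

    /* Sketch only: fault up front (as a store/AMO fault, since the probe
     * is a MMU_DATA_STORE access) if the page is not writable, before the
     * atomic helper issues its initial load. */
    static void check_amo_writable(CPURISCVState *env, target_ulong addr,
                                   int size, int mmu_idx, uintptr_t ra)
    {
        probe_write(env, addr, size, mmu_idx, ra);
    }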

Note that this only fixes the fault for memory regions. I/O and
non-existent regions will still trigger a load fault.

Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu.h                      |  2 ++
 target/riscv/helper.h                   |  2 ++
 target/riscv/cpu_helper.c               |  2 +-
 target/riscv/op_helper.c                | 14 ++++++++++++++
 target/riscv/insn_trans/trans_rva.c.inc |  3 +++
 5 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index c069fe85fa..c215cd1b6a 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -179,6 +179,8 @@ struct CPUArchState {
     uint64_t mie;
     uint64_t mideleg;
 
+    bool amo_store_fault;
+
     target_ulong satp;   /* since: priv-1.10.0 */
     target_ulong stval;
     target_ulong medeleg;
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 26bbab2fab..12f8a0acea 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -106,6 +106,8 @@ DEF_HELPER_1(wfi, void, env)
 DEF_HELPER_1(tlb_flush, void, env)
 #endif
 
+DEF_HELPER_3(atomic_check, void, env, tl, int)
+
 /* Hypervisor functions */
 #ifndef CONFIG_USER_ONLY
 DEF_HELPER_1(hyp_tlb_flush, void, env)
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 1c60fb2e80..294687f001 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -1139,7 +1139,7 @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
     RISCVCPU *cpu = RISCV_CPU(cs);
     CPURISCVState *env = &cpu->env;
 
-    if (access_type == MMU_DATA_STORE) {
+    if (access_type == MMU_DATA_STORE || env->amo_store_fault) {
         cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
     } else if (access_type == MMU_DATA_LOAD) {
         cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 1a75ba11e6..d0343b15f8 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -114,6 +114,20 @@ target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
     return int128_getlo(rv);
 }
 
+void helper_atomic_check(CPURISCVState *env, target_ulong address,
+                         int mmu_idx)
+{
+#ifndef CONFIG_USER_ONLY
+    void *phost;
+    int ret = probe_access_flags(env, address, MMU_DATA_STORE, mmu_idx, false,
+                                 &phost, GETPC());
+
+    if (ret & TLB_MMIO) {
+        env->amo_store_fault = true;
+    }
+#endif
+}
+
 #ifndef CONFIG_USER_ONLY
 
 target_ulong helper_sret(CPURISCVState *env)
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
index 45db82c9be..b3e05613d7 100644
--- a/target/riscv/insn_trans/trans_rva.c.inc
+++ b/target/riscv/insn_trans/trans_rva.c.inc
@@ -83,6 +83,9 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
     TCGv dest = dest_gpr(ctx, a->rd);
     TCGv src1 = get_address(ctx, a->rs1, 0);
     TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+    TCGv_i32 mem_idx = tcg_constant_i32(ctx->mem_idx);
+
+    gen_helper_atomic_check(cpu_env, src1, mem_idx);
 
     func(dest, src1, src2, ctx->mem_idx, mop);
 
-- 
2.35.1



