From: Cédric Le Goater
Subject: Re: [PATCH 1/4] ppc/xive: Use address_space routines to access the machine RAM
Date: Thu, 31 Aug 2023 09:54:55 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Thunderbird/102.13.0

On 8/31/23 09:44, Frederic Barrat wrote:


On 29/08/2023 16:32, Cédric Le Goater wrote:
to log an error in case of bad configuration of the XIVE tables by the FW.

Signed-off-by: Cédric Le Goater <clg@kaod.org>
---
  hw/intc/pnv_xive.c  | 27 +++++++++++++++++++++++----
  hw/intc/pnv_xive2.c | 27 +++++++++++++++++++++++----
  2 files changed, 46 insertions(+), 8 deletions(-)

diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c
index e536b3ec26e5..b2bafd61b157 100644
--- a/hw/intc/pnv_xive.c
+++ b/hw/intc/pnv_xive.c
@@ -242,12 +242,20 @@ static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
  {
      const XiveVstInfo *info = &vst_infos[type];
      uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
+    MemTxResult result;
      if (!addr) {
          return -1;
      }
-    cpu_physical_memory_read(addr, data, info->size);
+    result = address_space_read(&address_space_memory, addr,
+                                MEMTXATTRS_UNSPECIFIED, data,
+                                info->size);


I had been wondering which is the "right" API to update the guest memory. Since the cpu_physical_memory_* family ends up calling its address_space_* equivalent, I'm guessing the point of the change is really to catch any error

yes.

and remove any potential ambiguity about the address space?

yes. See LPC and XSCOM for other address spaces in the PowerNV domain. Also
the XIVE EDT, but that's more of a modeling trick.
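
For illustration, here is a rough sketch of such a device-private address
space using QEMU's memory API; the names PnvFooBus, foo_mr and foo_as are
invented for the example and are not the actual LPC or XSCOM code. The point
is that address_space_read()/address_space_write() name the target
AddressSpace explicitly, so there is no ambiguity about which bus resolves
the transaction.

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

typedef struct PnvFooBus {
    MemoryRegion foo_mr;    /* root region describing the bus */
    AddressSpace foo_as;    /* address space built on top of it */
} PnvFooBus;

static void pnv_foo_bus_init(PnvFooBus *bus, Object *owner)
{
    memory_region_init(&bus->foo_mr, owner, "foo-bus", 4 * GiB);
    address_space_init(&bus->foo_as, &bus->foo_mr, "foo-bus");
}

static MemTxResult pnv_foo_bus_read32(PnvFooBus *bus, hwaddr off,
                                      uint32_t *val)
{
    /* resolved against bus->foo_as, not the machine RAM behind
     * &address_space_memory */
    return address_space_read(&bus->foo_as, off, MEMTXATTRS_UNSPECIFIED,
                              val, sizeof(*val));
}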

In this case, the IC is reading RAM using a physical address, so checking
the transaction status is important. The FW could configure bogus
addresses.
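
For reference, the legacy helper amounts to roughly the following (a
paraphrase of QEMU's physmem.c, not the verbatim source): the target is
hard-wired to the machine RAM address space and the MemTxResult is dropped,
so an access through a bogus FW-programmed address fails silently.

#include "exec/memory.h"
#include "exec/address-spaces.h"

/* sketch of cpu_physical_memory_read(), not the actual implementation */
static inline void cpu_physical_memory_read_sketch(hwaddr addr,
                                                   void *buf, hwaddr len)
{
    /* the MemTxResult is discarded: callers never see a failed transaction */
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, false);
}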


In any case,
Reviewed-by: Frederic Barrat <fbarrat@linux.ibm.com>


Thanks,

C.


   Fred


+    if (result != MEMTX_OK) {
+        xive_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
+                   " for VST %s %x/%x\n", addr, info->name, blk, idx);
+        return -1;
+    }
      return 0;
  }
@@ -258,16 +266,27 @@ static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
  {
      const XiveVstInfo *info = &vst_infos[type];
      uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
+    MemTxResult result;
      if (!addr) {
          return -1;
      }
      if (word_number == XIVE_VST_WORD_ALL) {
-        cpu_physical_memory_write(addr, data, info->size);
+        result = address_space_write(&address_space_memory, addr,
+                                     MEMTXATTRS_UNSPECIFIED, data,
+                                     info->size);
      } else {
-        cpu_physical_memory_write(addr + word_number * 4,
-                                  data + word_number * 4, 4);
+        result = address_space_write(&address_space_memory,
+                                     addr + word_number * 4,
+                                     MEMTXATTRS_UNSPECIFIED,
+                                     data + word_number * 4, 4);
+    }
+
+    if (result != MEMTX_OK) {
+        xive_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
+                   " for VST %s %x/%x\n", addr, info->name, blk, idx);
+        return -1;
      }
      return 0;
  }
diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c
index bbb44a533cff..4b8d0a5d8120 100644
--- a/hw/intc/pnv_xive2.c
+++ b/hw/intc/pnv_xive2.c
@@ -240,12 +240,20 @@ static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
  {
      const XiveVstInfo *info = &vst_infos[type];
      uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
+    MemTxResult result;
      if (!addr) {
          return -1;
      }
-    cpu_physical_memory_read(addr, data, info->size);
+    result = address_space_read(&address_space_memory, addr,
+                                MEMTXATTRS_UNSPECIFIED, data,
+                                info->size);
+    if (result != MEMTX_OK) {
+        xive2_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
+                   " for VST %s %x/%x\n", addr, info->name, blk, idx);
+        return -1;
+    }
      return 0;
  }
@@ -256,16 +264,27 @@ static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
  {
      const XiveVstInfo *info = &vst_infos[type];
      uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
+    MemTxResult result;
      if (!addr) {
          return -1;
      }
      if (word_number == XIVE_VST_WORD_ALL) {
-        cpu_physical_memory_write(addr, data, info->size);
+        result = address_space_write(&address_space_memory, addr,
+                                     MEMTXATTRS_UNSPECIFIED, data,
+                                     info->size);
      } else {
-        cpu_physical_memory_write(addr + word_number * 4,
-                                  data + word_number * 4, 4);
+        result = address_space_write(&address_space_memory,
+                                     addr + word_number * 4,
+                                     MEMTXATTRS_UNSPECIFIED,
+                                     data + word_number * 4, 4);
+    }
+
+    if (result != MEMTX_OK) {
+        xive2_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
+                   " for VST %s %x/%x\n", addr, info->name, blk, idx);
+        return -1;
      }
      return 0;
  }



