At present, there are many store/load variants defined in
memory_ldst.c.inc.

However, Bytes::store() and Bytes::load() in vm-memory are bound by the
AtomicAccess trait, which makes it (almost) impossible to select the
proper interface (for l, w or q) based on a specific type.
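
As a rough, self-contained sketch of the issue (the AtomicAccess trait
and store() below are local stand-ins for illustration, not the real
vm-memory items), the generic bound leaves only size_of::<T>() to
dispatch on, so every access would need a runtime match to pick the
size-specific helper:

    use std::mem::size_of;

    trait AtomicAccess: Copy { fn as_u64(self) -> u64; }
    impl AtomicAccess for u16 { fn as_u64(self) -> u64 { self as u64 } }
    impl AtomicAccess for u32 { fn as_u64(self) -> u64 { self as u64 } }

    // Nothing but size_of::<T>() is available for choosing among the
    // address_space_st{b,w,l,q}-style helpers:
    fn store<T: AtomicAccess>(val: T, addr: u64) {
        match size_of::<T>() {
            2 => println!("stw {:#x} <- {:#x}", addr, val.as_u64()),
            4 => println!("stl {:#x} <- {:#x}", addr, val.as_u64()),
            _ => unimplemented!("other sizes elided in this sketch"),
        }
    }

    fn main() { store(0x1234u16, 0xfee0_0000); }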

So it's necessary to provide interfaces that hide the type details as
much as possible. Compared with address_space_st{size} and
address_space_ld{size}, the differences are as follows (a Rust-side
usage sketch follows the list):

 * No translation; only the memory access itself.

 * Only native endian is supported, so the Rust side must handle the
   endian format before/after calling store()/load().

 * Use a byte array instead of a single uint64_t for the value to be
   written or read, so the Rust side doesn't need to convert a generic
   type to u64.

   - The extra cost is that the conversion between the byte array and
     uint64_t now happens inside the interfaces.

 * Do not handle the cross-region case via an MMIO access; the Rust side
   will handle such abnormal cases.
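
For example, a Rust-side store wrapper on top of the new interface could
look roughly like the following. This is only a sketch: the extern
prototype mirrors the C declaration added below, while the MemTxAttrs
layout, the opaque section type and the store_native() name are
placeholders rather than actual bindings.

    use std::mem::size_of;

    #[repr(C)]
    pub struct MemTxAttrs { bits: u32 }                   // placeholder layout
    #[repr(C)]
    pub struct MemoryRegionSection { _private: [u8; 0] }  // opaque to Rust

    extern "C" {
        // Mirrors the prototype added by this patch (MemTxResult is uint32_t).
        fn section_rust_store(section: *mut MemoryRegionSection, mr_offset: u64,
                              buf: *const u8, attrs: MemTxAttrs, len: u64) -> u32;
    }

    /// Caller must hold the RCU critical section, apply any device-endian
    /// byte swapping to `val` beforehand, and split accesses that would
    /// cross a region boundary.
    pub unsafe fn store_native<T: Copy>(section: *mut MemoryRegionSection,
                                        mr_offset: u64, val: T,
                                        attrs: MemTxAttrs) -> u32 {
        // T's in-memory representation already is the native-endian byte
        // array expected by the C side, so no conversion to u64 is needed.
        let buf = &val as *const T as *const u8;
        section_rust_store(section, mr_offset, buf, attrs,
                           size_of::<T>() as u64)
    }

A load wrapper would be symmetric: pass a mutable byte buffer of
size_of::<T>() bytes for section_rust_load() to fill, then convert the
endianness afterwards if needed.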

Signed-off-by: Zhao Liu <zhao1....@intel.com>
---
 include/system/memory.h | 52 +++++++++++++++++++++++++++++++--
 system/physmem.c        | 65 +++++++++++++++++++++++++++++++++++++++--
 2 files changed, 112 insertions(+), 5 deletions(-)

diff --git a/include/system/memory.h b/include/system/memory.h
index a75c8c348f58..f0f51f1c4c89 100644
--- a/include/system/memory.h
+++ b/include/system/memory.h
@@ -3440,7 +3440,7 @@ MemTxResult section_rust_write_continue_step(MemoryRegionSection *section,
    MemTxAttrs attrs, const uint8_t *buf, hwaddr len, hwaddr mr_addr, hwaddr *l);
 
 /**
- * section_read_continue_step: read from #MemoryRegionSection.
+ * section_rust_read_continue_step: read from #MemoryRegionSection.
  *
  * Not: This function should only used by Rust side, and user shouldn't
  * call it directly!
@@ -3461,9 +3461,57 @@ MemTxResult section_rust_write_continue_step(MemoryRegionSection *section,
  * Return a MemTxResult indicating whether the operation succeeded
  * or failed.
  */
-MemTxResult section_read_continue_step(MemoryRegionSection *section,
+MemTxResult section_rust_read_continue_step(MemoryRegionSection *section,
     MemTxAttrs attrs, uint8_t *buf, hwaddr len, hwaddr mr_addr, hwaddr *l);
 
+/**
+ * section_rust_store: store data to #MemoryRegionSection.
+ *
+ * Note: This function should only be used by the Rust side, and users
+ * shouldn't call it directly!
+ *
+ * This function provides a wrapper for address_space_st{size} without
+ * translation, and only supports native endian.
+ *
+ * Should be called from an RCU critical section.
+ *
+ * @section: #MemoryRegionSection to be accessed.
+ * @mr_offset: offset within that memory region.
+ * @buf: buffer holding the data to be written.
+ * @attrs: memory transaction attributes.
+ * @len: the number of bytes to write.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed.
+ */
+MemTxResult section_rust_store(MemoryRegionSection *section,
+                               hwaddr mr_offset, const uint8_t *buf,
+                               MemTxAttrs attrs, hwaddr len);
+
+/**
+ * section_rust_load: load data from #MemoryRegionSection.
+ *
+ * Note: This function should only be used by the Rust side, and users
+ * shouldn't call it directly!
+ *
+ * This function provides a wrapper for address_space_ld{size} without
+ * translation, and only supports native endian.
+ *
+ * Should be called from an RCU critical section.
+ *
+ * @section: #MemoryRegionSection to be accessed.
+ * @mr_offset: offset within that memory region.
+ * @buf: buffer to store the data read.
+ * @attrs: memory transaction attributes.
+ * @len: the number of bytes to read.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed.
+ */
+MemTxResult section_rust_load(MemoryRegionSection *section,
+                              hwaddr mr_offset, uint8_t *buf,
+                              MemTxAttrs attrs, hwaddr len);
+
 /*
  * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
  * to manage the actual amount of memory consumed by the VM (then, the memory
diff --git a/system/physmem.c b/system/physmem.c
index 0c30dea775ca..6048d5faac8c 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -3120,9 +3120,9 @@ static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf,
 }
 
 MemTxResult
-section_read_continue_step(MemoryRegionSection *section, MemTxAttrs attrs,
-                           uint8_t *buf, hwaddr len, hwaddr mr_addr,
-                           hwaddr *l)
+section_rust_read_continue_step(MemoryRegionSection *section, MemTxAttrs attrs,
+                                uint8_t *buf, hwaddr len, hwaddr mr_addr,
+                                hwaddr *l)
 {
    return flatview_read_continue_step(attrs, buf, len, mr_addr, l, section->mr);
 }
@@ -3239,6 +3239,65 @@ void cpu_physical_memory_rw(hwaddr addr, void *buf,
                      buf, len, is_write);
 }
 
+MemTxResult section_rust_store(MemoryRegionSection *section,
+                               hwaddr mr_offset, const uint8_t *buf,
+                               MemTxAttrs attrs, hwaddr len)
+{
+    MemoryRegion *mr = section->mr;
+    MemTxResult r;
+    uint64_t val;
+
+    val = ldn_he_p(buf, len);
+    if (!memory_access_is_direct(mr, true, attrs)) {
+        bool release_lock = false;
+
+        release_lock |= prepare_mmio_access(mr);
+        r = memory_region_dispatch_write(mr, mr_offset, val,
+                                         size_memop(len) |
+                                         devend_memop(DEVICE_NATIVE_ENDIAN),
+                                         attrs);
+        if (release_lock) {
+            bql_unlock();
+        }
+    } else {
+        uint8_t *ptr = qemu_map_ram_ptr(mr->ram_block, mr_offset);
+        stn_p(ptr, len, val);
+        invalidate_and_set_dirty(mr, mr_offset, len);
+        r = MEMTX_OK;
+    }
+
+    return r;
+}
+
+MemTxResult section_rust_load(MemoryRegionSection *section,
+                              hwaddr mr_offset, uint8_t *buf,
+                              MemTxAttrs attrs, hwaddr len)
+{
+    MemoryRegion *mr = section->mr;
+    MemTxResult r;
+    uint64_t val;
+
+    if (!memory_access_is_direct(mr, false, attrs)) {
+        bool release_lock = false;
+
+        release_lock |= prepare_mmio_access(mr);
+        r = memory_region_dispatch_read(mr, mr_offset, &val,
+                                        size_memop(len) |
+                                        devend_memop(DEVICE_NATIVE_ENDIAN),
+                                        attrs);
+        if (release_lock) {
+            bql_unlock();
+        }
+    } else {
+        uint8_t *ptr = qemu_map_ram_ptr(mr->ram_block, mr_offset);
+        val = ldn_p(ptr, len);
+        r = MEMTX_OK;
+    }
+
+    stn_he_p(buf, len, val);
+    return r;
+}
+
 enum write_rom_type {
     WRITE_DATA,
     FLUSH_CACHE,
-- 
2.34.1

