The new cpu_physical_memory_map_internal performs all the work on the
fast path and returns enough information for the slow path to pick up
where it left off.  It will be used later by other functions that only
need the fast path.
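
For illustration only, such a fast-path-only user might look roughly
like this (hypothetical caller; only cpu_physical_memory_map_internal
and its signature come from this patch):

    static void *map_ram_only(target_phys_addr_t addr,
                              target_phys_addr_t *plen)
    {
        uintptr_t pd = IO_MEM_UNASSIGNED;
        void *ret;

        /* Fast path: returns non-NULL iff the start of the range
         * is directly mappable RAM.  */
        ret = cpu_physical_memory_map_internal(addr, plen, &pd);
        if (!ret) {
            /* MMIO or unassigned memory: a fast-path-only caller
             * would bail out here instead of falling back to the
             * bounce buffer.  */
            return NULL;
        }
        return ret;
    }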

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 exec.c |   77 ++++++++++++++++++++++++++++++++++++++++-----------------------
 1 files changed, 49 insertions(+), 28 deletions(-)

diff --git a/exec.c b/exec.c
index a718d74..9b2c9e4 100644
--- a/exec.c
+++ b/exec.c
@@ -3898,16 +3898,9 @@ static void cpu_notify_map_clients(void)
     }
 }
 
-/* Map a physical memory region into a host virtual address.
- * May map a subset of the requested range, given by and returned in *plen.
- * May return NULL if resources needed to perform the mapping are exhausted.
- * Use only for reads OR writes - not for read-modify-write operations.
- * Use cpu_register_map_client() to know when retrying the map operation is
- * likely to succeed.
- */
-void *cpu_physical_memory_map(target_phys_addr_t addr,
-                              target_phys_addr_t *plen,
-                              int is_write)
+static void *cpu_physical_memory_map_internal(target_phys_addr_t addr,
+                                              target_phys_addr_t *plen,
+                                              uintptr_t *pd)
 {
     target_phys_addr_t len = *plen;
     target_phys_addr_t done = 0;
@@ -3915,7 +3908,6 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
     uint8_t *ret = NULL;
     uint8_t *ptr;
     target_phys_addr_t page;
-    unsigned long pd;
     PhysPageDesc *p;
     unsigned long addr1;
 
@@ -3926,26 +3918,16 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
             l = len;
         p = phys_page_find(page >> TARGET_PAGE_BITS);
         if (!p) {
-            pd = IO_MEM_UNASSIGNED;
-        } else {
-            pd = p->phys_offset;
+            *pd = IO_MEM_UNASSIGNED;
+            break;
         }
 
-        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
-            if (done || bounce.buffer) {
-                break;
-            }
-            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
-            bounce.addr = addr;
-            bounce.len = l;
-            if (!is_write) {
-                cpu_physical_memory_read(addr, bounce.buffer, l);
-            }
-            ptr = bounce.buffer;
-        } else {
-            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
-            ptr = qemu_get_ram_ptr(addr1);
+        *pd = p->phys_offset;
+        if ((*pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+            break;
         }
+        addr1 = (*pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
+        ptr = qemu_get_ram_ptr(addr1);
         if (!done) {
             ret = ptr;
         } else if (ret + done != ptr) {
@@ -3960,6 +3942,45 @@ void *cpu_physical_memory_map(target_phys_addr_t addr,
     return ret;
 }
 
+/* Map a physical memory region into a host virtual address.
+ * May map a subset of the requested range, given by and returned in *plen.
+ * May return NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ * Use cpu_register_map_client() to know when retrying the map operation is
+ * likely to succeed.
+ */
+void *cpu_physical_memory_map(target_phys_addr_t addr,
+                              target_phys_addr_t *plen,
+                              int is_write)
+{
+    target_phys_addr_t page;
+    uintptr_t pd = IO_MEM_UNASSIGNED;
+    void *ret;
+    ret = cpu_physical_memory_map_internal(addr, plen, &pd);
+    if (ret) {
+        return ret;
+    }
+
+    assert((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM);
+    if (pd == IO_MEM_UNASSIGNED) {
+        return NULL;
+    }
+    if (bounce.buffer) {
+        return NULL;
+    }
+
+    /* Read at most a page into the temporary buffer.  */
+    page = addr & TARGET_PAGE_MASK;
+    bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
+    bounce.addr = addr;
+    bounce.len = MIN(page + TARGET_PAGE_SIZE - addr, *plen);
+    if (!is_write) {
+        cpu_physical_memory_read(addr, bounce.buffer, bounce.len);
+    }
+    *plen = bounce.len;
+    return bounce.buffer;
+}
+
 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
  * Will also mark the memory as dirty if is_write == 1.  access_len gives
  * the amount of memory that was actually read or written by the caller.
-- 
1.7.4.4