From: Petr Tesarik <petr.tesarik....@huawei.com>

To prepare for the introduction of dynamically allocated bounce
buffers, separate out the common code from the code that handles
non-dynamic (aka fixed) bounce buffers.

No functional change, but this commit should make the addition of
dynamic allocations easier to review.
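
For illustration only, the shape this split is heading toward might
look roughly like the sketch below: is_swiotlb_buffer() keeps the
fast fixed-slot check and can later fall back to a dynamic lookup.
The is_swiotlb_dyn() helper is hypothetical here, standing in for a
future patch in this series; it is not part of this commit.

    /* Sketch only; is_swiotlb_dyn() is a hypothetical future helper. */
    static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
    {
            struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

            if (!mem)
                    return false;
            /* Fast path: fixed slots, factored out by this commit. */
            if (is_swiotlb_fixed(mem, paddr))
                    return true;
            /* Slow path: dynamically allocated bounce buffers (future). */
            return is_swiotlb_dyn(dev, paddr);
    }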

Signed-off-by: Petr Tesarik <petr.tesarik....@huawei.com>
---
 include/linux/swiotlb.h |  31 ++++++++++-
 kernel/dma/swiotlb.c    | 110 +++++++++++++++++++++++++++++++++-------
 2 files changed, 122 insertions(+), 19 deletions(-)

diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index d851dbce1143..281ecc6b9bcc 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -114,11 +114,40 @@ struct io_tlb_mem {
 };
 extern struct io_tlb_mem io_tlb_default_mem;
 
+/**
+ * is_swiotlb_fixed() - check if a physical address belongs to a swiotlb slot
+ * @mem:       relevant swiotlb pool
+ * @paddr:     physical address within the DMA buffer
+ *
+ * Check if @paddr points into a fixed bounce buffer slot.
+ * This check should be as fast as possible.
+ *
+ * Return:
+ * * %true if @paddr points into a @mem fixed slot
+ * * %false otherwise
+ */
+static inline bool is_swiotlb_fixed(struct io_tlb_mem *mem, phys_addr_t paddr)
+{
+       return paddr >= mem->start && paddr < mem->end;
+}
+
+/**
+ * is_swiotlb_buffer() - check if a physical address is allocated from the
+ *                       swiotlb pool
+ * @dev:       device which has mapped the buffer
+ * @paddr:     physical address within the DMA buffer
+ *
+ * Check if @paddr points into a bounce buffer.
+ *
+ * Return:
+ * * %true if @paddr points into a bounce buffer
+ * * %false otherwise
+ */
 static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
 {
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 
-       return mem && paddr >= mem->start && paddr < mem->end;
+       return mem && is_swiotlb_fixed(mem, paddr);
 }
 
 static inline bool is_swiotlb_force_bounce(struct device *dev)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index af2e304c672c..96ba93be6772 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -76,6 +76,10 @@ struct io_tlb_mem io_tlb_default_mem;
 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
 static unsigned long default_nareas;
 
+static void swiotlb_copy(struct device *dev, phys_addr_t orig_addr,
+               unsigned char *vaddr, size_t size, size_t alloc_size,
+               unsigned int tlb_offset, enum dma_data_direction dir);
+
 /**
  * struct io_tlb_area - IO TLB memory area descriptor
  *
@@ -480,7 +484,6 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
        int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = mem->slots[index].orig_addr;
        size_t alloc_size = mem->slots[index].alloc_size;
-       unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
        unsigned int tlb_offset, orig_addr_offset;
 
@@ -497,6 +500,34 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
        }
 
        tlb_offset -= orig_addr_offset;
+       swiotlb_copy(dev, orig_addr, vaddr, size, alloc_size, tlb_offset, dir);
+}
+
+/**
+ * swiotlb_copy() - copy swiotlb buffer content, checking for overflows.
+ * @dev:       device which has mapped the bounce buffer
+ * @orig_addr: physical address of the original buffer
+ * @vaddr:     virtual address inside the bounce buffer
+ * @size:      number of bytes to copy
+ * @alloc_size:        total allocated size of the bounce buffer
+ * @tlb_offset:        offset within the bounce buffer
+ * @dir:       direction of the data transfer
+ *
+ * If @dir is %DMA_TO_DEVICE, copy data from the original buffer to the
+ * bounce buffer, otherwise copy from the bounce buffer to the original
+ * buffer.
+ *
+ * The original buffer may be in high memory; that's why @orig_addr is
+ * a physical address. Note that @orig_addr is the address of the
+ * beginning of the original buffer, not of the first byte to be
+ * copied; copying starts at offset @tlb_offset. Passing the base
+ * address and the offset separately makes it possible to detect
+ * accesses beyond the allocated size.
+ */
+static void swiotlb_copy(struct device *dev, phys_addr_t orig_addr,
+               unsigned char *vaddr, size_t size, size_t alloc_size,
+               unsigned int tlb_offset, enum dma_data_direction dir)
+{
+       unsigned long pfn = PFN_DOWN(orig_addr);
+
        if (tlb_offset > alloc_size) {
                dev_WARN_ONCE(dev, 1,
                        "Buffer overflow detected. Allocation size: %zu. 
Mapping size: %zu+%u.\n",
@@ -727,15 +758,65 @@ static unsigned long mem_used(struct io_tlb_mem *mem)
        return used;
 }
 
+/**
+ * swiotlb_fixed_map() - allocate a bounce buffer from fixed slots
+ * @dev:       device which maps the buffer
+ * @orig_addr: address of the original buffer
+ * @alloc_size:        total size of the original buffer
+ * @alloc_align_mask:
+ *             required physical alignment of the I/O buffer
+ * @attrs:     optional DMA attributes for the map operation
+ *
+ * Search for a suitable slot or sequence of slots and initialize them
+ * for use with the original buffer.
+ *
+ * Return: Physical address of the bounce buffer, or %DMA_MAPPING_ERROR.
+ */
+static phys_addr_t swiotlb_fixed_map(struct device *dev, phys_addr_t orig_addr,
+                       size_t alloc_size, unsigned int alloc_align_mask,
+                       unsigned long attrs)
+{
+       struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
+       unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+       int index = swiotlb_find_slots(dev, orig_addr,
+                                      alloc_size + offset, alloc_align_mask);
+       unsigned int i;
+
+       if (index == -1)
+               return (phys_addr_t)DMA_MAPPING_ERROR;
+
+       /*
+        * Save away the mapping from the original address to the DMA
+        * address.  This is needed when we sync the memory.  The caller
+        * is responsible for bouncing the buffer if needed.
+        */
+       for (i = 0; i < nr_slots(alloc_size + offset); i++)
+               mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
+       return slot_addr(mem->start, index) + offset;
+}
+
+/**
+ * swiotlb_tbl_map_single() - map DMA buffer to a bounce buffer
+ * @dev:       device which maps the buffer
+ * @orig_addr: address of the original buffer
+ * @mapping_size: size of the original buffer to be synced now
+ * @alloc_size:        total size of the original buffer
+ * @alloc_align_mask:
+ *             required physical alignment of the I/O buffer
+ * @dir:       direction of the data transfer
+ * @attrs:     optional DMA attributes for the map operation
+ *
+ * Create a mapping of the DMA buffer into a bounce buffer and copy the
+ * original data.
+ *
+ * Return: Physical address of the bounce buffer, or %DMA_MAPPING_ERROR.
+ */
 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
                size_t mapping_size, size_t alloc_size,
                unsigned int alloc_align_mask, enum dma_data_direction dir,
                unsigned long attrs)
 {
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
-       unsigned int offset = swiotlb_align_offset(dev, orig_addr);
-       unsigned int i;
-       int index;
        phys_addr_t tlb_addr;
 
        if (!mem || !mem->nslabs) {
@@ -753,24 +834,17 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }
 
-       index = swiotlb_find_slots(dev, orig_addr,
-                                  alloc_size + offset, alloc_align_mask);
-       if (index == -1) {
+       tlb_addr = swiotlb_fixed_map(dev, orig_addr, alloc_size,
+                                    alloc_align_mask, attrs);
+
+       if (tlb_addr == (phys_addr_t)DMA_MAPPING_ERROR) {
                if (!(attrs & DMA_ATTR_NO_WARN))
                        dev_warn_ratelimited(dev,
-       "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
-                                alloc_size, mem->nslabs, mem_used(mem));
-               return (phys_addr_t)DMA_MAPPING_ERROR;
+                               "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
+                               alloc_size, mem->nslabs, mem_used(mem));
+               return tlb_addr;
        }
 
-       /*
-        * Save away the mapping from the original address to the DMA address.
-        * This is needed when we sync the memory.  Then we sync the buffer if
-        * needed.
-        */
-       for (i = 0; i < nr_slots(alloc_size + offset); i++)
-               mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
-       tlb_addr = slot_addr(mem->start, index) + offset;
        /*
         * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
         * to the tlb buffer, if we knew for sure the device will
-- 
2.25.1
