Bounce-buffering makes the system spend more time copying
I/O data. When an I/O transaction takes place between
a confidential and a non-confidential endpoint, there is
no way around that copy.

Introduce a device bitfield to indicate that the device
doesn't need to perform bounce buffering. A capable
device may set it to avoid copying data around.

Signed-off-by: Roman Kisel <rom...@linux.microsoft.com>
---
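Notes (not for the changelog): below is a minimal sketch of how a
capable driver might opt in during probe. The foo_probe() name and
its surroundings are hypothetical; only the use_priv_pages_for_io
flag and the behavior described in the comments come from this patch:

	/*
	 * Hypothetical probe path for a device whose I/O can go
	 * through private (encrypted) pages and therefore needs
	 * no bounce buffering.
	 */
	static int foo_probe(struct device *dev)
	{
		/*
		 * With the flag set, force_dma_unencrypted() and
		 * is_swiotlb_force_bounce() return false, and
		 * phys_to_dma() skips __sme_set().
		 */
		dev->use_priv_pages_for_io = true;

		/* ... rest of probe: map regions, request IRQs ... */
		return 0;
	}
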
 arch/x86/mm/mem_encrypt.c  | 3 +++
 include/linux/device.h     | 8 ++++++++
 include/linux/dma-direct.h | 3 +++
 include/linux/swiotlb.h    | 3 +++
 4 files changed, 17 insertions(+)

diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 95bae74fdab2..6349a02a1da3 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -19,6 +19,9 @@
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
 {
+       if (dev->use_priv_pages_for_io)
+               return false;
+
        /*
         * For SEV, all DMA must be to unencrypted addresses.
         */
diff --git a/include/linux/device.h b/include/linux/device.h
index 80a5b3268986..4aa4a6fd9580 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -725,6 +725,8 @@ struct device_physical_location {
  * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
  * @dma_iommu: Device is using default IOMMU implementation for DMA and
  *             doesn't rely on dma_ops structure.
+ * @use_priv_pages_for_io: Device is using private pages for I/O, no need to
+ *             bounce-buffer.
  *
  * At the lowest level, every device in a Linux system is represented by an
  * instance of struct device. The device structure contains the information
@@ -843,6 +845,7 @@ struct device {
 #ifdef CONFIG_IOMMU_DMA
        bool                    dma_iommu:1;
 #endif
+       bool                    use_priv_pages_for_io:1;
 };
 
 /**
@@ -1079,6 +1082,11 @@ static inline bool dev_removable_is_valid(struct device *dev)
        return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED;
 }
 
+static inline bool dev_priv_pages_for_io(struct device *dev)
+{
+       return dev->use_priv_pages_for_io;
+}
+
 /*
  * High level routines for use by the bus drivers
  */
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index d7e30d4f7503..b096369f847e 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -94,6 +94,9 @@ static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
  */
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
+       if (dev_priv_pages_for_io(dev))
+               return phys_to_dma_unencrypted(dev, paddr);
+
        return __sme_set(phys_to_dma_unencrypted(dev, paddr));
 }
 
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 3dae0f592063..35ee10641b42 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -173,6 +173,9 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
 {
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 
+       if (dev_priv_pages_for_io(dev))
+               return false;
+
        return mem && mem->force_bounce;
 }
 
-- 
2.43.0

