This is necessary to avoid exposing valid kernel data to any malicious device.
Suggested-by: Christoph Hellwig <h...@lst.de>
Signed-off-by: Lu Baolu <baolu...@linux.intel.com>
---
 kernel/dma/swiotlb.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 89066efa3840..04bea5a87462 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -35,6 +35,7 @@
 #include <linux/scatterlist.h>
 #include <linux/mem_encrypt.h>
 #include <linux/set_memory.h>
+#include <linux/pci.h>
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #endif
@@ -458,6 +459,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	unsigned long offset_slots;
 	unsigned long max_slots;
 	unsigned long tmp_io_tlb_used;
+	void *zero_addr;
+	size_t zero_size;
 
 	if (no_iotlb_memory)
 		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
@@ -565,9 +568,20 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	 */
 	for (i = 0; i < nslots; i++)
 		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
+
+	zero_addr = phys_to_virt(tlb_addr);
+	zero_size = alloc_size;
+
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
 		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
+		zero_addr += mapping_size;
+		zero_size -= mapping_size;
+	}
+
+	/* Zero out the bounce buffer if the consumer is untrusted. */
+	if (dev_is_untrusted(hwdev) && zero_size)
+		memset(zero_addr, 0, zero_size);
 
 	return tlb_addr;
 }
-- 
2.17.1
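
For readers following along: the hunk above clears only the part of the bounce
slot that the bounce copy does not overwrite, and the whole slot when nothing
is copied in (e.g. DMA_FROM_DEVICE). Below is a minimal user-space sketch of
that logic, not kernel code; bounce_prepare() and its parameters are
hypothetical names standing in for the swiotlb_tbl_map_single() path.

	#include <string.h>

	/*
	 * Sketch of the zeroing logic: for DMA_TO_DEVICE/DMA_BIDIRECTIONAL
	 * the caller's mapping_size bytes are bounced into the slot, so
	 * only the remaining alloc_size - mapping_size bytes of slack need
	 * clearing; otherwise nothing was copied in and the whole slot is
	 * cleared.
	 */
	static void bounce_prepare(unsigned char *bounce, size_t alloc_size,
				   const unsigned char *orig,
				   size_t mapping_size,
				   int bounce_to_device, int untrusted)
	{
		unsigned char *zero_addr = bounce;
		size_t zero_size = alloc_size;

		if (bounce_to_device) {
			/* Stands in for the swiotlb_bounce() step. */
			memcpy(bounce, orig, mapping_size);
			zero_addr += mapping_size;
			zero_size -= mapping_size;
		}

		/* Stale data from an earlier mapping must not reach the device. */
		if (untrusted && zero_size)
			memset(zero_addr, 0, zero_size);
	}

Zeroing only the slack keeps the common DMA_TO_DEVICE case to a single
memset of the padding, rather than clearing bytes that the bounce copy is
about to overwrite anyway.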