From: Leon Romanovsky <leo...@nvidia.com> Convert the KMSAN DMA handling function from page-based to physical address-based interface.
The refactoring renames kmsan_handle_dma() parameters from accepting (struct page *page, size_t offset, size_t size) to (phys_addr_t phys, size_t size). A pfn_valid() check is added so that KMSAN operations are skipped for addresses that are not backed by a struct page. As part of this change, support for highmem addresses is implemented using kmap_local_page() to handle both lowmem and highmem regions properly. All callers throughout the codebase are updated to use the new phys_addr_t based interface. Signed-off-by: Leon Romanovsky <leo...@nvidia.com> --- drivers/virtio/virtio_ring.c | 4 ++-- include/linux/kmsan.h | 12 +++++++----- kernel/dma/mapping.c | 2 +- mm/kmsan/hooks.c | 36 +++++++++++++++++++++++++++++------- tools/virtio/linux/kmsan.h | 2 +- 5 files changed, 40 insertions(+), 16 deletions(-) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index b784aab66867..dab49385e3e8 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -378,7 +378,7 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist * is initialized by the hardware. Explicitly check/unpoison it * depending on the direction. */ - kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction); + kmsan_handle_dma(sg_phys(sg), sg->length, direction); *addr = (dma_addr_t)sg_phys(sg); return 0; } @@ -3149,7 +3149,7 @@ dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, struct vring_virtqueue *vq = to_vvq(_vq); if (!vq->use_dma_api) { - kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir); + kmsan_handle_dma(virt_to_phys(ptr), size, dir); return (dma_addr_t)virt_to_phys(ptr); } diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h index 2b1432cc16d5..6f27b9824ef7 100644 --- a/include/linux/kmsan.h +++ b/include/linux/kmsan.h @@ -182,8 +182,7 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end); /** * kmsan_handle_dma() - Handle a DMA data transfer. 
- * @page: first page of the buffer. - * @offset: offset of the buffer within the first page. + * @phys: physical address of the buffer. * @size: buffer size. * @dir: one of possible dma_data_direction values. * @@ -191,8 +190,11 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end); * * checks the buffer, if it is copied to device; * * initializes the buffer, if it is copied from device; * * does both, if this is a DMA_BIDIRECTIONAL transfer. + * + * The function handles page lookup internally and supports both lowmem + * and highmem addresses. */ -void kmsan_handle_dma(struct page *page, size_t offset, size_t size, +void kmsan_handle_dma(phys_addr_t phys, size_t size, enum dma_data_direction dir); /** @@ -372,8 +374,8 @@ static inline void kmsan_iounmap_page_range(unsigned long start, { } -static inline void kmsan_handle_dma(struct page *page, size_t offset, - size_t size, enum dma_data_direction dir) +static inline void kmsan_handle_dma(phys_addr_t phys, size_t size, + enum dma_data_direction dir) { } diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 80481a873340..709405d46b2b 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -172,7 +172,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, addr = iommu_dma_map_phys(dev, phys, size, dir, attrs); else addr = ops->map_page(dev, page, offset, size, dir, attrs); - kmsan_handle_dma(page, offset, size, dir); + kmsan_handle_dma(phys, size, dir); trace_dma_map_phys(dev, phys, addr, size, dir, attrs); debug_dma_map_phys(dev, phys, size, dir, addr, attrs); diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c index 97de3d6194f0..eab7912a3bf0 100644 --- a/mm/kmsan/hooks.c +++ b/mm/kmsan/hooks.c @@ -336,25 +336,48 @@ static void kmsan_handle_dma_page(const void *addr, size_t size, } /* Helper function to handle DMA data transfers. 
*/ -void kmsan_handle_dma(struct page *page, size_t offset, size_t size, +void kmsan_handle_dma(phys_addr_t phys, size_t size, enum dma_data_direction dir) { u64 page_offset, to_go, addr; + struct page *page; + void *kaddr; - if (PageHighMem(page)) + if (!pfn_valid(PHYS_PFN(phys))) return; - addr = (u64)page_address(page) + offset; + + page = phys_to_page(phys); + page_offset = offset_in_page(phys); + /* * The kernel may occasionally give us adjacent DMA pages not belonging * to the same allocation. Process them separately to avoid triggering * internal KMSAN checks. */ while (size > 0) { - page_offset = offset_in_page(addr); to_go = min(PAGE_SIZE - page_offset, (u64)size); + + if (PageHighMem(page)) + /* Handle highmem pages using kmap */ + kaddr = kmap_local_page(page); + else + /* Lowmem pages can be accessed directly */ + kaddr = page_address(page); + + addr = (u64)kaddr + page_offset; kmsan_handle_dma_page((void *)addr, to_go, dir); - addr += to_go; + + if (PageHighMem(page)) + kunmap_local(kaddr); + + phys += to_go; size -= to_go; + + /* Move to next page if needed */ + if (size > 0) { + page = phys_to_page(phys); + page_offset = offset_in_page(phys); + } } } EXPORT_SYMBOL_GPL(kmsan_handle_dma); @@ -366,8 +389,7 @@ void kmsan_handle_dma_sg(struct scatterlist *sg, int nents, int i; for_each_sg(sg, item, nents, i) - kmsan_handle_dma(sg_page(item), item->offset, item->length, - dir); + kmsan_handle_dma(sg_phys(item), item->length, dir); } /* Functions from kmsan-checks.h follow. */ diff --git a/tools/virtio/linux/kmsan.h b/tools/virtio/linux/kmsan.h index 272b5aa285d5..6cd2e3efd03d 100644 --- a/tools/virtio/linux/kmsan.h +++ b/tools/virtio/linux/kmsan.h @@ -4,7 +4,7 @@ #include <linux/gfp.h> -inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size, +inline void kmsan_handle_dma(phys_addr_t phys, size_t size, enum dma_data_direction dir) { } -- 2.49.0