On 19.08.2025 19:36, Leon Romanovsky wrote:
> From: Leon Romanovsky <leo...@nvidia.com>
>
> Convert the DMA debug infrastructure from page-based to physical
> address-based mapping as a preparation to rely on physical address for
> DMA mapping routines.
>
> The refactoring renames debug_dma_map_page() to debug_dma_map_phys() and
> changes its signature to accept a phys_addr_t parameter instead of struct
> page and offset. Similarly, debug_dma_unmap_page() becomes
> debug_dma_unmap_phys(). A new dma_debug_phy type is introduced to
> distinguish physical address mappings from other debug entry types. All
> callers throughout the codebase are updated to pass physical addresses
> directly, eliminating the need for page-to-physical conversion in the
> debug layer.
>
> This refactoring eliminates the need to convert between page pointers and
> physical addresses in the debug layer, making the code more efficient and
> consistent with the DMA mapping API's physical address focus.
>
> Reviewed-by: Jason Gunthorpe <j...@nvidia.com>
> Signed-off-by: Leon Romanovsky <leo...@nvidia.com>
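For readers following the thread, the interface change described above boils
down to the following (a minimal restatement distilled from the hunks quoted
further down, reusing the names from the patch; it is not new code):

        /* old, page-based form: the helper derived the physical address itself */
        debug_dma_map_page(dev, page, offset, size, dir, dma_addr, attrs);
        debug_dma_unmap_page(dev, dma_addr, size, dir);

        /* new, phys-based form: callers pass a phys_addr_t directly, e.g.
         * phys = page_to_phys(page) + offset in dma_map_page_attrs() */
        debug_dma_map_phys(dev, phys, size, dir, dma_addr, attrs);
        debug_dma_unmap_phys(dev, dma_addr, size, dir);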
This change needs to be applied on top of this patch:
https://lore.kernel.org/all/20250828-dma-debug-fix-noncoherent-dma-check-v1-1-76e9be0dd...@oss.qualcomm.com
so for the next version the easiest way would be to rebase this patchset onto
the
https://web.git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux.git/log/?h=dma-mapping-fixes
branch (resolving the conflicts is trivial).
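A minimal sketch of what that could look like on the command line (the local
branch name below is only a placeholder, and the fetch URL is assumed to be
the plain clone URL behind the link above):

    # fetch the dma-mapping-fixes branch and rebase the series on top of it
    git fetch https://web.git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux.git dma-mapping-fixes
    git rebase FETCH_HEAD my-dma-phys-series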
> ---
>  Documentation/core-api/dma-api.rst |  4 ++--
>  kernel/dma/debug.c                 | 28 +++++++++++++++++-----------
>  kernel/dma/debug.h                 | 16 +++++++---------
>  kernel/dma/mapping.c               | 15 ++++++++-------
>  4 files changed, 34 insertions(+), 29 deletions(-)
>
> diff --git a/Documentation/core-api/dma-api.rst b/Documentation/core-api/dma-api.rst
> index 3087bea715ed..ca75b3541679 100644
> --- a/Documentation/core-api/dma-api.rst
> +++ b/Documentation/core-api/dma-api.rst
> @@ -761,7 +761,7 @@ example warning message may look like this::
>          [<ffffffff80235177>] find_busiest_group+0x207/0x8a0
>          [<ffffffff8064784f>] _spin_lock_irqsave+0x1f/0x50
>          [<ffffffff803c7ea3>] check_unmap+0x203/0x490
> -        [<ffffffff803c8259>] debug_dma_unmap_page+0x49/0x50
> +        [<ffffffff803c8259>] debug_dma_unmap_phys+0x49/0x50
>          [<ffffffff80485f26>] nv_tx_done_optimized+0xc6/0x2c0
>          [<ffffffff80486c13>] nv_nic_irq_optimized+0x73/0x2b0
>          [<ffffffff8026df84>] handle_IRQ_event+0x34/0x70
> @@ -855,7 +855,7 @@ that a driver may be leaking mappings.
>  dma-debug interface debug_dma_mapping_error() to debug drivers that fail
>  to check DMA mapping errors on addresses returned by dma_map_single() and
>  dma_map_page() interfaces. This interface clears a flag set by
> -debug_dma_map_page() to indicate that dma_mapping_error() has been called by
> +debug_dma_map_phys() to indicate that dma_mapping_error() has been called by
>  the driver. When driver does unmap, debug_dma_unmap() checks the flag and if
>  this flag is still set, prints warning message that includes call trace that
>  leads up to the unmap. This interface can be called from dma_mapping_error()
> diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
> index e43c6de2bce4..da6734e3a4ce 100644
> --- a/kernel/dma/debug.c
> +++ b/kernel/dma/debug.c
> @@ -39,6 +39,7 @@ enum {
>          dma_debug_sg,
>          dma_debug_coherent,
>          dma_debug_resource,
> +        dma_debug_phy,
>  };
>
>  enum map_err_types {
> @@ -141,6 +142,7 @@ static const char *type2name[] = {
>          [dma_debug_sg] = "scatter-gather",
>          [dma_debug_coherent] = "coherent",
>          [dma_debug_resource] = "resource",
> +        [dma_debug_phy] = "phy",
>  };
>
>  static const char *dir2name[] = {
> @@ -1201,9 +1203,8 @@ void debug_dma_map_single(struct device *dev, const void *addr,
>  }
>  EXPORT_SYMBOL(debug_dma_map_single);
>
> -void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
> -                        size_t size, int direction, dma_addr_t dma_addr,
> -                        unsigned long attrs)
> +void debug_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
> +                int direction, dma_addr_t dma_addr, unsigned long attrs)
>  {
>          struct dma_debug_entry *entry;
>
> @@ -1218,19 +1219,24 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
>                  return;
>
>          entry->dev = dev;
> -        entry->type = dma_debug_single;
> -        entry->paddr = page_to_phys(page) + offset;
> +        entry->type = dma_debug_phy;
> +        entry->paddr = phys;
>          entry->dev_addr = dma_addr;
>          entry->size = size;
>          entry->direction = direction;
>          entry->map_err_type = MAP_ERR_NOT_CHECKED;
>
> -        check_for_stack(dev, page, offset);
> +        if (!(attrs & DMA_ATTR_MMIO)) {
> +                struct page *page = phys_to_page(phys);
> +                size_t offset = offset_in_page(page);
>
> -        if (!PageHighMem(page)) {
> -                void *addr = page_address(page) + offset;
> +                check_for_stack(dev, page, offset);
>
> -                check_for_illegal_area(dev, addr, size);
> +                if (!PageHighMem(page)) {
> +                        void *addr = page_address(page) + offset;
> +
> +                        check_for_illegal_area(dev, addr, size);
> +                }
>          }
>
>          add_dma_entry(entry, attrs);
> @@ -1274,11 +1280,11 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
>  }
>  EXPORT_SYMBOL(debug_dma_mapping_error);
>
> -void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
> +void debug_dma_unmap_phys(struct device *dev, dma_addr_t dma_addr,
>                          size_t size, int direction)
>  {
>          struct dma_debug_entry ref = {
> -                .type = dma_debug_single,
> +                .type = dma_debug_phy,
>                  .dev = dev,
>                  .dev_addr = dma_addr,
>                  .size = size,
> diff --git a/kernel/dma/debug.h b/kernel/dma/debug.h
> index f525197d3cae..76adb42bffd5 100644
> --- a/kernel/dma/debug.h
> +++ b/kernel/dma/debug.h
> @@ -9,12 +9,11 @@
>  #define _KERNEL_DMA_DEBUG_H
>
>  #ifdef CONFIG_DMA_API_DEBUG
> -extern void debug_dma_map_page(struct device *dev, struct page *page,
> -                               size_t offset, size_t size,
> -                               int direction, dma_addr_t dma_addr,
> +extern void debug_dma_map_phys(struct device *dev, phys_addr_t phys,
> +                               size_t size, int direction, dma_addr_t dma_addr,
>                                 unsigned long attrs);
>
> -extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
> +extern void debug_dma_unmap_phys(struct device *dev, dma_addr_t addr,
>                                   size_t size, int direction);
>
>  extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
> @@ -55,14 +54,13 @@ extern void debug_dma_sync_sg_for_device(struct device *dev,
>                                           struct scatterlist *sg,
>                                           int nelems, int direction);
>  #else /* CONFIG_DMA_API_DEBUG */
> -static inline void debug_dma_map_page(struct device *dev, struct page *page,
> -                                      size_t offset, size_t size,
> -                                      int direction, dma_addr_t dma_addr,
> -                                      unsigned long attrs)
> +static inline void debug_dma_map_phys(struct device *dev, phys_addr_t phys,
> +                                      size_t size, int direction,
> +                                      dma_addr_t dma_addr, unsigned long attrs)
>  {
>  }
>
> -static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
> +static inline void debug_dma_unmap_phys(struct device *dev, dma_addr_t addr,
>                                          size_t size, int direction)
>  {
>  }
> diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
> index 107e4a4d251d..4c1dfbabb8ae 100644
> --- a/kernel/dma/mapping.c
> +++ b/kernel/dma/mapping.c
> @@ -157,6 +157,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
>                  unsigned long attrs)
>  {
>          const struct dma_map_ops *ops = get_dma_ops(dev);
> +        phys_addr_t phys = page_to_phys(page) + offset;
>          dma_addr_t addr;
>
>          BUG_ON(!valid_dma_direction(dir));
> @@ -165,16 +166,15 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
>                  return DMA_MAPPING_ERROR;
>
>          if (dma_map_direct(dev, ops) ||
> -            arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
> +            arch_dma_map_page_direct(dev, phys + size))
>                  addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
>          else if (use_dma_iommu(dev))
>                  addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs);
>          else
>                  addr = ops->map_page(dev, page, offset, size, dir, attrs);
>          kmsan_handle_dma(page, offset, size, dir);
> -        trace_dma_map_page(dev, page_to_phys(page) + offset, addr, size, dir,
> -                           attrs);
> -        debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
> +        trace_dma_map_page(dev, phys, addr, size, dir, attrs);
> +        debug_dma_map_phys(dev, phys, size, dir, addr, attrs);
>
>          return addr;
>  }
> @@ -194,7 +194,7 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
>          else
>                  ops->unmap_page(dev, addr, size, dir, attrs);
>          trace_dma_unmap_page(dev, addr, size, dir, attrs);
> -        debug_dma_unmap_page(dev, addr, size, dir);
> +        debug_dma_unmap_phys(dev, addr, size, dir);
>  }
>  EXPORT_SYMBOL(dma_unmap_page_attrs);
>
> @@ -712,7 +712,8 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
>          if (page) {
>                  trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
>                                        size, dir, gfp, 0);
> -                debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
> +                debug_dma_map_phys(dev, page_to_phys(page), size, dir,
> +                                   *dma_handle, 0);
>          } else {
>                  trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
>          }
> @@ -738,7 +739,7 @@ void dma_free_pages(struct device *dev, size_t size, struct page *page,
>                  dma_addr_t dma_handle, enum dma_data_direction dir)
>  {
>          trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
> -        debug_dma_unmap_page(dev, dma_handle, size, dir);
> +        debug_dma_unmap_phys(dev, dma_handle, size, dir);
>          __dma_free_pages(dev, size, page, dma_handle, dir);
>  }
>  EXPORT_SYMBOL_GPL(dma_free_pages);

Best regards
-- 
Marek Szyprowski, PhD
Samsung R&D Institute Poland