When swiotlb is full, the kernel would print io_tlb_used. However, the
result might be inaccurate at that time because we have already left the
critical section protected by the spinlock.

Therefore, we back up io_tlb_used into a local variable before leaving
the critical section.

Fixes: 83ca25948940 ("swiotlb: dump used and total slots when swiotlb buffer is full")
Suggested-by: Håkon Bugge <haakon.bu...@oracle.com>
Signed-off-by: Dongli Zhang <dongli.zh...@oracle.com>
---
 kernel/dma/swiotlb.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 857e823..6f7619c 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -452,6 +452,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
        unsigned long mask;
        unsigned long offset_slots;
        unsigned long max_slots;
+       unsigned long tmp_io_tlb_used;
 
        if (no_iotlb_memory)
                panic("Can not allocate SWIOTLB buffer earlier and can't now 
provide you with the DMA bounce buffer");
@@ -538,10 +539,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
        } while (index != wrap);
 
 not_found:
+       tmp_io_tlb_used = io_tlb_used;
+
        spin_unlock_irqrestore(&io_tlb_lock, flags);
        if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
-               dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total 
%lu, used %lu\n",
-                        size, io_tlb_nslabs, io_tlb_used);
+               dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total 
%lu (slots), used %lu (slots)\n",
+                        size, io_tlb_nslabs, tmp_io_tlb_used);
        return DMA_MAPPING_ERROR;
 found:
        io_tlb_used += nslots;
-- 
2.7.4

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to