The object iovad->head is allocated statically (embedded in struct iova_domain) when SR-IOV is enabled, but it is freed dynamically when SR-IOV is disabled, which causes a kmemleak report. Change the allocation from static to dynamic.
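Note: only a forward declaration of alloc_and_init_iova() is added below; the helper itself already exists in drivers/iommu/iova.c (reserve_iova() allocates its rbtree nodes through it). For reference, a rough sketch of its expected shape (the exact body in this tree may differ):

static struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        /* Allocate the node from the iova cache so it can later be
         * released with free_iova_mem() like any other rbtree entry. */
        iova = alloc_iova_mem();
        if (iova) {
                iova->pfn_lo = pfn_lo;
                iova->pfn_hi = pfn_hi;
        }

        return iova;
}

With iovad->head obtained this way, tearing down the domain releases it through the normal dynamic path instead of passing an object embedded in struct iova_domain to the free routine.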
Signed-off-by: Peng Ju Zhou <pengju.z...@amd.com>
---
 drivers/iommu/iova.c | 15 ++++++++-------
 include/linux/iova.h |  4 ++--
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 2371524796d3..505881d8d97f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -26,6 +26,8 @@ static void free_iova_rcaches(struct iova_domain *iovad);
 static void fq_destroy_all_entries(struct iova_domain *iovad);
 static void fq_flush_timeout(struct timer_list *t);
 static void free_global_cached_iovas(struct iova_domain *iovad);
+static inline struct iova *alloc_and_init_iova(unsigned long pfn_lo,
+                                               unsigned long pfn_hi);
 
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -47,17 +49,16 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 
         INIT_LIST_HEAD(&iovad->holes);
 
-        iovad->head.pfn_lo = 0;
-        iovad->head.pfn_hi = start_pfn;
-        rb_link_node(&iovad->head.node, NULL, &iovad->rbroot.rb_node);
-        rb_insert_color(&iovad->head.node, &iovad->rbroot);
-        list_add(&iovad->head.hole, &iovad->holes);
+        iovad->head = alloc_and_init_iova(0, start_pfn);
+        rb_link_node(&iovad->head->node, NULL, &iovad->rbroot.rb_node);
+        rb_insert_color(&iovad->head->node, &iovad->rbroot);
+        list_add(&iovad->head->hole, &iovad->holes);
 
         iovad->tail.pfn_lo = IOVA_ANCHOR;
         iovad->tail.pfn_hi = IOVA_ANCHOR;
         rb_link_node(&iovad->tail.node,
-                     &iovad->head.node,
-                     &iovad->head.node.rb_right);
+                     &iovad->head->node,
+                     &iovad->head->node.rb_right);
         rb_insert_color(&iovad->tail.node, &iovad->rbroot);
 
         init_iova_rcaches(iovad);
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 076eb6cfc613..553905ef41fe 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -81,7 +81,7 @@ struct iova_domain {
                                                    have been finished */
         struct list_head holes;
-        struct iova head, tail;         /* rbtree lookup anchors */
+        struct iova *head, tail;        /* rbtree lookup anchors */
 
         struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
 
         iova_flush_cb flush_cb;         /* Call-Back function to flush IOMMU
@@ -252,7 +252,7 @@ static inline void free_cpu_cached_iovas(unsigned int cpu,
 
 static inline unsigned long iovad_start_pfn(struct iova_domain *iovad)
 {
-        return iovad->head.pfn_hi;
+        return iovad->head->pfn_hi;
 }
 
 #endif
-- 
2.17.1