The last jump to free_exit in mm_iommu_do_alloc() happens after the page
pointers in struct mm_iommu_table_group_mem_t have already been converted
to physical addresses. Calling put_page() on these physical addresses
will therefore likely crash. Convert the physical addresses back to page
pointers during the error cleanup.

Signed-off-by: Jan Kara <j...@suse.cz>
---
 arch/powerpc/mm/book3s64/iommu_api.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

 Beware, this is completely untested; it was spotted just by code audit.
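
 For context, here is a minimal sketch of the pattern involved. It is a
 simplified illustration, not the upstream code: the helper name and the
 "fail" flag are made up, and other struct members are omitted. The point
 is that the pinned pages and their physical addresses share one
 vmalloc'ed array via a union, so once the in-place conversion has run,
 mem->hpages[i] no longer holds a struct page pointer and must be
 converted back before the put_page() loop in free_exit touches it.

/*
 * Simplified sketch of the pattern the patch addresses; the function
 * name and the "fail" parameter are hypothetical.
 */
struct mm_iommu_table_group_mem_t {
	/* ... other members omitted ... */
	union {
		struct page **hpages;	/* filled while pinning the pages */
		u64 *hpas;		/* later overwritten with physical addresses */
	};
};

static long sketch_alloc_cleanup(struct mm_iommu_table_group_mem_t *mem,
				 long entries, long pinned, bool fail)
{
	long i, ret = 0;

	/* In-place conversion: struct page * -> physical address */
	for (i = 0; i < entries; ++i)
		mem->hpas[i] = page_to_pfn(mem->hpages[i]) << PAGE_SHIFT;

	if (!fail)			/* hypothetical failure condition */
		return 0;

	ret = -EINVAL;

	/*
	 * Jumping straight to the put_page() loop at this point would pass
	 * a physical address where a struct page * is expected.  The patch
	 * converts back first (the new convert_exit label):
	 */
	for (i = 0; i < pinned; ++i)
		mem->hpages[i] = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);

	/* existing free_exit: drop the references taken while pinning */
	for (i = 0; i < pinned; ++i)
		put_page(mem->hpages[i]);

	return ret;
}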

diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index 56cc84520577..06c403381c9c 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -154,7 +154,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
                                       (mem2->entries << PAGE_SHIFT)))) {
                        ret = -EINVAL;
                        mutex_unlock(&mem_list_mutex);
-                       goto free_exit;
+                       goto convert_exit;
                }
        }
 
@@ -166,6 +166,9 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 
        return 0;
 
+convert_exit:
+       for (i = 0; i < pinned; i++)
+               mem->hpages[i] = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
 free_exit:
        /* free the reference taken */
        for (i = 0; i < pinned; i++)
-- 
2.16.4
