From: Joerg Roedel <jroe...@suse.de>

Add a counter to struct pasid_state so that the original page table is
not restored before all nested invalidate_range_start/invalidate_range_end
sections have finished.
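
For context, this is the usual counting pattern for nested begin/end
callbacks: only the first invalidate_range_start switches the PASID to the
empty page table, and only the last matching invalidate_range_end restores
the real one. A minimal user-space sketch of that pattern, using C11
stdatomic in place of the kernel's atomic_t helpers (all names below are
illustrative, not taken from the patch):

#include <stdatomic.h>
#include <stdio.h>

struct range_tracker {
	atomic_int nest_count;          /* number of open start/end sections */
};

static void switch_to_empty_table(void)  { printf("gcr3 -> empty page table\n"); }
static void restore_original_table(void) { printf("gcr3 -> original page table\n"); }

static void range_start(struct range_tracker *t)
{
	/* Only the first (outermost) start switches to the empty table. */
	if (atomic_fetch_add(&t->nest_count, 1) == 0)
		switch_to_empty_table();
}

static void range_end(struct range_tracker *t)
{
	/* Only the last (outermost) end restores the original table. */
	if (atomic_fetch_sub(&t->nest_count, 1) == 1)
		restore_original_table();
}

int main(void)
{
	struct range_tracker t = { .nest_count = 0 };

	range_start(&t);  /* outermost: switches to empty table */
	range_start(&t);  /* nested:    no switch               */
	range_end(&t);    /* nested:    no restore yet          */
	range_end(&t);    /* outermost: restores original table */
	return 0;
}

atomic_fetch_add() and atomic_fetch_sub() return the previous value, so the
"== 0" and "== 1" checks above correspond to the patch's
atomic_add_return(...) == 1 and atomic_dec_and_test() respectively.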

Signed-off-by: Joerg Roedel <jroe...@suse.de>
---
 drivers/iommu/amd_iommu_v2.c |   17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d9309a0..9ed9da2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -45,6 +45,8 @@ struct pri_queue {
 struct pasid_state {
        struct list_head list;                  /* For global state-list */
        atomic_t count;                         /* Reference count */
+       atomic_t mmu_notifier_count;            /* Counting nested mmu_notifier
+                                                  calls */
        struct task_struct *task;               /* Task bound to this PASID */
        struct mm_struct *mm;                   /* mm_struct for the faults */
        struct mmu_notifier mn;                 /* mmu_otifier handle */
@@ -433,8 +435,11 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;
 
-       amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
-                                 __pa(empty_page_table));
+       if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
+               amd_iommu_domain_set_gcr3(dev_state->domain,
+                                         pasid_state->pasid,
+                                         __pa(empty_page_table));
+       }
 }
 
 static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -447,8 +452,11 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;
 
-       amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
-                                 __pa(pasid_state->mm->pgd));
+       if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
+               amd_iommu_domain_set_gcr3(dev_state->domain,
+                                         pasid_state->pasid,
+                                         __pa(pasid_state->mm->pgd));
+       }
 }
 
 static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -640,6 +648,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
                goto out;
 
        atomic_set(&pasid_state->count, 1);
+       atomic_set(&pasid_state->mmu_notifier_count, 0);
        init_waitqueue_head(&pasid_state->wq);
        spin_lock_init(&pasid_state->lock);
 
-- 
1.7.9.5

