Let's stop piggy-backing on the existing @b_dev_info.pages_lock and
instead use a separate lock for the huge page list. The huge page list
is only manipulated from process context, so there is no need to
disable interrupts while holding the new lock.
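
The resulting pattern for the huge page list looks as follows (enqueue
side only; this is just a sketch of what the diff below does):

	spin_lock(&b->huge_pages_lock);
	list_for_each_entry(page, pages, lru)
		vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
	/* keep the huge pages off the balloon compaction list */
	list_splice_init(pages, &b->huge_pages);
	spin_unlock(&b->huge_pages_lock);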

This is a preparation for changing the locking used to protect
balloon_dev_info.

While at it, talk about "page migration" instead of "page compaction".
We'll change that in core code soon as well.

Signed-off-by: David Hildenbrand <[email protected]>
---
 drivers/misc/vmw_balloon.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 52b8c0f1eead7..53e9335b6718c 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -354,10 +354,15 @@ struct vmballoon {
        /**
         * @huge_pages - list of the inflated 2MB pages.
         *
-        * Protected by @b_dev_info.pages_lock .
+        * Protected by @huge_pages_lock.
         */
        struct list_head huge_pages;
 
+       /**
+        * @huge_pages_lock: lock for the list of inflated 2MB pages.
+        */
+       spinlock_t huge_pages_lock;
+
        /**
         * @vmci_doorbell.
         *
@@ -987,7 +992,6 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
                                        unsigned int *n_pages,
                                        enum vmballoon_page_size_type page_size)
 {
-       unsigned long flags;
        struct page *page;
 
        if (page_size == VMW_BALLOON_4K_PAGE) {
@@ -995,9 +999,9 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
        } else {
                /*
                 * Keep the huge pages in a local list which is not available
-                * for the balloon compaction mechanism.
+                * for the balloon page migration.
                 */
-               spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+               spin_lock(&b->huge_pages_lock);
 
                list_for_each_entry(page, pages, lru) {
                        vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
@@ -1006,7 +1010,7 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
                list_splice_init(pages, &b->huge_pages);
               __count_vm_events(BALLOON_INFLATE, *n_pages *
                                 vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
-               spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
+               spin_unlock(&b->huge_pages_lock);
        }
 
        *n_pages = 0;
@@ -1033,7 +1037,6 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
 {
        struct page *page, *tmp;
        unsigned int i = 0;
-       unsigned long flags;
 
        /* In the case of 4k pages, use the compaction infrastructure */
        if (page_size == VMW_BALLOON_4K_PAGE) {
@@ -1043,7 +1046,7 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
        }
 
        /* 2MB pages */
-       spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
+       spin_lock(&b->huge_pages_lock);
        list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
                vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
 
@@ -1054,7 +1057,7 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
 
        __count_vm_events(BALLOON_DEFLATE,
                          i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
-       spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
+       spin_unlock(&b->huge_pages_lock);
        *n_pages = i;
 }
 
@@ -1828,6 +1831,7 @@ static int __init vmballoon_init(void)
                balloon.b_dev_info.migratepage = vmballoon_migratepage;
 
        INIT_LIST_HEAD(&balloon.huge_pages);
+       spin_lock_init(&balloon.huge_pages_lock);
        spin_lock_init(&balloon.comm_lock);
        init_rwsem(&balloon.conf_sem);
        balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
-- 
2.51.0

