On 2021/3/25 8:28, Mike Kravetz wrote:
> After making hugetlb lock irq safe and separating some functionality
> done under the lock, add some lockdep_assert_held to help verify
> locking.
> 

Looks good to me. Thanks.
Reviewed-by: Miaohe Lin <linmia...@huawei.com>
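
One note for anyone reading along: lockdep_assert_held() only does anything when
lockdep is enabled, so these annotations cost nothing in production builds.
Roughly (a sketch of the semantics, not the literal include/linux/lockdep.h text):

	#ifdef CONFIG_LOCKDEP
	/* warn once if the caller does not actually hold the lock */
	#define lockdep_assert_held(l)	\
		WARN_ON(debug_locks && !lockdep_is_held(l))
	#else
	/* compiles away when lockdep is not configured */
	#define lockdep_assert_held(l)	do { (void)(l); } while (0)
	#endif

i.e. with CONFIG_LOCKDEP=y a caller that reaches one of these helpers without
holding hugetlb_lock triggers a warning; otherwise the check vanishes.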

> Signed-off-by: Mike Kravetz <mike.krav...@oracle.com>
> ---
>  mm/hugetlb.c | 9 +++++++++
>  1 file changed, 9 insertions(+)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index e4c441b878f2..de5b3cf4a155 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1062,6 +1062,8 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
>  static void enqueue_huge_page(struct hstate *h, struct page *page)
>  {
>       int nid = page_to_nid(page);
> +
> +     lockdep_assert_held(&hugetlb_lock);
>       list_move(&page->lru, &h->hugepage_freelists[nid]);
>       h->free_huge_pages++;
>       h->free_huge_pages_node[nid]++;
> @@ -1073,6 +1075,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
>       struct page *page;
>       bool pin = !!(current->flags & PF_MEMALLOC_PIN);
>  
> +     lockdep_assert_held(&hugetlb_lock);
>       list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
>               if (pin && !is_pinnable_page(page))
>                       continue;
> @@ -1345,6 +1348,7 @@ static void remove_hugetlb_page(struct hstate *h, struct page *page,
>  {
>       int nid = page_to_nid(page);
>  
> +     lockdep_assert_held(&hugetlb_lock);
>       if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
>               return;
>  
> @@ -1690,6 +1694,7 @@ static struct page *remove_pool_huge_page(struct hstate *h,
>       int nr_nodes, node;
>       struct page *page = NULL;
>  
> +     lockdep_assert_held(&hugetlb_lock);
>       for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
>               /*
>                * If we're returning unused surplus pages, only examine
> @@ -1939,6 +1944,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
>       long needed, allocated;
>       bool alloc_ok = true;
>  
> +     lockdep_assert_held(&hugetlb_lock);
>       needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
>       if (needed <= 0) {
>               h->resv_huge_pages += delta;
> @@ -2032,6 +2038,7 @@ static void return_unused_surplus_pages(struct hstate *h,
>       struct page *page, *t_page;
>       struct list_head page_list;
>  
> +     lockdep_assert_held(&hugetlb_lock);
>       /* Uncommit the reservation */
>       h->resv_huge_pages -= unused_resv_pages;
>  
> @@ -2527,6 +2534,7 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
>       struct list_head page_list;
>       struct page *page, *next;
>  
> +     lockdep_assert_held(&hugetlb_lock);
>       if (hstate_is_gigantic(h))
>               return;
>  
> @@ -2573,6 +2581,7 @@ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
>  {
>       int nr_nodes, node;
>  
> +     lockdep_assert_held(&hugetlb_lock);
>       VM_BUG_ON(delta != -1 && delta != 1);
>  
>       if (delta < 0) {
> 
