The commit is pushed to "branch-rh9-5.14.0-427.44.1.vz9.80.x-ovz" and will 
appear at g...@bitbucket.org:openvz/vzkernel.git
after rh9-5.14.0-427.44.1.vz9.80.6
------>
commit 6c09eab1f677d38d8a07eed798ec482c6435b15d
Author: Alexander Atanasov <alexander.atana...@virtuozzo.com>
Date:   Fri Jan 24 17:35:35 2025 +0200
    dm-ploop: md_pages map all pages at creation time
    
    md_page is always present in memory. In that case
    md_page->page could always be mapped and we would not need to perform
    kmap_atomic/kunmap_atomic during each lookup.
    
    Convert bat updates to use kmap_local_page/kunmap_local.
    
    https://virtuozzo.atlassian.net/browse/VSTOR-91659
    Suggested-by: Denis V. Lunev <d...@openvz.org>
    Signed-off-by: Alexander Atanasov <alexander.atana...@virtuozzo.com>
    
    ======
    Patchset description:
    ploop: optimisations and scaling
    
    Ploop processes requests in different threads in parallel
    where possible, which results in a significant improvement in
    performance and makes further optimisations possible.
    
    Known bugs:
      - delayed metadata writeback is not working and is missing error handling
         - patch to disable it until fixed
      - fast path is not working - causes rcu lockups - patch to disable it
    
    Further improvements:
      - optimize md pages lookups
    
    Alexander Atanasov (50):
      dm-ploop: md_pages map all pages at creation time
      dm-ploop: Use READ_ONCE/WRITE_ONCE to access md page data
      dm-ploop: fsync after all pios are sent
      dm-ploop: move md status to use proper bitops
      dm-ploop: convert wait_list and wb_batch_llist to use lockless lists
      dm-ploop: convert enospc handling to use lockless lists
      dm-ploop: convert suspended_pios list to use lockless list
      dm-ploop: convert the rest of the lists to use llist variant
      dm-ploop: combine processing of pios thru prepare list and remove
        fsync worker
      dm-ploop: move from wq to kthread
      dm-ploop: move preparations of pios into the caller from worker
      dm-ploop: fast path execution for reads
      dm-ploop: do not use a wrapper for set_bit to make a page writeback
      dm-ploop: BAT use only one list for writeback
      dm-ploop: make md writeback timeout to be per page
      dm-ploop: add interface to disable bat writeback delay
      dm-ploop: convert wb_batch_list to lockless variant
      dm-ploop: convert high_prio to status
      dm-ploop: split cow processing into two functions
      dm-ploop: convert md page rw lock to spin lock
      dm-ploop: convert bat_rwlock to bat_lock spinlock
      dm-ploop: prepare bat updates under bat_lock
      dm-ploop: make ploop_bat_write_complete ready for parallel pio
        completion
      dm-ploop: make ploop_submit_metadata_writeback return number of
        requests sent
      dm-ploop: introduce pio runner threads
      dm-ploop: add pio list ids to be used when passing pios to runners
      dm-ploop: process pios via runners
      dm-ploop: disable metadata writeback delay
      dm-ploop: disable fast path
      dm-ploop: use lockless lists for chained cow updates list
      dm-ploop: use lockless lists for data ready pios
      dm-ploop: give runner threads better name
      dm-ploop: resize operation - add holes bitmap locking
      dm-ploop: remove unnecessary operations
      dm-ploop: use filp per thread
      dm-ploop: catch if we try to advance pio past bio end
      dm-ploop: support REQ_FUA for data pios
      dm-ploop: properly access nr_bat_entries
      dm-ploop: fix locking and improve error handling when submitting pios
      dm-ploop: fix how ENOTBLK is handled
      dm-ploop: sync when suspended or stopping
      dm-ploop: rework bat completion logic
      dm-ploop: rework logic in pio processing
      dm-ploop: end fsync pios in parallel
      dm-ploop: make filespace preallocations async
      dm-ploop: resubmit enospc pios from dispatcher thread
      dm-ploop: simplify discard completion
      dm-ploop: use GFP_ATOMIC instead of GFP_NOIO
      dm-ploop: fix locks used in mixed context
      dm-ploop: fix how current flags are managed inside threads
    
    Andrey Zhadchenko (13):
      dm-ploop: do not flush after metadata writes
      dm-ploop: set IOCB_DSYNC on all FUA requests
      dm-ploop: remove extra ploop_cluster_is_in_top_delta()
      dm-ploop: introduce per-md page locking
      dm-ploop: reduce BAT accesses on discard completion
      dm-ploop: simplify llseek
      dm-ploop: speed up ploop_prepare_bat_update()
      dm-ploop: make new allocations immediately visible in BAT
      dm-ploop: drop ploop_cluster_is_in_top_delta()
      dm-ploop: do not wait for BAT update for non-FUA requests
      dm-ploop: add delay for metadata writeback
      dm-ploop: submit all postponed metadata on REQ_OP_FLUSH
      dm-ploop: handle REQ_PREFLUSH
    
    Feature: dm-ploop: ploop target driver
---
 drivers/md/dm-ploop-bat.c | 21 ++++++++-------------
 drivers/md/dm-ploop-cmd.c | 34 +++++++++++++---------------------
 drivers/md/dm-ploop-map.c | 39 +++++++++++++++++----------------------
 drivers/md/dm-ploop.h     | 13 ++++++-------
 4 files changed, 44 insertions(+), 63 deletions(-)

diff --git a/drivers/md/dm-ploop-bat.c b/drivers/md/dm-ploop-bat.c
index cbe48e376097..f28bd5d7b322 100644
--- a/drivers/md/dm-ploop-bat.c
+++ b/drivers/md/dm-ploop-bat.c
@@ -87,6 +87,7 @@ static struct md_page *ploop_alloc_md_page(u32 id)
        md->bat_levels = levels;
        md->piwb = NULL;
        md->page = page;
+       md->kmpage = kmap(page);
        md->id = id;
        return md;
 err_page:
@@ -99,6 +100,7 @@ ALLOW_ERROR_INJECTION(ploop_alloc_md_page, NULL);
 
 void ploop_free_md_page(struct md_page *md)
 {
+       kunmap(md->page);
        put_page(md->page);
        kfree(md->bat_levels);
        kfree(md);
@@ -120,9 +122,8 @@ int ploop_prealloc_md_pages(struct rb_root *root, u32 
nr_bat_entries,
                md = ploop_alloc_md_page(i);
                if (!md)
                        return -ENOMEM;
-               addr = kmap_atomic(md->page);
+               addr = md->kmpage;
                memset32(addr, BAT_ENTRY_NONE, PAGE_SIZE / 4);
-               kunmap_atomic(addr);
 
                __md_page_insert(root, md);
        }
@@ -143,9 +144,8 @@ bool ploop_try_update_bat_entry(struct ploop *ploop, u32 
clu, u8 level, u32 dst_
        clu = ploop_bat_clu_idx_in_page(clu); /* relative offset */
 
        if (md->bat_levels[clu] == level) {
-               bat_entries = kmap_atomic(md->page);
+               bat_entries = md->kmpage;
                bat_entries[clu] = dst_clu;
-               kunmap_atomic(bat_entries);
                return true;
        }
        return false;
@@ -245,7 +245,7 @@ static int ploop_delta_check_header(struct ploop *ploop,
 
        WARN_ON_ONCE(md0->id != 0);
 
-       d_hdr = kmap(md0->page);
+       d_hdr = md0->kmpage;
        if (memcmp(d_hdr->m_Sig, ploop->m_Sig, sizeof(d_hdr->m_Sig)) ||
            d_hdr->m_Sectors != ploop->m_Sectors ||
            d_hdr->m_Type != ploop->m_Type)
@@ -263,7 +263,6 @@ static int ploop_delta_check_header(struct ploop *ploop,
        *delta_nr_be_ret = delta_nr_be;
        ret = 0;
 out:
-       kunmap(md0->page);
        return ret;
 }
 ALLOW_ERROR_INJECTION(ploop_delta_check_header, ERRNO);
@@ -282,7 +281,7 @@ static int ploop_convert_bat_entries(struct ploop *ploop, 
struct rb_root *md_roo
 
        page_id = 0;
        rb_root_for_each_md_page(md_root, md, node) {
-               bat_entries = kmap(md->page);
+               bat_entries = md->kmpage;
                init_be_iter(nr_be, md->id, &i, &end);
                WARN_ON_ONCE(page_id != md->id);
                page_id++;
@@ -295,7 +294,6 @@ static int ploop_convert_bat_entries(struct ploop *ploop, 
struct rb_root *md_roo
                        if (bat_entries[i] < bat_clusters)
                                ret = -EXDEV;
                }
-               kunmap(md->page);
 
                if (ret || page_id == nr_pages)
                        break;
@@ -416,9 +414,9 @@ static void ploop_apply_delta_mappings(struct ploop *ploop,
 
        write_lock_irq(&ploop->bat_rwlock);
        ploop_for_each_md_page(ploop, md, node) {
-               bat_entries = kmap_atomic(md->page);
+               bat_entries = md->kmpage;
                if (!is_raw)
-                       d_bat_entries = kmap_atomic(d_md->page);
+                       d_bat_entries = d_md->kmpage;
 
                if (is_top_level && md->id == 0 && !is_raw) {
                        /* bat_entries before PLOOP_MAP_OFFSET is hdr */
@@ -453,9 +451,6 @@ static void ploop_apply_delta_mappings(struct ploop *ploop,
                                ploop_set_not_hole(ploop, bat_entries[i]);
                }
 
-               kunmap_atomic(bat_entries);
-               if (!is_raw)
-                       kunmap_atomic(d_bat_entries);
                if (stop)
                        break;
                if (!is_raw)
diff --git a/drivers/md/dm-ploop-cmd.c b/drivers/md/dm-ploop-cmd.c
index 88519f590148..d9d41d79d937 100644
--- a/drivers/md/dm-ploop-cmd.c
+++ b/drivers/md/dm-ploop-cmd.c
@@ -43,7 +43,7 @@ static void ploop_advance_holes_bitmap(struct ploop *ploop,
        swap(cmd->resize.hb_nr, ploop->hb_nr);
        ploop_for_each_md_page(ploop, md, node) {
                ploop_init_be_iter(ploop, md->id, &i, &end);
-               bat_entries = kmap_atomic(md->page);
+               bat_entries = md->kmpage;
                for (; i <= end; i++) {
                        if (!ploop_md_page_cluster_is_in_top_delta(ploop, md, 
i))
                                continue;
@@ -54,7 +54,6 @@ static void ploop_advance_holes_bitmap(struct ploop *ploop,
                                ploop_hole_clear_bit(dst_clu, ploop);
                        }
                }
-               kunmap_atomic(bat_entries);
        }
        write_unlock_irq(&ploop->bat_rwlock);
 }
@@ -161,7 +160,7 @@ static u32 ploop_find_bat_entry(struct ploop *ploop, u32 
dst_clu, bool *is_locke
        read_lock_irq(&ploop->bat_rwlock);
        ploop_for_each_md_page(ploop, md, node) {
                ploop_init_be_iter(ploop, md->id, &i, &end);
-               bat_entries = kmap_atomic(md->page);
+               bat_entries = md->kmpage;
                for (; i <= end; i++) {
                        if (bat_entries[i] != dst_clu)
                                continue;
@@ -170,7 +169,6 @@ static u32 ploop_find_bat_entry(struct ploop *ploop, u32 
dst_clu, bool *is_locke
                                break;
                        }
                }
-               kunmap_atomic(bat_entries);
                if (clu != UINT_MAX)
                        break;
        }
@@ -265,9 +263,9 @@ static int ploop_write_zero_cluster_sync(struct ploop 
*ploop,
        int i;
 
        for (i = 0; i < pio->bi_vcnt; i++) {
-               data = kmap_atomic(pio->bi_io_vec[i].bv_page);
+               data = kmap(pio->bi_io_vec[i].bv_page);
                memset(data, 0, PAGE_SIZE);
-               kunmap_atomic(data);
+               kunmap(data);
        }
 
        return ploop_write_cluster_sync(ploop, pio, clu);
@@ -390,12 +388,12 @@ static int ploop_grow_update_header(struct ploop *ploop,
        clus = DIV_ROUND_UP(size, CLU_SIZE(ploop));
        first_block_off = CLU_TO_SEC(ploop, clus);
 
-       hdr = kmap_atomic(piwb->bat_page);
+       hdr = kmap_local_page(piwb->bat_page);
        /* TODO: head and cylinders */
        nr_be = hdr->m_Size = cpu_to_le32(cmd->resize.nr_bat_entries);
        sectors = hdr->m_SizeInSectors_v2 = 
cpu_to_le64(cmd->resize.new_sectors);
        offset = hdr->m_FirstBlockOffset = cpu_to_le32(first_block_off);
-       kunmap_atomic(hdr);
+       kunmap_local(hdr);
 
        ploop_make_md_wb(ploop, md);
        init_completion(&comp);
@@ -407,11 +405,10 @@ static int ploop_grow_update_header(struct ploop *ploop,
        ret = blk_status_to_errno(bi_status);
        if (!ret) {
                /* Now update our cached page */
-               hdr = kmap_atomic(cmd->resize.md0->page);
+               hdr = cmd->resize.md0->kmpage;
                hdr->m_Size = nr_be;
                hdr->m_SizeInSectors_v2 = sectors;
                hdr->m_FirstBlockOffset = offset;
-               kunmap_atomic(hdr);
        } else {
                PL_ERR("Failed to update hdr: %d", ret);
        }
@@ -545,9 +542,8 @@ static int ploop_resize(struct ploop *ploop, sector_t 
new_sectors)
        md0 = ploop_md_page_find(ploop, 0);
        if (WARN_ON(!md0))
                return -EIO;
-       hdr = kmap(md0->page);
+       hdr = md0->kmpage;
        old_sectors = le64_to_cpu(hdr->m_SizeInSectors_v2);
-       kunmap(md0->page);
 
        if (old_sectors == new_sectors)
                return 0;
@@ -731,8 +727,8 @@ static void notify_delta_merged(struct ploop *ploop, u8 
level,
        write_lock_irq(&ploop->bat_rwlock);
        ploop_for_each_md_page(ploop, md, node) {
                init_be_iter(nr_be, md->id, &i, &end);
-               bat_entries = kmap_atomic(md->page);
-               d_bat_entries = kmap_atomic(d_md->page);
+               bat_entries = md->kmpage;
+               d_bat_entries = d_md->kmpage;
                for (; i <= end; i++) {
                        clu = ploop_page_clu_idx_to_bat_clu(md->id, i);
                        if (clu == nr_be - 1)
@@ -759,14 +755,12 @@ static void notify_delta_merged(struct ploop *ploop, u8 
level,
                         * 1)next delta (which became renumbered) or
                         * 2)prev delta (if !@forward).
                         */
-                       bat_entries[i] = d_bat_entries[i];
+                       WRITE_ONCE(bat_entries[i], READ_ONCE(d_bat_entries[i]));
                        if (!forward)
                                md->bat_levels[i] = level - 1;
                        else
                                md->bat_levels[i] = level;
                }
-               kunmap_atomic(bat_entries);
-               kunmap_atomic(d_bat_entries);
                if (stop)
                        break;
                d_md = ploop_md_next_entry(d_md);
@@ -989,7 +983,7 @@ static int process_flip_upper_deltas(struct ploop *ploop)
        /* Flip bat entries */
        ploop_for_each_md_page(ploop, md, node) {
                ploop_init_be_iter(ploop, md->id, &i, &end);
-               bat_entries = kmap_atomic(md->page);
+               bat_entries = md->kmpage;
                for (; i <= end; i++) {
                        if (bat_entries[i] == BAT_ENTRY_NONE)
                                continue;
@@ -1000,7 +994,6 @@ static int process_flip_upper_deltas(struct ploop *ploop)
                                md->bat_levels[i] = level;
                        }
                }
-               kunmap_atomic(bat_entries);
        }
 
        /* FIXME */
@@ -1057,7 +1050,7 @@ static int ploop_check_delta_before_flip(struct ploop 
*ploop, struct file *file)
        write_lock_irq(&ploop->bat_rwlock);
        ploop_for_each_md_page(ploop, md, node) {
                init_be_iter(nr_be, md->id, &i, &end);
-               d_bat_entries = kmap_atomic(d_md->page);
+               d_bat_entries = d_md->kmpage;
                for (; i <= end; i++) {
                        if (ploop_md_page_cluster_is_in_top_delta(ploop, md, i) 
&&
                            d_bat_entries[i] != BAT_ENTRY_NONE) {
@@ -1073,7 +1066,6 @@ static int ploop_check_delta_before_flip(struct ploop 
*ploop, struct file *file)
                        goto unmap;
                }
 unmap:
-               kunmap_atomic(d_bat_entries);
                if (stop)
                        break;
                d_md = ploop_md_next_entry(d_md);
diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index 0809867ebd3a..634f0679be3f 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -722,11 +722,10 @@ static void ploop_release_cluster(struct ploop *ploop, 
u32 clu)
 
        clu = ploop_bat_clu_idx_in_page(clu); /* relative to page */
 
-       bat_entries = kmap_atomic(md->page);
+       bat_entries = md->kmpage;
        dst_clu = bat_entries[clu];
        bat_entries[clu] = BAT_ENTRY_NONE;
        md->bat_levels[clu] = 0;
-       kunmap_atomic(bat_entries);
 
        ploop_hole_set_bit(dst_clu, ploop);
 }
@@ -761,7 +760,7 @@ static void ploop_advance_local_after_bat_wb(struct ploop 
*ploop,
        LIST_HEAD(list);
 
        BUG_ON(!md);
-       bat_entries = kmap_atomic(md->page);
+       bat_entries = md->kmpage;
 
        /* Absolute number of first index in page (negative for page#0) */
        off = piwb->page_id * PAGE_SIZE / sizeof(map_index_t);
@@ -775,7 +774,7 @@ static void ploop_advance_local_after_bat_wb(struct ploop 
*ploop,
        if (!piwb->page_id)
                i = PLOOP_MAP_OFFSET;
 
-       dst_clu = kmap_atomic(piwb->bat_page);
+       dst_clu = kmap_local_page(piwb->bat_page);
        write_lock_irqsave(&ploop->bat_rwlock, flags);
 
        for (; i < last; i++) {
@@ -806,8 +805,7 @@ static void ploop_advance_local_after_bat_wb(struct ploop 
*ploop,
        md->piwb = NULL;
        list_splice_tail_init(&md->wait_list, &list);
        write_unlock_irqrestore(&ploop->bat_rwlock, flags);
-       kunmap_atomic(dst_clu);
-       kunmap_atomic(bat_entries);
+       kunmap_local(dst_clu);
 
        if (!list_empty(&list))
                ploop_dispatch_pios(ploop, NULL, &list);
@@ -911,7 +909,7 @@ static int ploop_prepare_bat_update(struct ploop *ploop, 
struct md_page *md,
                goto err;
        ploop_init_pio(ploop, REQ_OP_WRITE, pio);
 
-       bat_entries = kmap_atomic(md->page);
+       bat_entries = md->kmpage;
 
        write_lock_irq(&ploop->bat_rwlock);
        md->piwb = piwb;
@@ -919,7 +917,7 @@ static int ploop_prepare_bat_update(struct ploop *ploop, 
struct md_page *md,
        write_unlock_irq(&ploop->bat_rwlock);
 
        piwb->page_id = page_id;
-       to = kmap_atomic(page);
+       to = kmap_local_page(page);
        memcpy((void *)to, bat_entries, PAGE_SIZE);
 
        /* Absolute number of first index in page (negative for page#0) */
@@ -948,9 +946,7 @@ static int ploop_prepare_bat_update(struct ploop *ploop, 
struct md_page *md,
                        to[i] = 0;
        }
 
-       kunmap_atomic(to);
-       kunmap_atomic(bat_entries);
-
+       kunmap_local(to);
        piwb->type = type;
        return 0;
 err:
@@ -980,9 +976,9 @@ static void ploop_bat_page_zero_cluster(struct ploop *ploop,
        /* Cluster index related to the page[page_id] start */
        clu = ploop_bat_clu_idx_in_page(clu);
 
-       to = kmap_atomic(piwb->bat_page);
+       to = kmap_local_page(piwb->bat_page);
        to[clu] = 0;
-       kunmap_atomic(to);
+       kunmap_local(to);
 }
 
 static int ploop_find_dst_clu_bit(struct ploop *ploop, u32 *ret_dst_clu)
@@ -1086,7 +1082,6 @@ ALLOW_ERROR_INJECTION(ploop_allocate_cluster, ERRNO);
 static int ploop_alloc_cluster(struct ploop *ploop, struct ploop_index_wb 
*piwb,
                               u32 clu, u32 *dst_clu)
 {
-       struct page *page = piwb->bat_page;
        bool already_alloced = false;
        map_index_t *to;
        int ret = 0;
@@ -1094,13 +1089,13 @@ static int ploop_alloc_cluster(struct ploop *ploop, 
struct ploop_index_wb *piwb,
        /* Cluster index related to the page[page_id] start */
        clu -= piwb->page_id * PAGE_SIZE / sizeof(map_index_t) - 
PLOOP_MAP_OFFSET;
 
-       to = kmap_atomic(page);
+       to = kmap_local_page(piwb->bat_page);
        if (to[clu]) {
                /* Already mapped by one of previous bios */
                *dst_clu = to[clu];
                already_alloced = true;
        }
-       kunmap_atomic(to);
+       kunmap_local(to);
 
        if (already_alloced)
                goto out;
@@ -1109,9 +1104,9 @@ static int ploop_alloc_cluster(struct ploop *ploop, 
struct ploop_index_wb *piwb,
        if (ret < 0)
                goto out;
 
-       to = kmap_atomic(page);
+       to = kmap_local_page(piwb->bat_page);
        to[clu] = *dst_clu;
-       kunmap_atomic(to);
+       kunmap_local(to);
 out:
        return ret;
 }
@@ -1391,10 +1386,10 @@ static void ploop_submit_cow_index_wb(struct ploop_cow 
*cow)
 
        clu -= page_id * PAGE_SIZE / sizeof(map_index_t) - PLOOP_MAP_OFFSET;
 
-       to = kmap_atomic(piwb->bat_page);
+       to = kmap_local_page(piwb->bat_page);
        WARN_ON(to[clu]);
        to[clu] = cow->dst_clu;
-       kunmap_atomic(to);
+       kunmap_local(to);
 
        /* Prevent double clearing of holes_bitmap bit on complete_cow() */
        cow->dst_clu = BAT_ENTRY_NONE;
@@ -1725,7 +1720,7 @@ static void ploop_process_one_discard_pio(struct ploop 
*ploop, struct pio *pio)
        /* Cluster index related to the page[page_id] start */
        clu -= piwb->page_id * PAGE_SIZE / sizeof(map_index_t) - 
PLOOP_MAP_OFFSET;
 
-       to = kmap_atomic(piwb->bat_page);
+       to = kmap_local_page(piwb->bat_page);
        if (WARN_ON_ONCE(!to[clu])) {
                pio->bi_status = BLK_STS_IOERR;
                goto err;
@@ -1733,7 +1728,7 @@ static void ploop_process_one_discard_pio(struct ploop 
*ploop, struct pio *pio)
                to[clu] = 0;
                list_add_tail(&pio->list, &piwb->ready_data_pios);
        }
-       kunmap_atomic(to);
+       kunmap_local(to);
 
        if (bat_update_prepared)
                ploop_md_make_dirty(ploop, md);
diff --git a/drivers/md/dm-ploop.h b/drivers/md/dm-ploop.h
index e693d0ed7fe4..3783d42a6162 100644
--- a/drivers/md/dm-ploop.h
+++ b/drivers/md/dm-ploop.h
@@ -118,6 +118,7 @@ struct md_page {
 #define MD_WRITEBACK   (1U << 2) /* Writeback was submitted */
        unsigned int status;
        struct page *page;
+       void *kmpage;
        u8 *bat_levels;
        struct list_head wait_list;
 
@@ -430,9 +431,8 @@ static inline u32 ploop_bat_entries(struct ploop *ploop, 
u32 clu,
        if (md_ret)
                *md_ret = md;
 
-       bat_entries = kmap_atomic(md->page);
-       dst_clu = bat_entries[clu];
-       kunmap_atomic(bat_entries);
+       bat_entries = md->kmpage;
+       dst_clu = READ_ONCE(bat_entries[clu]);
        return dst_clu;
 }
 
@@ -463,11 +463,10 @@ static inline bool 
ploop_md_page_cluster_is_in_top_delta(struct ploop *ploop,
                return false;
        }
 
-       bat_entries = kmap_atomic(md->page);
-       if (bat_entries[clu] == BAT_ENTRY_NONE ||
-           md->bat_levels[clu] < ploop_top_level(ploop))
+       bat_entries = md->kmpage;
+       if (READ_ONCE(bat_entries[clu]) == BAT_ENTRY_NONE ||
+           READ_ONCE(md->bat_levels[clu]) < ploop_top_level(ploop))
                ret = false;
-       kunmap_atomic(bat_entries);
        return ret;
 }
 
_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to