A merge required backing out this change, so apply it again: convert the metadata writeback batch list to a lock-free llist.

Signed-off-by: Alexander Atanasov <alexander.atana...@virtuozzo.com>
---
 drivers/md/dm-ploop-map.c | 49 ++++++++++++++++++++-------------------
 1 file changed, 25 insertions(+), 24 deletions(-)

diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index 621a952ef32c..6cab948dfa29 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -562,7 +562,7 @@ static bool ploop_md_make_dirty(struct ploop *ploop, struct md_page *md)
        WARN_ON_ONCE(test_bit(MD_WRITEBACK, &md->status));
        md->dirty_time = ktime_get();
        if (!test_and_set_bit(MD_DIRTY, &md->status)) {
-               list_add(&md->wb_link, &ploop->wb_batch_list);
+               llist_add((struct llist_node *)&md->wb_link, &ploop->wb_batch_llist);
                new = true;
        }
        write_unlock_irqrestore(&ploop->bat_rwlock, flags);
@@ -1831,36 +1831,36 @@ static void ploop_submit_metadata_writeback(struct ploop *ploop)
 {
        ktime_t ktime, ktimeout;
        LIST_HEAD(ll_skipped);
-       struct md_page *md, *mtmp;
+       struct md_page *md;
+       struct llist_node *pos, *t;
+       struct llist_node *ll_wb_batch;
 
        ktime = ktime_get();
        ktimeout = ktime_add_ms(ktime, ploop->md_submit_delay_ms);
+       /* Lock here to protect against md_inflight counting */
+       write_lock_irq(&ploop->bat_rwlock);
+       ll_wb_batch = llist_del_all(&ploop->wb_batch_llist);
+       write_unlock_irq(&ploop->bat_rwlock);
        /*
         * Pages are set dirty so no one must touch lists
         * if new md entries are dirtied they are added at the start of the list
         */
-       list_for_each_entry_safe(md, mtmp, &ploop->wb_batch_list, wb_link) {
-               write_lock_irq(&ploop->bat_rwlock);
-               list_del_init(&md->wb_link);
+       llist_for_each_safe(pos, t, ll_wb_batch) {
+               md = list_entry((struct list_head *)pos, typeof(*md), wb_link);
+               INIT_LIST_HEAD(&md->wb_link);
                if (md->high_prio || ktime_after(md->dirty_time, ktimeout)
                    || ploop->force_md_writeback) {
                        /* L1L2 mustn't be redirtyed, when wb in-flight! */
                        WARN_ON_ONCE(!test_bit(MD_DIRTY, &md->status));
                        WARN_ON_ONCE(test_bit(MD_WRITEBACK, &md->status));
+                       md->high_prio = false;
                        set_bit(MD_WRITEBACK, &md->status);
                        clear_bit(MD_DIRTY, &md->status);
-                       md->high_prio = false;
-                       write_unlock_irq(&ploop->bat_rwlock);
                        ploop_index_wb_submit(ploop, md->piwb);
                } else {
-                       list_add_tail(&md->wb_link, &ll_skipped);
-                       write_unlock_irq(&ploop->bat_rwlock);
+                       llist_add((struct llist_node *)&md->wb_link, &ploop->wb_batch_llist);
                }
        }
-       write_lock_irq(&ploop->bat_rwlock);
-       list_splice(&ll_skipped, &ploop->wb_batch_list);
-       write_unlock_irq(&ploop->bat_rwlock);
-
 }
 
 static void process_ploop_fsync_work(struct ploop *ploop, struct llist_node *llflush_pios)
@@ -1993,18 +1993,19 @@ static void ploop_preflush_endio(struct pio *pio, void *orig_pio_ptr,
        }
 }
 
-static void ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
+static int ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
 {
        struct pio *flush_pio = pio;
        struct md_page *md, *n;
        int md_inflight = 0;
+       struct llist_node *pos, *t;
 
        if (pio->bi_op & REQ_PREFLUSH && (pio->bi_op & REQ_OP_MASK) != REQ_OP_FLUSH) {
                flush_pio = ploop_alloc_pio(ploop, GFP_NOIO);
                if (!flush_pio) {
                        pio->bi_status = BLK_STS_RESOURCE;
                        ploop_pio_endio(pio);
-                       return;
+                       return -1;
                }
 
                ploop_init_pio(ploop, REQ_OP_FLUSH, flush_pio);
@@ -2015,21 +2016,17 @@ static void ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
        }
 
        write_lock_irq(&ploop->bat_rwlock);
-
-       list_for_each_entry_safe(md, n, &ploop->wb_batch_list, wb_link) {
+       llist_for_each_safe(pos, t, ploop->wb_batch_llist.first) {
+               md = list_entry((struct list_head *)pos, typeof(*md), wb_link);
                md_inflight++;
                md->piwb->flush_pio = flush_pio;
                md->high_prio = true;
        }
 
        atomic_set(&flush_pio->md_inflight, md_inflight);
-
        write_unlock_irq(&ploop->bat_rwlock);
 
-       if (md_inflight)
-               ploop_schedule_work(ploop);
-       else
-               ploop_dispatch_pios(ploop, flush_pio, NULL);
+       return md_inflight;
 }
 
 static void ploop_submit_embedded_pio(struct ploop *ploop, struct pio *pio)
@@ -2058,8 +2055,12 @@ static void ploop_submit_embedded_pio(struct ploop *ploop, struct pio *pio)
        ploop_inc_nr_inflight(ploop, pio);
 
        if ((pio->bi_op & REQ_OP_MASK) == REQ_OP_FLUSH || pio->bi_op & REQ_PREFLUSH) {
-               ploop_prepare_flush(ploop, pio);
-               return;
+               ret = ploop_prepare_flush(ploop, pio);
+               if (ret < 0)
+                       return;
+               if (ret > 0)
+                       goto out;
+               /* Will add to prepare list and schedule work */
        }
 
        if (pio->queue_list_id == PLOOP_LIST_FLUSH) {
-- 
2.43.0

_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to