Rename cluster->clu

Signed-off-by: Kirill Tkhai <[email protected]>
---
 drivers/md/dm-ploop-bat.c |   38 +++---
 drivers/md/dm-ploop-cmd.c |  140 +++++++++++------------
 drivers/md/dm-ploop-map.c |  278 +++++++++++++++++++++++----------------------
 drivers/md/dm-ploop.h     |   72 ++++++------
 4 files changed, 264 insertions(+), 264 deletions(-)

diff --git a/drivers/md/dm-ploop-bat.c b/drivers/md/dm-ploop-bat.c
index 14882af705d5..df3f81c4ebd9 100644
--- a/drivers/md/dm-ploop-bat.c
+++ b/drivers/md/dm-ploop-bat.c
@@ -123,10 +123,10 @@ int prealloc_md_pages(struct rb_root *root, unsigned int nr_bat_entries,
        return 0;
 }
 
-bool try_update_bat_entry(struct ploop *ploop, unsigned int cluster,
-                         u8 level, unsigned int dst_cluster)
+bool try_update_bat_entry(struct ploop *ploop, unsigned int clu,
+                         u8 level, unsigned int dst_clu)
 {
-       unsigned int *bat_entries, id = bat_clu_to_page_nr(cluster);
+       unsigned int *bat_entries, id = bat_clu_to_page_nr(clu);
        struct md_page *md = md_page_find(ploop, id);
 
        lockdep_assert_held(&ploop->bat_rwlock);
@@ -134,11 +134,11 @@ bool try_update_bat_entry(struct ploop *ploop, unsigned int cluster,
        if (!md)
                return false;
 
-       cluster = bat_clu_idx_in_page(cluster); /* relative offset */
+       clu = bat_clu_idx_in_page(clu); /* relative offset */
 
-       if (md->bat_levels[cluster] == level) {
+       if (md->bat_levels[clu] == level) {
                bat_entries = kmap_atomic(md->page);
-               bat_entries[cluster] = dst_cluster;
+               bat_entries[clu] = dst_clu;
                kunmap_atomic(bat_entries);
                return true;
        }
@@ -177,13 +177,13 @@ static int parse_bat_entries(struct ploop *ploop, map_index_t *bat_entries,
 }
 
 /*
- * Read from disk and fill bat_entries. Note, that on enter here, cluster #0
+ * Read from disk and fill bat_entries. Note, that on enter here, clu #0
  * is already read from disk (with header) -- just parse bio pages content.
  */
 int ploop_read_bat(struct ploop *ploop, struct bio *bio, u8 nr_deltas)
 {
        unsigned int id, entries_per_page, nr_copy, nr_all, page, i = 0;
-       map_index_t *from, *to, cluster = 0;
+       map_index_t *from, *to, clu = 0;
        struct md_page *md;
        int ret = 0;
 
@@ -223,7 +223,7 @@ int ploop_read_bat(struct ploop *ploop, struct bio *bio, u8 nr_deltas)
                                goto out;
                }
 
-               ret = ploop_read_cluster_sync(ploop, bio, ++cluster);
+               ret = ploop_read_cluster_sync(ploop, bio, ++clu);
                if (ret)
                        goto out;
 
@@ -425,11 +425,11 @@ int ploop_read_delta_metadata(struct ploop *ploop, struct file *file,
        return ret;
 }
 
-static void ploop_set_not_hole(struct ploop *ploop, u32 dst_cluster)
+static void ploop_set_not_hole(struct ploop *ploop, u32 dst_clu)
 {
        /* Cluster may refer out holes_bitmap after shrinking */
-       if (dst_cluster < ploop->hb_nr)
-               ploop_hole_clear_bit(dst_cluster, ploop);
+       if (dst_clu < ploop->hb_nr)
+               ploop_hole_clear_bit(dst_clu, ploop);
 }
 
 /*
@@ -444,7 +444,7 @@ static void apply_delta_mappings(struct ploop *ploop, struct ploop_delta *deltas
 {
        map_index_t *bat_entries, *delta_bat_entries;
        bool is_top_level, is_raw, stop = false;
-       unsigned int i, end, dst_cluster, clu;
+       unsigned int i, end, dst_clu, clu;
        struct rb_node *node;
        struct md_page *md;
 
@@ -479,16 +479,16 @@ static void apply_delta_mappings(struct ploop *ploop, struct ploop_delta *deltas
                        }
 
                        if (!is_raw)
-                               dst_cluster = delta_bat_entries[i];
+                               dst_clu = delta_bat_entries[i];
                        else {
-                               dst_cluster = clu;
-                               if (dst_cluster >= size_in_clus)
-                                       dst_cluster = BAT_ENTRY_NONE;
+                               dst_clu = clu;
+                               if (dst_clu >= size_in_clus)
+                                       dst_clu = BAT_ENTRY_NONE;
                        }
-                       if (dst_cluster == BAT_ENTRY_NONE)
+                       if (dst_clu == BAT_ENTRY_NONE)
                                continue;
                        md->bat_levels[i] = level;
-                       bat_entries[i] = dst_cluster;
+                       bat_entries[i] = dst_clu;
 set_not_hole:
                        if (is_top_level)
                                ploop_set_not_hole(ploop, bat_entries[i]);
diff --git a/drivers/md/dm-ploop-cmd.c b/drivers/md/dm-ploop-cmd.c
index bba1c293f088..50f117987377 100644
--- a/drivers/md/dm-ploop-cmd.c
+++ b/drivers/md/dm-ploop-cmd.c
@@ -22,7 +22,7 @@
 static void ploop_advance_holes_bitmap(struct ploop *ploop,
                                       struct ploop_cmd *cmd)
 {
-       unsigned int i, end, size, dst_cluster, *bat_entries;
+       unsigned int i, end, size, dst_clu, *bat_entries;
        struct rb_node *node;
        struct md_page *md;
 
@@ -45,11 +45,11 @@ static void ploop_advance_holes_bitmap(struct ploop *ploop,
                for (; i <= end; i++) {
                        if (!md_page_cluster_is_in_top_delta(ploop, md, i))
                                continue;
-                       dst_cluster = bat_entries[i];
+                       dst_clu = bat_entries[i];
                        /* This may happen after grow->shrink->(now) grow */
-                       if (dst_cluster < ploop->hb_nr &&
-                           test_bit(dst_cluster, ploop->holes_bitmap)) {
-                               ploop_hole_clear_bit(dst_cluster, ploop);
+                       if (dst_clu < ploop->hb_nr &&
+                           test_bit(dst_clu, ploop->holes_bitmap)) {
+                               ploop_hole_clear_bit(dst_clu, ploop);
                        }
                }
                kunmap_atomic(bat_entries);
@@ -80,7 +80,7 @@ static int wait_for_completion_maybe_killable(struct completion *comp,
  * bios are completed. This waits for completion of simple submitted
  * action like write to origin_dev or read from delta, but it never
  * guarantees completion of complex actions like "data write + index
- * writeback" (for index protection look at cluster locks). This is
+ * writeback" (for index protection look at clu locks). This is
  * weaker, than "dmsetup suspend".
  * It is called from kwork only, so this can't be executed in parallel.
  */
@@ -142,12 +142,12 @@ static void ploop_resume_submitting_pios(struct ploop *ploop)
        submit_pios(ploop, &list);
 }
 
-/* Find existing BAT cluster pointing to dst_cluster */
+/* Find existing BAT clu pointing to dst_clu */
 static unsigned int ploop_find_bat_entry(struct ploop *ploop,
-                                        unsigned int dst_cluster,
+                                        unsigned int dst_clu,
                                         bool *is_locked)
 {
-       unsigned int i, end, *bat_entries, cluster = UINT_MAX;
+       unsigned int i, end, *bat_entries, clu = UINT_MAX;
        struct rb_node *node;
        struct md_page *md;
 
@@ -156,31 +156,31 @@ static unsigned int ploop_find_bat_entry(struct ploop *ploop,
                init_bat_entries_iter(ploop, md->id, &i, &end);
                bat_entries = kmap_atomic(md->page);
                for (; i <= end; i++) {
-                       if (bat_entries[i] != dst_cluster)
+                       if (bat_entries[i] != dst_clu)
                                continue;
                        if (md_page_cluster_is_in_top_delta(ploop, md, i)) {
-                               cluster = page_clu_idx_to_bat_clu(md->id, i);
+                               clu = page_clu_idx_to_bat_clu(md->id, i);
                                break;
                        }
                }
                kunmap_atomic(bat_entries);
-               if (cluster != UINT_MAX)
+               if (clu != UINT_MAX)
                        break;
        }
        read_unlock_irq(&ploop->bat_rwlock);
 
        *is_locked = false;
-       if (cluster != UINT_MAX) {
+       if (clu != UINT_MAX) {
                spin_lock_irq(&ploop->deferred_lock);
-               *is_locked = find_lk_of_cluster(ploop, cluster);
+               *is_locked = find_lk_of_cluster(ploop, clu);
                spin_unlock_irq(&ploop->deferred_lock);
        }
 
-       return cluster;
+       return clu;
 }
 
 void pio_prepare_offsets(struct ploop *ploop, struct pio *pio,
-                        unsigned int cluster)
+                        unsigned int clu)
 {
        int i, nr_pages = nr_pages_in_cluster(ploop);
 
@@ -192,7 +192,7 @@ void pio_prepare_offsets(struct ploop *ploop, struct pio *pio,
                pio->bi_io_vec[i].bv_offset = 0;
                pio->bi_io_vec[i].bv_len = PAGE_SIZE;
        }
-       pio->bi_iter.bi_sector = CLU_TO_SEC(ploop, cluster);
+       pio->bi_iter.bi_sector = CLU_TO_SEC(ploop, clu);
        pio->bi_iter.bi_size = CLU_SIZE(ploop);
 }
 
@@ -204,17 +204,17 @@ static void wake_completion(struct pio *pio, void *data, blk_status_t status)
 }
 
 static int ploop_read_cluster_sync(struct ploop *ploop, struct pio *pio,
-                                  unsigned int dst_cluster)
+                                  unsigned int dst_clu)
 {
        DECLARE_COMPLETION(completion);
 
        init_pio(ploop, REQ_OP_READ, pio);
-       pio_prepare_offsets(ploop, pio, dst_cluster);
+       pio_prepare_offsets(ploop, pio, dst_clu);
 
        pio->endio_cb = wake_completion;
        pio->endio_cb_data = &completion;
 
-       map_and_submit_rw(ploop, dst_cluster, pio, top_level(ploop));
+       map_and_submit_rw(ploop, dst_clu, pio, top_level(ploop));
        wait_for_completion(&completion);
 
        if (pio->bi_status)
@@ -224,7 +224,7 @@ static int ploop_read_cluster_sync(struct ploop *ploop, struct pio *pio,
 }
 
 static int ploop_write_cluster_sync(struct ploop *ploop, struct pio *pio,
-                                   unsigned int dst_cluster)
+                                   unsigned int dst_clu)
 {
        struct file *file = top_delta(ploop)->file;
        DECLARE_COMPLETION(completion);
@@ -235,12 +235,12 @@ static int ploop_write_cluster_sync(struct ploop *ploop, struct pio *pio,
                return ret;
 
        init_pio(ploop, REQ_OP_WRITE, pio);
-       pio_prepare_offsets(ploop, pio, dst_cluster);
+       pio_prepare_offsets(ploop, pio, dst_clu);
 
        pio->endio_cb = wake_completion;
        pio->endio_cb_data = &completion;
 
-       map_and_submit_rw(ploop, dst_cluster, pio, top_level(ploop));
+       map_and_submit_rw(ploop, dst_clu, pio, top_level(ploop));
        wait_for_completion(&completion);
 
        if (pio->bi_status)
@@ -252,7 +252,7 @@ static int ploop_write_cluster_sync(struct ploop *ploop, struct pio *pio,
 
 static int ploop_write_zero_cluster_sync(struct ploop *ploop,
                                         struct pio *pio,
-                                        unsigned int cluster)
+                                        unsigned int clu)
 {
        void *data;
        int i;
@@ -263,45 +263,45 @@ static int ploop_write_zero_cluster_sync(struct ploop *ploop,
                kunmap_atomic(data);
        }
 
-       return ploop_write_cluster_sync(ploop, pio, cluster);
+       return ploop_write_cluster_sync(ploop, pio, clu);
 }
 
 static int ploop_grow_relocate_cluster(struct ploop *ploop,
                                       struct ploop_index_wb *piwb,
                                       struct ploop_cmd *cmd)
 {
-       unsigned int new_dst, cluster, dst_cluster;
+       unsigned int new_dst, clu, dst_clu;
        struct pio *pio = cmd->resize.pio;
        bool is_locked;
        int ret = 0;
 
-       dst_cluster = cmd->resize.dst_cluster;
+       dst_clu = cmd->resize.dst_clu;
 
-       /* Relocate cluster and update index */
-       cluster = ploop_find_bat_entry(ploop, dst_cluster, &is_locked);
-       if (cluster == UINT_MAX || is_locked) {
-               /* dst_cluster in top delta is not occupied? */
-               if (!test_bit(dst_cluster, ploop->holes_bitmap) || is_locked) {
+       /* Relocate clu and update index */
+       clu = ploop_find_bat_entry(ploop, dst_clu, &is_locked);
+       if (clu == UINT_MAX || is_locked) {
+               /* dst_clu in top delta is not occupied? */
+               if (!test_bit(dst_clu, ploop->holes_bitmap) || is_locked) {
                        WARN_ON_ONCE(1);
                        ret = -EIO;
                        goto out;
                }
                /* Cluster is free, occupy it. Skip relocaton */
-               ploop_hole_clear_bit(dst_cluster, ploop);
+               ploop_hole_clear_bit(dst_clu, ploop);
                goto not_occupied;
        }
 
-       /* Read full cluster sync */
-       ret = ploop_read_cluster_sync(ploop, pio, dst_cluster);
+       /* Read full clu sync */
+       ret = ploop_read_cluster_sync(ploop, pio, dst_clu);
        if (ret < 0)
                goto out;
 
-       ret = ploop_prepare_reloc_index_wb(ploop, piwb, cluster,
+       ret = ploop_prepare_reloc_index_wb(ploop, piwb, clu,
                                           &new_dst);
        if (ret < 0)
                goto out;
 
-       /* Write cluster to new destination */
+       /* Write clu to new destination */
        ret = ploop_write_cluster_sync(ploop, pio, new_dst);
        if (ret) {
                ploop_reset_bat_update(piwb);
@@ -317,18 +317,18 @@ static int ploop_grow_relocate_cluster(struct ploop *ploop,
 
        /* Update local BAT copy */
        write_lock_irq(&ploop->bat_rwlock);
-       WARN_ON(!try_update_bat_entry(ploop, cluster, top_level(ploop), new_dst));
+       WARN_ON(!try_update_bat_entry(ploop, clu, top_level(ploop), new_dst));
        write_unlock_irq(&ploop->bat_rwlock);
 not_occupied:
        /*
-        * Now dst_cluster is not referenced in BAT, so increase the value
+        * Now dst_clu is not referenced in BAT, so increase the value
         * for next iteration. The place we do this is significant: caller
         * makes rollback based on this.
         */
-       cmd->resize.dst_cluster++;
+       cmd->resize.dst_clu++;
 
        /* Zero new BAT entries on disk. */
-       ret = ploop_write_zero_cluster_sync(ploop, pio, dst_cluster);
+       ret = ploop_write_zero_cluster_sync(ploop, pio, dst_clu);
 out:
        return ret;
 }
@@ -389,13 +389,13 @@ static void ploop_add_md_pages(struct ploop *ploop, struct rb_root *from)
 /*
  * Here we relocate data clusters, which may intersect with BAT area
  * of disk after resize. For user they look as already written to disk,
- * so be careful(!) and protective. Update indexes only after cluster
+ * so be careful(!) and protective. Update indexes only after clu
  * data is written to disk.
  */
 static int process_resize_cmd(struct ploop *ploop, struct ploop_cmd *cmd)
 {
        struct ploop_index_wb piwb;
-       unsigned int dst_cluster;
+       unsigned int dst_clu;
        int ret = 0;
 
        ploop_index_wb_init(&piwb, ploop);
@@ -403,7 +403,7 @@ static int process_resize_cmd(struct ploop *ploop, struct ploop_cmd *cmd)
        /* Update memory arrays and hb_nr, but do not update nr_bat_entries. */
        ploop_advance_holes_bitmap(ploop, cmd);
 
-       while (cmd->resize.dst_cluster <= cmd->resize.end_dst_cluster) {
+       while (cmd->resize.dst_clu <= cmd->resize.end_dst_clu) {
                ret = ploop_grow_relocate_cluster(ploop, &piwb, cmd);
                if (ret)
                        goto out;
@@ -415,11 +415,11 @@ static int process_resize_cmd(struct ploop *ploop, struct ploop_cmd *cmd)
        write_lock_irq(&ploop->bat_rwlock);
        if (ret) {
                /* Cleanup: mark new BAT overages as free clusters */
-               dst_cluster = cmd->resize.dst_cluster - 1;
+               dst_clu = cmd->resize.dst_clu - 1;
 
-               while (dst_cluster >= cmd->resize.nr_old_bat_clu) {
-                       ploop_hole_set_bit(dst_cluster, ploop);
-                       dst_cluster--;
+               while (dst_clu >= cmd->resize.nr_old_bat_clu) {
+                       ploop_hole_set_bit(dst_clu, ploop);
+                       dst_clu--;
                }
                swap(ploop->hb_nr, cmd->resize.hb_nr);
        } else {
@@ -546,9 +546,9 @@ static int ploop_resize(struct ploop *ploop, sector_t new_sectors)
        if (!cmd.resize.pio)
                goto err;
 
-       cmd.resize.cluster = UINT_MAX;
-       cmd.resize.dst_cluster = nr_old_bat_clusters;
-       cmd.resize.end_dst_cluster = nr_bat_clusters - 1;
+       cmd.resize.clu = UINT_MAX;
+       cmd.resize.dst_clu = nr_old_bat_clusters;
+       cmd.resize.end_dst_clu = nr_bat_clusters - 1;
        cmd.resize.nr_old_bat_clu = nr_old_bat_clusters;
        cmd.resize.nr_bat_entries = nr_bat_entries;
        cmd.resize.hb_nr = hb_nr;
@@ -720,15 +720,15 @@ static void notify_delta_merged(struct ploop *ploop, u8 level,
 static int process_update_delta_index(struct ploop *ploop, u8 level,
                                      const char *map)
 {
-       unsigned int cluster, dst_cluster, n;
+       unsigned int clu, dst_clu, n;
        int ret;
 
        write_lock_irq(&ploop->bat_rwlock);
        /* Check all */
-       while (sscanf(map, "%u:%u;%n", &cluster, &dst_cluster, &n) == 2) {
-               if (cluster >= ploop->nr_bat_entries)
+       while (sscanf(map, "%u:%u;%n", &clu, &dst_clu, &n) == 2) {
+               if (clu >= ploop->nr_bat_entries)
                        break;
-               if (ploop_bat_entries(ploop, cluster, NULL) == BAT_ENTRY_NONE)
+               if (ploop_bat_entries(ploop, clu, NULL) == BAT_ENTRY_NONE)
                        break;
                map += n;
        }
@@ -737,8 +737,8 @@ static int process_update_delta_index(struct ploop *ploop, u8 level,
                goto unlock;
        }
        /* Commit all */
-       while (sscanf(map, "%u:%u;%n", &cluster, &dst_cluster, &n) == 2) {
-               try_update_bat_entry(ploop, cluster, level, dst_cluster);
+       while (sscanf(map, "%u:%u;%n", &clu, &dst_clu, &n) == 2) {
+               try_update_bat_entry(ploop, clu, level, dst_clu);
                map += n;
        }
        ret = 0;
@@ -905,7 +905,7 @@ static int process_flip_upper_deltas(struct ploop *ploop)
 static int process_tracking_start(struct ploop *ploop, void *tracking_bitmap,
                                  u32 tb_nr)
 {
-       unsigned int i, nr_pages, end, *bat_entries, dst_cluster, nr;
+       unsigned int i, nr_pages, end, *bat_entries, dst_clu, nr;
        struct rb_node *node;
        struct md_page *md;
        int ret = 0;
@@ -923,15 +923,15 @@ static int process_tracking_start(struct ploop *ploop, void *tracking_bitmap,
                init_bat_entries_iter(ploop, md->id, &i, &end);
                bat_entries = kmap_atomic(md->page);
                for (; i <= end; i++) {
-                       dst_cluster = bat_entries[i];
-                       if (dst_cluster == BAT_ENTRY_NONE ||
+                       dst_clu = bat_entries[i];
+                       if (dst_clu == BAT_ENTRY_NONE ||
                            md->bat_levels[i] != top_level(ploop))
                                continue;
-                       if (WARN_ON(dst_cluster >= tb_nr)) {
+                       if (WARN_ON(dst_clu >= tb_nr)) {
                                ret = -EIO;
                                break;
                        }
-                       set_bit(dst_cluster, tracking_bitmap);
+                       set_bit(dst_clu, tracking_bitmap);
                }
                kunmap_atomic(bat_entries);
                if (ret)
@@ -972,9 +972,9 @@ static int tracking_get_next(struct ploop *ploop, char *result,
        return ret;
 }
 
-static unsigned int max_dst_cluster_in_top_delta(struct ploop *ploop)
+static unsigned int max_dst_clu_in_top_delta(struct ploop *ploop)
 {
-       unsigned int i, nr_pages, nr = 0, end, *bat_entries, dst_cluster = 0;
+       unsigned int i, nr_pages, nr = 0, end, *bat_entries, dst_clu = 0;
        struct rb_node *node;
        struct md_page *md;
 
@@ -985,9 +985,9 @@ static unsigned int max_dst_cluster_in_top_delta(struct ploop *ploop)
                init_bat_entries_iter(ploop, md->id, &i, &end);
                bat_entries = kmap_atomic(md->page);
                for (; i <= end; i++) {
-                       if (dst_cluster < bat_entries[i] &&
+                       if (dst_clu < bat_entries[i] &&
                            md->bat_levels[i] == top_level(ploop))
-                               dst_cluster = bat_entries[i];
+                               dst_clu = bat_entries[i];
                }
                kunmap_atomic(bat_entries);
                nr++;
@@ -995,7 +995,7 @@ static unsigned int max_dst_cluster_in_top_delta(struct ploop *ploop)
        read_unlock_irq(&ploop->bat_rwlock);
 
        BUG_ON(nr != nr_pages);
-       return dst_cluster;
+       return dst_clu;
 }
 
 static int ploop_tracking_cmd(struct ploop *ploop, const char *suffix,
@@ -1019,12 +1019,12 @@ static int ploop_tracking_cmd(struct ploop *ploop, const char *suffix,
                        return -EEXIST;
                if (ploop->maintaince)
                        return -EBUSY;
-               /* max_dst_cluster_in_top_delta() may be above hb_nr */
-               tb_nr = max_dst_cluster_in_top_delta(ploop) + 1;
+               /* max_dst_clu_in_top_delta() may be above hb_nr */
+               tb_nr = max_dst_clu_in_top_delta(ploop) + 1;
                if (tb_nr < ploop->hb_nr)
                        tb_nr = ploop->hb_nr;
                /*
-                * After max_dst_cluster_in_top_delta() unlocks the lock,
+                * After max_dst_clu_in_top_delta() unlocks the lock,
                 * new entries above tb_nr can't occur, since we always
                 * alloc clusters from holes_bitmap (and they nr < hb_nr).
                 */
diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index ee221a84152d..a9c8a6d3dca2 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -86,29 +86,29 @@ void init_pio(struct ploop *ploop, unsigned int bi_op, struct pio *pio)
        INIT_LIST_HEAD(&pio->list);
        INIT_HLIST_NODE(&pio->hlist_node);
        INIT_LIST_HEAD(&pio->endio_list);
-       /* FIXME: assign real cluster? */
-       pio->cluster = UINT_MAX;
+       /* FIXME: assign real clu? */
+       pio->clu = UINT_MAX;
        pio->level = BAT_LEVEL_INVALID;
 }
 
-/* Get cluster related to pio sectors */
+/* Get clu related to pio sectors */
 static int ploop_pio_valid(struct ploop *ploop, struct pio *pio)
 {
        sector_t sector = pio->bi_iter.bi_sector;
-       unsigned int end_cluster;
+       unsigned int end_clu;
        loff_t end_byte;
 
        end_byte = to_bytes(sector) + pio->bi_iter.bi_size - 1;
-       end_cluster = POS_TO_CLU(ploop, end_byte);
+       end_clu = POS_TO_CLU(ploop, end_byte);
 
-       if (unlikely(end_cluster >= ploop->nr_bat_entries)) {
+       if (unlikely(end_clu >= ploop->nr_bat_entries)) {
                /*
                 * This mustn't happen, since we set max_io_len
                 * via dm_set_target_max_io_len().
                 */
                WARN_ONCE(1, "sec=%llu, size=%u, end_clu=%u, nr=%u\n",
                          sector, pio->bi_iter.bi_size,
-                         end_cluster, ploop->nr_bat_entries);
+                         end_clu, ploop->nr_bat_entries);
                return -EINVAL;
        }
 
@@ -256,7 +256,7 @@ void defer_pios(struct ploop *ploop, struct pio *pio, struct list_head *pio_list
        queue_work(ploop->wq, &ploop->worker);
 }
 
-void track_dst_cluster(struct ploop *ploop, u32 dst_cluster)
+void track_dst_cluster(struct ploop *ploop, u32 dst_clu)
 {
        unsigned long flags;
 
@@ -264,8 +264,8 @@ void track_dst_cluster(struct ploop *ploop, u32 dst_cluster)
                return;
 
        read_lock_irqsave(&ploop->bat_rwlock, flags);
-       if (ploop->tracking_bitmap && !WARN_ON(dst_cluster >= ploop->tb_nr))
-               set_bit(dst_cluster, ploop->tracking_bitmap);
+       if (ploop->tracking_bitmap && !WARN_ON(dst_clu >= ploop->tb_nr))
+               set_bit(dst_clu, ploop->tracking_bitmap);
        read_unlock_irqrestore(&ploop->bat_rwlock, flags);
 }
 
@@ -279,12 +279,12 @@ void track_dst_cluster(struct ploop *ploop, u32 dst_cluster)
  */
 void __track_pio(struct ploop *ploop, struct pio *pio)
 {
-       unsigned int dst_cluster = SEC_TO_CLU(ploop, pio->bi_iter.bi_sector);
+       unsigned int dst_clu = SEC_TO_CLU(ploop, pio->bi_iter.bi_sector);
 
        if (!op_is_write(pio->bi_op) || !bvec_iter_sectors((pio)->bi_iter))
                return;
 
-       track_dst_cluster(ploop, dst_cluster);
+       track_dst_cluster(ploop, dst_clu);
 }
 
 static void queue_discard_index_wb(struct ploop *ploop, struct pio *pio)
@@ -326,23 +326,23 @@ struct pio *find_pio(struct hlist_head head[], u32 clu)
        BUG_ON(!slot);
 
        hlist_for_each_entry(pio, slot, hlist_node) {
-               if (pio->cluster == clu)
+               if (pio->clu == clu)
                        return pio;
        }
 
        return NULL;
 }
 
-static struct pio *find_inflight_bio(struct ploop *ploop, unsigned int cluster)
+static struct pio *find_inflight_bio(struct ploop *ploop, unsigned int clu)
 {
        lockdep_assert_held(&ploop->inflight_lock);
-       return find_pio(ploop->inflight_pios, cluster);
+       return find_pio(ploop->inflight_pios, clu);
 }
 
-struct pio *find_lk_of_cluster(struct ploop *ploop, unsigned int cluster)
+struct pio *find_lk_of_cluster(struct ploop *ploop, unsigned int clu)
 {
        lockdep_assert_held(&ploop->deferred_lock);
-       return find_pio(ploop->exclusive_pios, cluster);
+       return find_pio(ploop->exclusive_pios, clu);
 }
 
 static void add_endio_pio(struct pio *head, struct pio *pio)
@@ -388,7 +388,7 @@ static void link_pio(struct hlist_head head[], struct pio *pio,
 
        BUG_ON(!hlist_unhashed(&pio->hlist_node));
        hlist_add_head(&pio->hlist_node, slot);
-       pio->cluster = clu;
+       pio->clu = clu;
 }
 
 /*
@@ -405,12 +405,12 @@ static void unlink_pio(struct ploop *ploop, struct pio *pio,
        list_splice_tail_init(&pio->endio_list, pio_list);
 }
 
-static void add_cluster_lk(struct ploop *ploop, struct pio *pio, u32 cluster)
+static void add_cluster_lk(struct ploop *ploop, struct pio *pio, u32 clu)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&ploop->deferred_lock, flags);
-       link_pio(ploop->exclusive_pios, pio, cluster, true);
+       link_pio(ploop->exclusive_pios, pio, clu, true);
        spin_unlock_irqrestore(&ploop->deferred_lock, flags);
 }
 static void del_cluster_lk(struct ploop *ploop, struct pio *pio)
@@ -433,12 +433,12 @@ static void del_cluster_lk(struct ploop *ploop, struct pio *pio)
 }
 
 static void link_submitting_pio(struct ploop *ploop, struct pio *pio,
-                               unsigned int cluster)
+                               unsigned int clu)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&ploop->inflight_lock, flags);
-       link_pio(ploop->inflight_pios, pio, cluster, false);
+       link_pio(ploop->inflight_pios, pio, clu, false);
        spin_unlock_irqrestore(&ploop->inflight_lock, flags);
 }
 static void unlink_completed_pio(struct ploop *ploop, struct pio *pio)
@@ -501,7 +501,7 @@ static int punch_hole(struct file *file, loff_t pos, loff_t len)
 }
 
 static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
-                    unsigned int cluster, unsigned int dst_cluster)
+                    unsigned int clu, unsigned int dst_clu)
 {
        struct pio *inflight_h;
        unsigned long flags;
@@ -520,7 +520,7 @@ static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
                return;
        }
 
-       if (!cluster_is_in_top_delta(ploop, cluster)) {
+       if (!cluster_is_in_top_delta(ploop, clu)) {
                pio_endio(pio);
                return;
        }
@@ -530,7 +530,7 @@ static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
                goto punch_hole;
 
        spin_lock_irqsave(&ploop->inflight_lock, flags);
-       inflight_h = find_inflight_bio(ploop, cluster);
+       inflight_h = find_inflight_bio(ploop, clu);
        if (inflight_h)
                add_endio_pio(inflight_h, pio);
        spin_unlock_irqrestore(&ploop->inflight_lock, flags);
@@ -541,11 +541,11 @@ static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
                return;
        }
 
-       add_cluster_lk(ploop, pio, cluster);
+       add_cluster_lk(ploop, pio, clu);
        pio->wants_discard_index_cleanup = true;
 
 punch_hole:
-       remap_to_cluster(ploop, pio, dst_cluster);
+       remap_to_cluster(ploop, pio, dst_clu);
        pos = to_bytes(pio->bi_iter.bi_sector);
        ret = punch_hole(top_delta(ploop)->file, pos, pio->bi_iter.bi_size);
        if (ret || ploop->nr_deltas != 1) {
@@ -578,7 +578,7 @@ static void queue_or_fail(struct ploop *ploop, int err, void *data)
 
 static void complete_cow(struct ploop_cow *cow, blk_status_t bi_status)
 {
-       unsigned int dst_cluster = cow->dst_cluster;
+       unsigned int dst_clu = cow->dst_clu;
        struct pio *aux_pio = cow->aux_pio;
        struct ploop *ploop = cow->ploop;
        unsigned long flags;
@@ -589,9 +589,9 @@ static void complete_cow(struct ploop_cow *cow, blk_status_t bi_status)
 
        del_cluster_lk(ploop, cow_pio);
 
-       if (dst_cluster != BAT_ENTRY_NONE && bi_status != BLK_STS_OK) {
+       if (dst_clu != BAT_ENTRY_NONE && bi_status != BLK_STS_OK) {
                read_lock_irqsave(&ploop->bat_rwlock, flags);
-               ploop_hole_set_bit(dst_cluster, ploop);
+               ploop_hole_set_bit(dst_clu, ploop);
                read_unlock_irqrestore(&ploop->bat_rwlock, flags);
        }
 
@@ -603,38 +603,38 @@ static void complete_cow(struct ploop_cow *cow, blk_status_t bi_status)
 }
 
 static void ploop_release_cluster(struct ploop *ploop,
-                                 unsigned int cluster)
+                                 unsigned int clu)
 {
-       unsigned int id, *bat_entries, dst_cluster;
+       unsigned int id, *bat_entries, dst_clu;
        struct md_page *md;
 
        lockdep_assert_held(&ploop->bat_rwlock);
 
-       id = bat_clu_to_page_nr(cluster);
+       id = bat_clu_to_page_nr(clu);
         md = md_page_find(ploop, id);
         BUG_ON(!md);
 
-       cluster = bat_clu_idx_in_page(cluster); /* relative to page */
+       clu = bat_clu_idx_in_page(clu); /* relative to page */
 
        bat_entries = kmap_atomic(md->page);
-       dst_cluster = bat_entries[cluster];
-       bat_entries[cluster] = BAT_ENTRY_NONE;
-       md->bat_levels[cluster] = 0;
+       dst_clu = bat_entries[clu];
+       bat_entries[clu] = BAT_ENTRY_NONE;
+       md->bat_levels[clu] = 0;
        kunmap_atomic(bat_entries);
 
-       ploop_hole_set_bit(dst_cluster, ploop);
+       ploop_hole_set_bit(dst_clu, ploop);
 }
 
 static void piwb_discard_completed(struct ploop *ploop, bool success,
-                 unsigned int cluster, unsigned int new_dst_cluster)
+                 unsigned int clu, unsigned int new_dst_clu)
 {
-       if (new_dst_cluster)
+       if (new_dst_clu)
                return;
 
-       if (cluster_is_in_top_delta(ploop, cluster)) {
+       if (cluster_is_in_top_delta(ploop, clu)) {
                WARN_ON_ONCE(ploop->nr_deltas != 1);
                if (success)
-                       ploop_release_cluster(ploop, cluster);
+                       ploop_release_cluster(ploop, clu);
        }
 }
 
@@ -650,7 +650,7 @@ static void ploop_advance_local_after_bat_wb(struct ploop 
*ploop,
 {
        struct md_page *md = md_page_find(ploop, piwb->page_nr);
        unsigned int i, last, *bat_entries;
-       map_index_t *dst_cluster, off;
+       map_index_t *dst_clu, off;
        unsigned long flags;
 
        BUG_ON(!md);
@@ -668,25 +668,25 @@ static void ploop_advance_local_after_bat_wb(struct ploop 
*ploop,
        if (!piwb->page_nr)
                i = PLOOP_MAP_OFFSET;
 
-       dst_cluster = kmap_atomic(piwb->bat_page);
+       dst_clu = kmap_atomic(piwb->bat_page);
        ploop_bat_lock(ploop, success, flags);
 
        for (; i < last; i++) {
                if (piwb->type == PIWB_TYPE_DISCARD) {
-                       piwb_discard_completed(ploop, success, i + off, 
dst_cluster[i]);
+                       piwb_discard_completed(ploop, success, i + off, 
dst_clu[i]);
                        continue;
                }
 
-               if (!dst_cluster[i])
+               if (!dst_clu[i])
                        continue;
 
                if (cluster_is_in_top_delta(ploop, i + off) && piwb->type == 
PIWB_TYPE_ALLOC) {
-                       WARN_ON(bat_entries[i] != dst_cluster[i]);
+                       WARN_ON(bat_entries[i] != dst_clu[i]);
                        continue;
                }
 
                if (success) {
-                       bat_entries[i] = dst_cluster[i];
+                       bat_entries[i] = dst_clu[i];
                        md->bat_levels[i] = top_level(ploop);
                } else {
                        /*
@@ -700,7 +700,7 @@ static void ploop_advance_local_after_bat_wb(struct ploop 
*ploop,
        }
 
        ploop_bat_unlock(ploop, success, flags);
-       kunmap_atomic(dst_cluster);
+       kunmap_atomic(dst_clu);
        kunmap_atomic(bat_entries);
 }
 
@@ -830,28 +830,28 @@ void ploop_reset_bat_update(struct ploop_index_wb *piwb)
 
 static void ploop_bat_page_zero_cluster(struct ploop *ploop,
                                        struct ploop_index_wb *piwb,
-                                       unsigned int cluster)
+                                       unsigned int clu)
 {
        map_index_t *to;
 
        /* Cluster index related to the page[page_nr] start */
-       cluster = bat_clu_idx_in_page(cluster);
+       clu = bat_clu_idx_in_page(clu);
 
        to = kmap_atomic(piwb->bat_page);
-       to[cluster] = 0;
+       to[clu] = 0;
        kunmap_atomic(to);
 }
 
-static int find_dst_cluster_bit(struct ploop *ploop,
-                     unsigned int *ret_dst_cluster)
+static int find_dst_clu_bit(struct ploop *ploop,
+                     unsigned int *ret_dst_clu)
 {
-       unsigned int dst_cluster;
+       unsigned int dst_clu;
 
-       /* Find empty cluster */
-       dst_cluster = find_first_bit(ploop->holes_bitmap, ploop->hb_nr);
-       if (dst_cluster >= ploop->hb_nr)
+       /* Find empty clu */
+       dst_clu = find_first_bit(ploop->holes_bitmap, ploop->hb_nr);
+       if (dst_clu >= ploop->hb_nr)
                return -EIO;
-       *ret_dst_cluster = dst_cluster;
+       *ret_dst_clu = dst_clu;
        return 0;
 }
 
@@ -882,7 +882,7 @@ static int truncate_prealloc_safe(struct ploop_delta 
*delta, loff_t len, const c
        return 0;
 }
 
-static int allocate_cluster(struct ploop *ploop, unsigned int *dst_cluster)
+static int allocate_cluster(struct ploop *ploop, unsigned int *dst_clu)
 {
        struct ploop_delta *top = top_delta(ploop);
        u32 clu_size = CLU_SIZE(ploop);
@@ -890,10 +890,10 @@ static int allocate_cluster(struct ploop *ploop, unsigned 
int *dst_cluster)
        struct file *file = top->file;
        int ret;
 
-       if (find_dst_cluster_bit(ploop, dst_cluster) < 0)
+       if (find_dst_clu_bit(ploop, dst_clu) < 0)
                return -EIO;
 
-       pos = CLU_TO_POS(ploop, *dst_cluster);
+       pos = CLU_TO_POS(ploop, *dst_clu);
        end = pos + clu_size;
        old_size = top->file_size;
 
@@ -925,20 +925,20 @@ static int allocate_cluster(struct ploop *ploop, unsigned 
int *dst_cluster)
        if (end > top->file_preallocated_area_start)
                top->file_preallocated_area_start = end;
        /*
-        * Mark cluster as used. Find & clear bit is unlocked,
+        * Mark clu as used. Find & clear bit is unlocked,
         * since currently this may be called only from deferred
         * kwork. Note, that set_bit may be made from many places.
         */
-       ploop_hole_clear_bit(*dst_cluster, ploop);
+       ploop_hole_clear_bit(*dst_clu, ploop);
        return 0;
 }
 
 /*
- * This finds a free dst_cluster on origin device, and reflects this
+ * This finds a free dst_clu on origin device, and reflects this
  * in ploop->holes_bitmap and bat_page.
  */
 static int ploop_alloc_cluster(struct ploop *ploop, struct ploop_index_wb 
*piwb,
-                              unsigned int cluster, unsigned int *dst_cluster)
+                              unsigned int clu, unsigned int *dst_clu)
 {
        struct page *page = piwb->bat_page;
        bool already_alloced = false;
@@ -946,12 +946,12 @@ static int ploop_alloc_cluster(struct ploop *ploop, 
struct ploop_index_wb *piwb,
        int ret = 0;
 
        /* Cluster index related to the page[page_nr] start */
-       cluster -= piwb->page_nr * PAGE_SIZE / sizeof(map_index_t) - 
PLOOP_MAP_OFFSET;
+       clu -= piwb->page_nr * PAGE_SIZE / sizeof(map_index_t) - 
PLOOP_MAP_OFFSET;
 
        to = kmap_atomic(page);
-       if (to[cluster]) {
+       if (to[clu]) {
                /* Already mapped by one of previous bios */
-               *dst_cluster = to[cluster];
+               *dst_clu = to[clu];
                already_alloced = true;
        }
        kunmap_atomic(to);
@@ -959,13 +959,13 @@ static int ploop_alloc_cluster(struct ploop *ploop, 
struct ploop_index_wb *piwb,
        if (already_alloced)
                goto out;
 
-       if (allocate_cluster(ploop, dst_cluster) < 0) {
+       if (allocate_cluster(ploop, dst_clu) < 0) {
                ret = -EIO;
                goto out;
        }
 
        to = kmap_atomic(page);
-       to[cluster] = *dst_cluster;
+       to[clu] = *dst_clu;
        kunmap_atomic(to);
 out:
        return ret;
@@ -1125,16 +1125,16 @@ void map_and_submit_rw(struct ploop *ploop, u32 
dst_clu, struct pio *pio, u8 lev
 }
 
 static void initiate_delta_read(struct ploop *ploop, unsigned int level,
-                               unsigned int dst_cluster, struct pio *pio)
+                               unsigned int dst_clu, struct pio *pio)
 {
-       if (dst_cluster == BAT_ENTRY_NONE) {
-               /* No one delta contains dst_cluster. */
-       if (dst_clu == BAT_ENTRY_NONE) {
+               /* No delta contains dst_clu. */
                zero_fill_pio(pio);
                pio_endio(pio);
                return;
        }
 
-       map_and_submit_rw(ploop, dst_cluster, pio, level);
+       map_and_submit_rw(ploop, dst_clu, pio, level);
 }
 
 static void ploop_cow_endio(struct pio *aux_pio, void *data, blk_status_t 
bi_status)
@@ -1151,12 +1151,12 @@ static void ploop_cow_endio(struct pio *aux_pio, void 
*data, blk_status_t bi_sta
 }
 
 static bool postpone_if_cluster_locked(struct ploop *ploop, struct pio *pio,
-                                      unsigned int cluster)
+                                      unsigned int clu)
 {
        struct pio *e_h; /* Exclusively locked */
 
        spin_lock_irq(&ploop->deferred_lock);
-       e_h = find_lk_of_cluster(ploop, cluster);
+       e_h = find_lk_of_cluster(ploop, clu);
        if (e_h)
                add_endio_pio(e_h, pio);
        spin_unlock_irq(&ploop->deferred_lock);
@@ -1165,7 +1165,7 @@ static bool postpone_if_cluster_locked(struct ploop 
*ploop, struct pio *pio,
 }
 
 static int submit_cluster_cow(struct ploop *ploop, unsigned int level,
-                             unsigned int cluster, unsigned int dst_cluster,
+                             unsigned int clu, unsigned int dst_clu,
                              struct pio *cow_pio)
 {
        struct ploop_cow *cow = NULL;
@@ -1177,19 +1177,19 @@ static int submit_cluster_cow(struct ploop *ploop, 
unsigned int level,
        if (!aux_pio || !cow)
                goto err;
        init_pio(ploop, REQ_OP_READ, aux_pio);
-       pio_prepare_offsets(ploop, aux_pio, cluster);
+       pio_prepare_offsets(ploop, aux_pio, clu);
        aux_pio->endio_cb = ploop_cow_endio;
        aux_pio->endio_cb_data = cow;
 
        cow->ploop = ploop;
-       cow->dst_cluster = BAT_ENTRY_NONE;
+       cow->dst_clu = BAT_ENTRY_NONE;
        cow->aux_pio = aux_pio;
        cow->cow_pio = cow_pio;
 
-       add_cluster_lk(ploop, cow_pio, cluster);
+       add_cluster_lk(ploop, cow_pio, clu);
 
-       /* Stage #0: read secondary delta full cluster */
-       map_and_submit_rw(ploop, dst_cluster, aux_pio, level);
+       /* Stage #0: read secondary delta full clu */
+       map_and_submit_rw(ploop, dst_clu, aux_pio, level);
        return 0;
 err:
        if (aux_pio)
@@ -1199,9 +1199,9 @@ static int submit_cluster_cow(struct ploop *ploop, 
unsigned int level,
 }
 
 static void initiate_cluster_cow(struct ploop *ploop, unsigned int level,
-               unsigned int cluster, unsigned int dst_cluster, struct pio *pio)
+               unsigned int clu, unsigned int dst_clu, struct pio *pio)
 {
-       if (!submit_cluster_cow(ploop, level, cluster, dst_cluster, pio))
+       if (!submit_cluster_cow(ploop, level, clu, dst_clu, pio))
                return;
 
        pio->bi_status = BLK_STS_RESOURCE;
@@ -1212,20 +1212,20 @@ static void submit_cluster_write(struct ploop_cow *cow)
 {
        struct pio *pio = cow->aux_pio;
        struct ploop *ploop = cow->ploop;
-       unsigned int dst_cluster;
+       unsigned int dst_clu;
 
-       if (allocate_cluster(ploop, &dst_cluster) < 0)
+       if (allocate_cluster(ploop, &dst_clu) < 0)
                goto error;
-       cow->dst_cluster = dst_cluster;
+       cow->dst_clu = dst_clu;
 
        init_pio(ploop, REQ_OP_WRITE, pio);
-       pio_prepare_offsets(ploop, pio, dst_cluster);
+       pio_prepare_offsets(ploop, pio, dst_clu);
 
        BUG_ON(irqs_disabled());
        pio->endio_cb = ploop_cow_endio;
        pio->endio_cb_data = cow;
 
-       map_and_submit_rw(ploop, dst_cluster, pio, top_level(ploop));
+       map_and_submit_rw(ploop, dst_clu, pio, top_level(ploop));
        return;
 error:
        complete_cow(cow, BLK_STS_IOERR);
@@ -1235,12 +1235,12 @@ static void submit_cow_index_wb(struct ploop_cow *cow,
                                struct ploop_index_wb *piwb)
 {
        struct pio *cow_pio = cow->cow_pio;
-       unsigned int cluster = cow_pio->cluster;
+       unsigned int clu = cow_pio->clu;
        struct ploop *ploop = cow->ploop;
        unsigned int page_nr;
        map_index_t *to;
 
-       page_nr = bat_clu_to_page_nr(cluster);
+       page_nr = bat_clu_to_page_nr(clu);
 
        if (piwb->page_nr == PAGE_NR_NONE) {
                /* No index wb in process. Prepare a new one */
@@ -1258,15 +1258,15 @@ static void submit_cow_index_wb(struct ploop_cow *cow,
                goto out;
        }
 
-       cluster -= page_nr * PAGE_SIZE / sizeof(map_index_t) - PLOOP_MAP_OFFSET;
+       clu -= page_nr * PAGE_SIZE / sizeof(map_index_t) - PLOOP_MAP_OFFSET;
 
        to = kmap_atomic(piwb->bat_page);
-       WARN_ON(to[cluster]);
-       to[cluster] = cow->dst_cluster;
+       WARN_ON(to[clu]);
+       to[clu] = cow->dst_clu;
        kunmap_atomic(to);
 
        /* Prevent double clearing of holes_bitmap bit on complete_cow() */
-       cow->dst_cluster = BAT_ENTRY_NONE;
+       cow->dst_clu = BAT_ENTRY_NONE;
        spin_lock_irq(&ploop->deferred_lock);
        list_add_tail(&cow->aux_pio->list, &piwb->cow_list);
        spin_unlock_irq(&ploop->deferred_lock);
@@ -1294,9 +1294,9 @@ static void process_delta_wb(struct ploop *ploop, struct 
ploop_index_wb *piwb)
                        continue;
                }
 
-               if (cow->dst_cluster == BAT_ENTRY_NONE) {
+               if (cow->dst_clu == BAT_ENTRY_NONE) {
                        /*
-                        * Stage #1: assign dst_cluster and write data
+                        * Stage #1: assign dst_clu and write data
                         * to top delta.
                         */
                        submit_cluster_write(cow);
@@ -1313,7 +1313,7 @@ static void process_delta_wb(struct ploop *ploop, struct 
ploop_index_wb *piwb)
 }
 
 /*
- * This allocates a new cluster (if cluster wb is not pending yet),
+ * This allocates a new clu (if clu wb is not pending yet),
  * or tries to attach a bio to a planned page index wb.
  *
  * We want to update BAT indexes in batch, but we don't want to delay data
@@ -1323,20 +1323,20 @@ static void process_delta_wb(struct ploop *ploop, 
struct ploop_index_wb *piwb)
  * Original bio->bi_end_io mustn't be called before index wb is completed.
  * We handle this in ploop_attach_end_action() by specific callback
  * for ploop_data_pio_end().
- * Note: cluster newer becomes locked here, since index update is called
+ * Note: clu never becomes locked here, since index update is called
  * synchronously. Keep in mind this in case you make it async.
  */
 static bool locate_new_cluster_and_attach_pio(struct ploop *ploop,
                                              struct ploop_index_wb *piwb,
-                                             unsigned int cluster,
-                                             unsigned int *dst_cluster,
+                                             unsigned int clu,
+                                             unsigned int *dst_clu,
                                              struct pio *pio)
 {
        bool bat_update_prepared = false;
        bool attached = false;
        unsigned int page_nr;
 
-       page_nr = bat_clu_to_page_nr(cluster);
+       page_nr = bat_clu_to_page_nr(clu);
 
        if (piwb->page_nr == PAGE_NR_NONE) {
                /* No index wb in process. Prepare a new one */
@@ -1353,7 +1353,7 @@ static bool locate_new_cluster_and_attach_pio(struct 
ploop *ploop,
                goto out;
        }
 
-       if (ploop_alloc_cluster(ploop, piwb, cluster, dst_cluster)) {
+       if (ploop_alloc_cluster(ploop, piwb, clu, dst_clu)) {
                pio->bi_status = BLK_STS_IOERR;
                goto error;
        }
@@ -1362,7 +1362,7 @@ static bool locate_new_cluster_and_attach_pio(struct 
ploop *ploop,
        if (!attached) {
                /*
                 * Could not prepare data pio to be submitted before index wb
-                * batch? Delay submitting. Good thing, that cluster allocation
+                * batch? Delay submitting. Good thing, that clu allocation
                 * has already made, and it goes in the batch.
                 */
                defer_pios(ploop, pio, NULL);
@@ -1381,7 +1381,7 @@ static int process_one_deferred_bio(struct ploop *ploop, 
struct pio *pio,
                                    struct ploop_index_wb *piwb)
 {
        sector_t sector = pio->bi_iter.bi_sector;
-       unsigned int cluster, dst_cluster;
+       unsigned int clu, dst_clu;
        u8 level;
        bool ret;
 
@@ -1391,18 +1391,18 @@ static int process_one_deferred_bio(struct ploop 
*ploop, struct pio *pio,
         * ploop_advance_local_after_bat_wb(), which we start
         * and wait synchronously from *this* kwork.
         */
-       cluster = SEC_TO_CLU(ploop, sector);
-       dst_cluster = ploop_bat_entries(ploop, cluster, &level);
+       clu = SEC_TO_CLU(ploop, sector);
+       dst_clu = ploop_bat_entries(ploop, clu, &level);
 
-       if (postpone_if_cluster_locked(ploop, pio, cluster))
+       if (postpone_if_cluster_locked(ploop, pio, clu))
                goto out;
 
        if (op_is_discard(pio->bi_op)) {
-               handle_discard_pio(ploop, pio, cluster, dst_cluster);
+               handle_discard_pio(ploop, pio, clu, dst_clu);
                goto out;
        }
 
-       if (cluster_is_in_top_delta(ploop, cluster)) {
+       if (cluster_is_in_top_delta(ploop, clu)) {
                /* Already mapped */
                if (pio_endio_if_merge_fake_pio(pio))
                        goto out;
@@ -1410,18 +1410,18 @@ static int process_one_deferred_bio(struct ploop 
*ploop, struct pio *pio,
        } else if (!op_is_write(pio->bi_op)) {
                /*
                 * Simple read from secondary delta. May fail.
-                * (Also handles the case dst_cluster == BAT_ENTRY_NONE).
+                * (Also handles the case dst_clu == BAT_ENTRY_NONE).
                 */
-               initiate_delta_read(ploop, level, dst_cluster, pio);
+               initiate_delta_read(ploop, level, dst_clu, pio);
                goto out;
-       } else if (dst_cluster != BAT_ENTRY_NONE) {
+       } else if (dst_clu != BAT_ENTRY_NONE) {
                /*
                 * Read secondary delta and write to top delta. May fail.
-                * Yes, we can optimize the whole-cluster-write case and
+                * Yes, we can optimize the whole-clu-write case and
                 * a lot of other corner cases, but we don't do that as
                 * snapshots are used and COW occurs very rare.
                 */
-               initiate_cluster_cow(ploop, level, cluster, dst_cluster, pio);
+               initiate_cluster_cow(ploop, level, clu, dst_clu, pio);
                goto out;
        }
 
@@ -1429,14 +1429,14 @@ static int process_one_deferred_bio(struct ploop 
*ploop, struct pio *pio,
                goto out;
 
        /* Cluster exists nowhere. Allocate it and setup pio as outrunning */
-       ret = locate_new_cluster_and_attach_pio(ploop, piwb, cluster,
-                                               &dst_cluster, pio);
+       ret = locate_new_cluster_and_attach_pio(ploop, piwb, clu,
+                                               &dst_clu, pio);
        if (!ret)
                goto out;
 queue:
-       link_submitting_pio(ploop, pio, cluster);
+       link_submitting_pio(ploop, pio, clu);
 
-       map_and_submit_rw(ploop, dst_cluster, pio, top_level(ploop));
+       map_and_submit_rw(ploop, dst_clu, pio, top_level(ploop));
 out:
        return 0;
 }
@@ -1445,7 +1445,7 @@ void ploop_submit_index_wb_sync(struct ploop *ploop,
                                struct ploop_index_wb *piwb)
 {
        blk_status_t status = BLK_STS_OK;
-       u32 dst_cluster;
+       u32 dst_clu;
        int ret;
 
        /* track_bio() will be called in ploop_bat_write_complete() */
@@ -1455,8 +1455,8 @@ void ploop_submit_index_wb_sync(struct ploop *ploop,
        if (ret)
                status = errno_to_blk_status(ret);
 
-       dst_cluster = ((u64)piwb->page_nr << PAGE_SHIFT) / CLU_SIZE(ploop);
-       track_dst_cluster(ploop, dst_cluster);
+       dst_clu = ((u64)piwb->page_nr << PAGE_SHIFT) / CLU_SIZE(ploop);
+       track_dst_cluster(ploop, dst_clu);
 
        ploop_bat_write_complete(piwb, status);
        wait_for_completion(&piwb->comp);
@@ -1474,14 +1474,14 @@ static void process_deferred_pios(struct ploop *ploop, 
struct list_head *pios,
 static int process_one_discard_pio(struct ploop *ploop, struct pio *pio,
                                   struct ploop_index_wb *piwb)
 {
-       unsigned int page_nr, cluster;
+       unsigned int page_nr, clu;
        bool bat_update_prepared;
        map_index_t *to;
 
        WARN_ON(ploop->nr_deltas != 1);
 
-       cluster = pio->cluster;
-       page_nr = bat_clu_to_page_nr(cluster);
+       clu = pio->clu;
+       page_nr = bat_clu_to_page_nr(clu);
        bat_update_prepared = false;
 
        if (piwb->page_nr == PAGE_NR_NONE) {
@@ -1501,16 +1501,16 @@ static int process_one_discard_pio(struct ploop *ploop, 
struct pio *pio,
        }
 
        /* Cluster index related to the page[page_nr] start */
-       cluster -= piwb->page_nr * PAGE_SIZE / sizeof(map_index_t) - 
PLOOP_MAP_OFFSET;
+       clu -= piwb->page_nr * PAGE_SIZE / sizeof(map_index_t) - 
PLOOP_MAP_OFFSET;
 
        to = kmap_atomic(piwb->bat_page);
-       if (WARN_ON_ONCE(!to[cluster])) {
+       if (WARN_ON_ONCE(!to[clu])) {
                pio->bi_status = BLK_STS_IOERR;
                pio_endio(pio);
                if (bat_update_prepared)
                        ploop_reset_bat_update(piwb);
        } else {
-               to[cluster] = 0;
+               to[clu] = 0;
                list_add_tail(&pio->list, &piwb->ready_data_pios);
        }
        kunmap_atomic(to);
@@ -1758,31 +1758,31 @@ static void handle_cleanup(struct ploop *ploop, struct 
pio *pio)
 
 /*
  * Prepare simple index writeback without attached data bios.
- * In case of @dst_cluster is passed, this tryes to allocate
+ * If @dst_clu is passed, this tries to allocate
  * another index instead of existing. If so, management of
- * old bat_entries[@cluster] and of related holes_bitmap bit
+ * old bat_entries[@clu] and of related holes_bitmap bit
  * is caller duty.
  */
 int ploop_prepare_reloc_index_wb(struct ploop *ploop,
                                 struct ploop_index_wb *piwb,
-                                unsigned int cluster,
-                                unsigned int *dst_cluster)
+                                unsigned int clu,
+                                unsigned int *dst_clu)
 {
-       unsigned int page_nr = bat_clu_to_page_nr(cluster);
+       unsigned int page_nr = bat_clu_to_page_nr(clu);
 
        if (piwb->page_nr != PAGE_NR_NONE ||
            ploop_prepare_bat_update(ploop, page_nr, piwb))
                goto out_eio;
-       if (dst_cluster) {
+       if (dst_clu) {
                /*
                 * For ploop_advance_local_after_bat_wb(): do not concern
-                * about bat_cluster[@cluster] is set. Zero bat_page[@cluster],
-                * to make ploop_alloc_cluster() allocate new dst_cluster from
+                * about bat_cluster[@clu] is set. Zero bat_page[@clu],
+                * to make ploop_alloc_cluster() allocate new dst_clu from
                 * holes_bitmap.
                 */
                piwb->type = PIWB_TYPE_RELOC;
-               ploop_bat_page_zero_cluster(ploop, piwb, cluster);
-               if (ploop_alloc_cluster(ploop, piwb, cluster, dst_cluster))
+               ploop_bat_page_zero_cluster(ploop, piwb, clu);
+               if (ploop_alloc_cluster(ploop, piwb, clu, dst_clu))
                        goto out_reset;
        }
 
diff --git a/drivers/md/dm-ploop.h b/drivers/md/dm-ploop.h
index 307dfe6135fe..9192d96ce64d 100644
--- a/drivers/md/dm-ploop.h
+++ b/drivers/md/dm-ploop.h
@@ -62,16 +62,16 @@ struct ploop_cmd {
                        unsigned int stage;
                        unsigned int nr_bat_entries;
                        unsigned int hb_nr;
-                       unsigned int end_dst_cluster;
+                       unsigned int end_dst_clu;
                        unsigned int nr_old_bat_clu;
-                       unsigned int cluster, dst_cluster;
+                       unsigned int clu, dst_clu;
                        struct pio *pio;
                } resize;
        };
 };
 
 #define PAGE_NR_NONE           UINT_MAX
-/* We can't use 0 for unmapped clusters, since RAW image references 0 cluster 
*/
+/* We can't use 0 for unmapped clusters, since RAW image references clu 0 */
 #define BAT_ENTRY_NONE         UINT_MAX
 
 #define PLOOP_INFLIGHT_TIMEOUT (60 * HZ)
@@ -88,7 +88,7 @@ struct ploop_cmd {
 
 enum piwb_type {
        PIWB_TYPE_ALLOC = 0,    /* Allocation of new clusters */
-       PIWB_TYPE_RELOC,        /* Relocation of cluster (on BAT grow) */
+       PIWB_TYPE_RELOC,        /* Relocation of clu (on BAT grow) */
        PIWB_TYPE_DISCARD,      /* Zeroing index on discard */
 };
 
@@ -142,12 +142,12 @@ struct ploop {
        /*
         * Hash table to link non-exclusive submitted bios.
         * This is needed for discard to check, nobody uses
-        * the discarding cluster.
+        * the discarding clu.
         */
        struct hlist_head *inflight_pios;
        /*
         * Hash table to link exclusive submitted bios.
-        * This allows to delay bios going in some cluster.
+        * This allows to delay bios going in some clu.
         */
        struct hlist_head *exclusive_pios;
 
@@ -227,7 +227,7 @@ struct pio {
        ploop_endio_t endio_cb;
        void *endio_cb_data;
 
-       unsigned int cluster;
+       unsigned int clu;
        u8 level;
 
        bool is_data_alloc:1;
@@ -254,7 +254,7 @@ struct pio {
 struct ploop_cow {
        struct ploop *ploop;
        struct pio *aux_pio;
-       unsigned int dst_cluster;
+       unsigned int dst_clu;
 
        struct pio *cow_pio;
 };
@@ -275,10 +275,10 @@ static inline bool ploop_is_ro(struct ploop *ploop)
 }
 
 static inline void remap_to_cluster(struct ploop *ploop, struct pio *pio,
-                                   unsigned int cluster)
+                                   unsigned int clu)
 {
        pio->bi_iter.bi_sector &= ((1 << ploop->cluster_log) - 1);
-       pio->bi_iter.bi_sector |= (cluster << ploop->cluster_log);
+       pio->bi_iter.bi_sector |= (clu << ploop->cluster_log);
 }
 
 static inline bool whole_cluster(struct ploop *ploop, struct pio *pio)
@@ -348,17 +348,17 @@ static inline unsigned int ploop_nr_bat_clusters(struct 
ploop *ploop,
        return bat_clusters;
 }
 
-static inline unsigned int bat_clu_to_page_nr(unsigned int cluster)
+static inline unsigned int bat_clu_to_page_nr(unsigned int clu)
 {
        unsigned int byte;
 
-       byte = (cluster + PLOOP_MAP_OFFSET) * sizeof(map_index_t);
+       byte = (clu + PLOOP_MAP_OFFSET) * sizeof(map_index_t);
        return byte >> PAGE_SHIFT;
 }
 
-static inline unsigned int bat_clu_idx_in_page(unsigned int cluster)
+static inline unsigned int bat_clu_idx_in_page(unsigned int clu)
 {
-       return (cluster + PLOOP_MAP_OFFSET) % (PAGE_SIZE / sizeof(map_index_t));
+       return (clu + PLOOP_MAP_OFFSET) % (PAGE_SIZE / sizeof(map_index_t));
 }
 
 static inline unsigned int page_clu_idx_to_bat_clu(unsigned int page_id,
@@ -373,63 +373,63 @@ extern struct md_page * md_page_find(struct ploop *ploop, 
unsigned int id);
 
 /*
  * This should be called in very rare cases. Avoid this function
- * in cycles by cluster, use ploop_for_each_md_page()-based
+ * in cycles by clu, use ploop_for_each_md_page()-based
  * iterations instead.
  */
 static inline unsigned int ploop_bat_entries(struct ploop *ploop,
-                                            unsigned int cluster,
+                                            unsigned int clu,
                                             u8 *bat_level)
 {
-       unsigned int *bat_entries, dst_cluster, id;
+       unsigned int *bat_entries, dst_clu, id;
        struct md_page *md;
 
-       id = bat_clu_to_page_nr(cluster);
+       id = bat_clu_to_page_nr(clu);
        md = md_page_find(ploop, id);
        BUG_ON(!md);
 
        /* Cluster index related to the page[page_nr] start */
-       cluster = bat_clu_idx_in_page(cluster);
+       clu = bat_clu_idx_in_page(clu);
 
        if (bat_level)
-               *bat_level = md->bat_levels[cluster];
+               *bat_level = md->bat_levels[clu];
 
        bat_entries = kmap_atomic(md->page);
-       dst_cluster = bat_entries[cluster];
+       dst_clu = bat_entries[clu];
        kunmap_atomic(bat_entries);
-       return dst_cluster;
+       return dst_clu;
 }
 
 static inline bool cluster_is_in_top_delta(struct ploop *ploop,
-                                          unsigned int cluster)
+                                          unsigned int clu)
 {
-       unsigned int dst_cluster;
+       unsigned int dst_clu;
        u8 level;
 
-       if (WARN_ON(cluster >= ploop->nr_bat_entries))
+       if (WARN_ON(clu >= ploop->nr_bat_entries))
                return false;
-       dst_cluster = ploop_bat_entries(ploop, cluster, &level);
+       dst_clu = ploop_bat_entries(ploop, clu, &level);
 
-       if (dst_cluster == BAT_ENTRY_NONE || level < top_level(ploop))
+       if (dst_clu == BAT_ENTRY_NONE || level < top_level(ploop))
                return false;
        return true;
 }
 
 static inline bool md_page_cluster_is_in_top_delta(struct ploop *ploop,
-                             struct md_page *md, unsigned int cluster)
+                             struct md_page *md, unsigned int clu)
 {
        unsigned int count, *bat_entries;
        bool ret = true;
 
        count = PAGE_SIZE / sizeof(map_index_t);
-       if ((cluster + 1) * sizeof(u8) > ksize(md->bat_levels) ||
-           cluster >= count) {
-               WARN_ONCE(1, "cluster=%u count=%u\n", cluster, count);
+       if ((clu + 1) * sizeof(u8) > ksize(md->bat_levels) ||
+           clu >= count) {
+               WARN_ONCE(1, "clu=%u count=%u\n", clu, count);
                return false;
        }
 
        bat_entries = kmap_atomic(md->page);
-       if (bat_entries[cluster] == BAT_ENTRY_NONE ||
-           md->bat_levels[cluster] < top_level(ploop))
+       if (bat_entries[clu] == BAT_ENTRY_NONE ||
+           md->bat_levels[clu] < top_level(ploop))
                ret = false;
        kunmap_atomic(bat_entries);
        return ret;
@@ -499,8 +499,8 @@ static inline bool fake_merge_pio(struct pio *pio)
 extern void md_page_insert(struct ploop *ploop, struct md_page *md);
 extern void ploop_free_md_page(struct md_page *md);
 extern void free_md_pages_tree(struct rb_root *root);
-extern bool try_update_bat_entry(struct ploop *ploop, unsigned int cluster,
-                                u8 level, unsigned int dst_cluster);
+extern bool try_update_bat_entry(struct ploop *ploop, unsigned int clu,
+                                u8 level, unsigned int dst_clu);
 extern int convert_bat_entries(u32 *bat_entries, u32 count);
 
 extern int ploop_add_delta(struct ploop *ploop, u32 level, struct file *file, 
bool is_raw);
@@ -511,7 +511,7 @@ extern void do_ploop_fsync_work(struct work_struct *ws);
 extern void ploop_event_work(struct work_struct *work);
 extern int ploop_clone_and_map(struct dm_target *ti, struct request *rq,
                    union map_info *map_context, struct request **clone);
-extern struct pio *find_lk_of_cluster(struct ploop *ploop, u32 cluster);
+extern struct pio *find_lk_of_cluster(struct ploop *ploop, u32 clu);
 extern void init_pio(struct ploop *ploop, unsigned int bi_op, struct pio *pio);
 extern int ploop_rw_page_sync(unsigned rw, struct file *file,
                              u64 index, struct page *page);


_______________________________________________
Devel mailing list
[email protected]
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to