A dual-plane program operation must address two blocks located in two different planes.

Signed-off-by: Bean Huo <bean...@micron.com>
---
 drivers/mtd/ubi/wl.c | 134 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 134 insertions(+)

diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 275d9fb..9d2268a 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -140,6 +140,7 @@ static int self_check_in_wl_tree(const struct ubi_device 
*ubi,
                                 struct ubi_wl_entry *e, struct rb_root *root);
 static int self_check_in_pq(const struct ubi_device *ubi,
                            struct ubi_wl_entry *e);
+static int produce_free_peb(struct ubi_device *ubi);
 
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
@@ -371,6 +372,72 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct 
ubi_device *ubi,
        return e;
 }
 
+/**
+ * find_wl_plane_entry - find a wear-leveling entry on a given plane.
+ * @ubi: UBI device description object
+ * @root: the RB-tree where to look for
+ * @diff: maximum possible difference from the smallest erase counter
+ * @plane: the plane the returned PEB must belong to (PEB number parity:
+ *         even PEB numbers are plane 0, odd PEB numbers are plane 1)
+ *
+ * This is the plane-aware counterpart of find_wl_entry(): it looks for an
+ * entry with an erase counter closest to (min EC + @diff) whose PEB number
+ * is on @plane. Returns %NULL if no entry on @plane was found.
+ */
+static struct ubi_wl_entry *find_wl_plane_entry(struct ubi_device *ubi,
+                               struct rb_root *root, int diff, int plane)
+{
+       struct rb_node *p;
+       struct ubi_wl_entry *e, *prev_bk_e = NULL, *bk_e = NULL;
+       int max;
+
+       e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+       max = e->ec + diff;
+
+       p = root->rb_node;
+       while (p) {
+               struct ubi_wl_entry *e1;
+
+               e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
+               if (e1->ec >= max) {
+                       p = p->rb_left;
+               } else {
+                       p = p->rb_right;
+
+                       /* Remember the previous on-plane candidate */
+                       if (e->pnum % 2 == plane)
+                               prev_bk_e = e;
+
+                       e = e1;
+
+                       if (e1->pnum % 2 == plane)
+                               bk_e = e1;
+               }
+       }
+
+       /*
+        * Only the first (rb_first) entry may have matched @plane; in that
+        * case @bk_e is still %NULL and must not be dereferenced below.
+        */
+       if (!bk_e)
+               return prev_bk_e;
+
+       /*
+        * If no fastmap has been written and this WL entry can be used
+        * as anchor PEB, hold it back and return the second best WL entry
+        * such that fastmap can use the anchor PEB later.
+        */
+       if (prev_bk_e && !ubi->fm_disabled &&
+           !ubi->fm && bk_e->pnum < UBI_FM_MAX_START)
+               return prev_bk_e;
+
+       return bk_e;
+}
+
+/**
+ * find_mean_plane_wl_entry - find a "mean" wear-leveling entry on a plane.
+ * @ubi: UBI device description object
+ * @root: the RB-tree where to look for
+ * @plane: the plane the returned PEB should belong to (PEB number parity)
+ *
+ * Plane-aware counterpart of find_mean_wl_entry(): picks a medium erase
+ * counter entry whose PEB number is on @plane. Returns %NULL if no
+ * suitable entry was found.
+ */
+static struct ubi_wl_entry *find_mean_plane_wl_entry(struct ubi_device *ubi,
+                                               struct rb_root *root, int plane)
+{
+       struct ubi_wl_entry *e, *first, *last;
+
+       first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+       last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
+
+       e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
+
+       if (last->ec - first->ec < WL_FREE_MAX_DIFF && e->pnum % 2 == plane) {
+#ifdef CONFIG_MTD_UBI_FASTMAP
+               /*
+                * If no fastmap has been written and this WL entry can be
+                * used as anchor PEB, hold it back and return the second
+                * best WL entry such that fastmap can use the anchor PEB
+                * later.
+                *
+                * NOTE(review): may_reserve_for_fm() may substitute an
+                * entry that is not on @plane -- confirm this is intended.
+                */
+               e = may_reserve_for_fm(ubi, e, root);
+#endif
+       } else {
+               e = find_wl_plane_entry(ubi, root, WL_FREE_MAX_DIFF/2, plane);
+       }
+
+       return e;
+}
+
 /**
  * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
  * refill_wl_user_pool().
@@ -1751,6 +1818,50 @@ static int self_check_in_pq(const struct ubi_device *ubi,
        dump_stack();
        return -EINVAL;
 }
+
+/**
+ * __wl_get_plane_peb - get a free physical eraseblock from a given plane.
+ * @ubi: UBI device description object
+ * @plane: the plane the PEB must belong to (PEB number parity: even PEB
+ *         numbers are plane 0, odd PEB numbers are plane 1)
+ *
+ * This function returns the PEB number of a free physical eraseblock on
+ * @plane, or a negative error code (%-ENOSPC if none can be produced).
+ * The caller, ubi_wl_get_plane_peb(), holds @ubi->wl_lock around this call.
+ */
+static int __wl_get_plane_peb(struct ubi_device *ubi, int plane)
+{
+       int err;
+       struct ubi_wl_entry *e;
+
+retry:
+       if (!ubi->free.rb_node) {
+               if (ubi->works_count == 0) {
+                       ubi_err(ubi, "no free eraseblocks");
+                       ubi_assert(list_empty(&ubi->works));
+                       return -ENOSPC;
+               }
+
+               /* Work is pending -- let it produce a free PEB, then retry */
+               err = produce_free_peb(ubi);
+               if (err < 0)
+                       return err;
+               goto retry;
+       }
+
+       /* The free tree may hold PEBs only on the other plane */
+       e = find_mean_plane_wl_entry(ubi, &ubi->free, plane);
+       if (!e) {
+               ubi_err(ubi, "no free eraseblocks");
+               return -ENOSPC;
+       }
+
+       self_check_in_wl_tree(ubi, e, &ubi->free);
+
+       /*
+        * Move the physical eraseblock to the protection queue where it will
+        * be protected from being moved for some time.
+        */
+       rb_erase(&e->u.rb, &ubi->free);
+       ubi->free_count--;
+       dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+#ifndef CONFIG_MTD_UBI_FASTMAP
+       /* We have to enqueue e only if fastmap is disabled,
+        * if fastmap is enabled prot_queue_add() will be called by
+        * ubi_wl_get_peb() after removing e from the pool. */
+       prot_queue_add(ubi, e);
+#endif
+       return e->pnum;
+}
+
 #ifndef CONFIG_MTD_UBI_FASTMAP
 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 {
@@ -1839,6 +1950,29 @@ retry:
 
        return e->pnum;
 }
+
+/**
+ * ubi_wl_get_plane_peb - get a free physical eraseblock from a given plane.
+ * @ubi: UBI device description object
+ * @plane: the plane the PEB must belong to (PEB number parity: even PEB
+ *         numbers are plane 0, odd PEB numbers are plane 1)
+ *
+ * This function returns the PEB number of a newly allocated free PEB on
+ * @plane, or a negative error code in case of failure.
+ */
+int ubi_wl_get_plane_peb(struct ubi_device *ubi, int plane)
+{
+       int peb, err;
+
+       spin_lock(&ubi->wl_lock);
+       peb = __wl_get_plane_peb(ubi, plane);
+       spin_unlock(&ubi->wl_lock);
+
+       if (peb < 0)
+               return peb;
+
+       /* Verify the data area past the VID header is still all 0xFF */
+       err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
+                                   ubi->peb_size - ubi->vid_hdr_aloffset);
+       if (err) {
+               ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes",
+                       peb);
+               return err;
+       }
+
+       return peb;
+}
+
 #else
 #include "fastmap-wl.c"
 #endif
-- 
1.9.1

Reply via email to