This simplifies the callers and leads to a more efficient implementation
since the XArray has this functionality already.

Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Jan Kara <[email protected]>
Reviewed-by: William Kucharski <[email protected]>
---
 include/linux/pagemap.h |  4 ++--
 mm/filemap.c            |  9 +++++----
 mm/shmem.c              | 10 ++--------
 mm/swap.c               |  2 +-
 4 files changed, 10 insertions(+), 15 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 869dc371b800..d440c6750757 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -414,8 +414,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
 }
 
 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
-                         unsigned int nr_entries, struct page **entries,
-                         pgoff_t *indices);
+               pgoff_t end, unsigned int nr_entries, struct page **entries,
+               pgoff_t *indices);
 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
diff --git a/mm/filemap.c b/mm/filemap.c
index 9f7f6b46aee4..6dc0a9b8c0fa 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1799,6 +1799,7 @@ static inline struct page *xas_find_get_entry(struct xa_state *xas,
  * find_get_entries - gang pagecache lookup
  * @mapping:   The address_space to search
  * @start:     The starting page cache index
+ * @end:       The final page index (inclusive).
  * @nr_entries:        The maximum number of entries
  * @entries:   Where the resulting entries are placed
  * @indices:   The cache indices corresponding to the entries in @entries
@@ -1822,9 +1823,9 @@ static inline struct page *xas_find_get_entry(struct xa_state *xas,
  *
  * Return: the number of pages and shadow entries which were found.
  */
-unsigned find_get_entries(struct address_space *mapping,
-                         pgoff_t start, unsigned int nr_entries,
-                         struct page **entries, pgoff_t *indices)
+unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+               pgoff_t end, unsigned int nr_entries, struct page **entries,
+               pgoff_t *indices)
 {
        XA_STATE(xas, &mapping->i_pages, start);
        struct page *page;
@@ -1834,7 +1835,7 @@ unsigned find_get_entries(struct address_space *mapping,
                return 0;
 
        rcu_read_lock();
-       while ((page = xas_find_get_entry(&xas, ULONG_MAX, XA_PRESENT))) {
+       while ((page = xas_find_get_entry(&xas, end, XA_PRESENT))) {
                /*
                 * Terminate early on finding a THP, to allow the caller to
                 * handle it all at once; but continue if this is hugetlbfs.
diff --git a/mm/shmem.c b/mm/shmem.c
index a73ce8ce28e3..404e45c285ca 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -911,8 +911,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        struct page *page = pvec.pages[i];
 
                        index = indices[i];
-                       if (index >= end)
-                               break;
 
                        if (xa_is_value(page)) {
                                if (unfalloc)
@@ -965,9 +963,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
        while (index < end) {
                cond_resched();
 
-               pvec.nr = find_get_entries(mapping, index,
-                               min(end - index, (pgoff_t)PAGEVEC_SIZE),
-                               pvec.pages, indices);
+               pvec.nr = find_get_entries(mapping, index, end - 1,
+                               PAGEVEC_SIZE, pvec.pages, indices);
                if (!pvec.nr) {
                        /* If all gone or hole-punch or unfalloc, we're done */
                        if (index == start || end != -1)
@@ -980,9 +977,6 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        struct page *page = pvec.pages[i];
 
                        index = indices[i];
-                       if (index >= end)
-                               break;
-
                        if (xa_is_value(page)) {
                                if (unfalloc)
                                        continue;
diff --git a/mm/swap.c b/mm/swap.c
index d16d65d9b4e0..fcf6ccb94b09 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1060,7 +1060,7 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec,
                                pgoff_t start, unsigned nr_entries,
                                pgoff_t *indices)
 {
-       pvec->nr = find_get_entries(mapping, start, nr_entries,
+       pvec->nr = find_get_entries(mapping, start, ULONG_MAX, nr_entries,
                                    pvec->pages, indices);
        return pagevec_count(pvec);
 }
-- 
2.28.0

Reply via email to