To keep balance in future, remember to pte_unmap() after a successful
get_ptep().  And (we might as well) pretend that flush_cache_pages()
really needed a map there, to read the pfn before "unmapping".

Signed-off-by: Hugh Dickins <hu...@google.com>
---
 arch/parisc/kernel/cache.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 1d3b8bc8a623..b0c969b3a300 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -425,10 +425,15 @@ void flush_dcache_page(struct page *page)
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;
                if (parisc_requires_coherency()) {
+                       bool needs_flush = false;
                        pte_t *ptep;
 
                        ptep = get_ptep(mpnt->vm_mm, addr);
-                       if (ptep && pte_needs_flush(*ptep))
+                       if (ptep) {
+                               needs_flush = pte_needs_flush(*ptep);
+                               pte_unmap(ptep);
+                       }
+                       if (needs_flush)
                                flush_user_cache_page(mpnt, addr);
                } else {
                        /*
@@ -560,14 +565,20 @@ EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 static void flush_cache_page_if_present(struct vm_area_struct *vma,
        unsigned long vmaddr, unsigned long pfn)
 {
-       pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);
+       bool needs_flush = false;
+       pte_t *ptep;
 
        /*
         * The pte check is racy and sometimes the flush will trigger
         * a non-access TLB miss. Hopefully, the page has already been
         * flushed.
         */
-       if (ptep && pte_needs_flush(*ptep))
+       ptep = get_ptep(vma->vm_mm, vmaddr);
+       if (ptep) {
+               needs_flush = pte_needs_flush(*ptep);
+               pte_unmap(ptep);
+       }
+       if (needs_flush)
                flush_cache_page(vma, vmaddr, pfn);
 }
 
@@ -634,17 +645,22 @@ static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, u
        pte_t *ptep;
 
        for (addr = start; addr < end; addr += PAGE_SIZE) {
+               bool needs_flush = false;
                /*
                 * The vma can contain pages that aren't present. Although
                 * the pte search is expensive, we need the pte to find the
                 * page pfn and to check whether the page should be flushed.
                 */
                ptep = get_ptep(vma->vm_mm, addr);
-               if (ptep && pte_needs_flush(*ptep)) {
+               if (ptep) {
+                       needs_flush = pte_needs_flush(*ptep);
+                       pfn = pte_pfn(*ptep);
+                       pte_unmap(ptep);
+               }
+               if (needs_flush) {
                        if (parisc_requires_coherency()) {
                                flush_user_cache_page(vma, addr);
                        } else {
-                               pfn = pte_pfn(*ptep);
                                if (WARN_ON(!pfn_valid(pfn)))
                                        return;
                                __flush_cache_page(vma, addr, PFN_PHYS(pfn));
-- 
2.35.3

Reply via email to