This patch implements THP shadow stack (SHSTK) copying in the same
way as the previous patch does for regular PTEs.

In copy_huge_pmd(), clear the dirty bit from the PMD so that the next
SHSTK access to it causes a page fault.  At fault time, fix up the
PMD and copy or re-use the page.
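
A minimal sketch of that copy-time step, for illustration only (this
diff does not modify copy_huge_pmd() itself): the helper name
shstk_cow_pmd() is hypothetical, while pmd_wrprotect(), pmd_mkold()
and pmd_mkclean() are existing helpers, and arch_copy_pte_mapping()
was introduced in the previous patch.

	/* Hypothetical sketch: write-protect a huge PMD for COW. */
	static pmd_t shstk_cow_pmd(pmd_t pmd, vm_flags_t vm_flags)
	{
		/* Normal COW: drop write permission and the accessed bit. */
		pmd = pmd_mkold(pmd_wrprotect(pmd));

		/*
		 * A shadow stack PMD is Write=0, Dirty=1.  Clearing the
		 * dirty bit makes it invalid for SHSTK accesses, so the
		 * next shadow stack access faults and the fault path can
		 * copy or re-use the page and restore the SHSTK PMD via
		 * pmd_set_vma_features().
		 */
		if (arch_copy_pte_mapping(vm_flags))
			pmd = pmd_mkclean(pmd);

		return pmd;
	}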

Signed-off-by: Yu-cheng Yu <yu-cheng...@intel.com>
---
 arch/x86/mm/pgtable.c         | 8 ++++++++
 include/asm-generic/pgtable.h | 2 ++
 mm/huge_memory.c              | 4 ++++
 3 files changed, 14 insertions(+)

diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index c2d754a780b3..8ff54bd978f3 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -901,6 +901,14 @@ inline pte_t pte_set_vma_features(pte_t pte, struct vm_area_struct *vma)
                return pte;
 }
 
+inline pmd_t pmd_set_vma_features(pmd_t pmd, struct vm_area_struct *vma)
+{
+       if (vma->vm_flags & VM_SHSTK)
+               return pmd_mkdirty_shstk(pmd);
+       else
+               return pmd;
+}
+
 inline bool arch_copy_pte_mapping(vm_flags_t vm_flags)
 {
        return (vm_flags & VM_SHSTK);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index ffcc0be7cadc..4940411b8e1c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1190,9 +1190,11 @@ static inline bool arch_has_pfn_modify_check(void)
 
 #ifndef CONFIG_ARCH_HAS_SHSTK
 #define pte_set_vma_features(pte, vma) pte
+#define pmd_set_vma_features(pmd, vma) pmd
 #define arch_copy_pte_mapping(vma_flags) false
 #else
 pte_t pte_set_vma_features(pte_t pte, struct vm_area_struct *vma);
+pmd_t pmd_set_vma_features(pmd_t pmd, struct vm_area_struct *vma);
 bool arch_copy_pte_mapping(vm_flags_t vm_flags);
 #endif
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9f8bce9a6b32..eac1ee2f8985 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -608,6 +608,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
                entry = mk_huge_pmd(page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+               entry = pmd_set_vma_features(entry, vma);
                page_add_new_anon_rmap(page, vma, haddr, true);
                mem_cgroup_commit_charge(page, memcg, false, true);
                lru_cache_add_active_or_unevictable(page, vma);
@@ -1250,6 +1251,7 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf,
                pte_t entry;
                entry = mk_pte(pages[i], vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+               entry = pte_set_vma_features(entry, vma);
                memcg = (void *)page_private(pages[i]);
                set_page_private(pages[i], 0);
                page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
@@ -1332,6 +1334,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+               entry = pmd_set_vma_features(entry, vma);
                if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry,  1))
                        update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
                ret |= VM_FAULT_WRITE;
@@ -1404,6 +1407,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
                pmd_t entry;
                entry = mk_huge_pmd(new_page, vma->vm_page_prot);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+               entry = pmd_set_vma_features(entry, vma);
                pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
                page_add_new_anon_rmap(new_page, vma, haddr, true);
                mem_cgroup_commit_charge(new_page, memcg, false, true);
-- 
2.17.1
