Xen uses the page as the smallest granularity for memory management, and we want to follow the same concept on MPU systems. That is, the page_info structure and the frametable, which stores and manages page_info entries, are also required on MPU systems.
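For illustration, once the frametable exists, a page's metadata is found by indexing the table with the page's PDX. The sketch below mirrors Xen's existing mfn_to_page() lookup; the helper name example_mfn_to_page is hypothetical and not part of this patch:

    /*
     * Sketch only, not part of this patch: page_info lookup once
     * frame_table and frametable_base_pdx are initialized. This is
     * how mfn_to_page() resolves an entry on both MMU and MPU systems.
     */
    static inline struct page_info *example_mfn_to_page(mfn_t mfn)
    {
        return frame_table + (mfn_to_pdx(mfn) - frametable_base_pdx);
    }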
On MPU systems, since there is no virtual address translation (VA == PA), we cannot use a fixed virtual address (FRAMETABLE_VIRT_START) to map the frametable the way the MMU system does. Instead, we define a variable "struct page_info *frame_table" as the frametable pointer, and ask the boot allocator to allocate memory for the frametable. Once the frametable is initialized, the conversions between machine frame number/machine address/"virtual address" and the page-info structure are available as well, e.g. mfn_to_page/maddr_to_page/virt_to_page.

Signed-off-by: Penny Zheng <penny.zh...@arm.com>
Signed-off-by: Wei Chen <wei.c...@arm.com>
---
 xen/arch/arm/include/asm/mm.h     | 15 ---------------
 xen/arch/arm/include/asm/mm_mmu.h | 16 ++++++++++++++++
 xen/arch/arm/include/asm/mm_mpu.h | 17 +++++++++++++++++
 xen/arch/arm/mm_mpu.c             | 25 +++++++++++++++++++++++++
 4 files changed, 58 insertions(+), 15 deletions(-)

diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
index e29158028a..7969ec9f98 100644
--- a/xen/arch/arm/include/asm/mm.h
+++ b/xen/arch/arm/include/asm/mm.h
@@ -176,7 +176,6 @@ struct page_info
 
 #define maddr_get_owner(ma)   (page_get_owner(maddr_to_page((ma))))
 
-#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
 /* PDX of the first page in the frame table. */
 extern unsigned long frametable_base_pdx;
 
@@ -280,20 +279,6 @@ static inline uint64_t gvirt_to_maddr(vaddr_t va, paddr_t *pa,
 #define virt_to_mfn(va)     __virt_to_mfn(va)
 #define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
 
-/* Convert between Xen-heap virtual addresses and page-info structures. */
-static inline struct page_info *virt_to_page(const void *v)
-{
-    unsigned long va = (unsigned long)v;
-    unsigned long pdx;
-
-    ASSERT(va >= XENHEAP_VIRT_START);
-    ASSERT(va < directmap_virt_end);
-
-    pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT;
-    pdx += mfn_to_pdx(directmap_mfn_start);
-    return frame_table + pdx - frametable_base_pdx;
-}
-
 static inline void *page_to_virt(const struct page_info *pg)
 {
     return mfn_to_virt(mfn_x(page_to_mfn(pg)));
diff --git a/xen/arch/arm/include/asm/mm_mmu.h b/xen/arch/arm/include/asm/mm_mmu.h
index 6d7e5ddde7..bc1b04c4c7 100644
--- a/xen/arch/arm/include/asm/mm_mmu.h
+++ b/xen/arch/arm/include/asm/mm_mmu.h
@@ -23,6 +23,8 @@ extern uint64_t init_ttbr;
 extern void setup_directmap_mappings(unsigned long base_mfn,
                                      unsigned long nr_mfns);
 
+#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
+
 static inline paddr_t __virt_to_maddr(vaddr_t va)
 {
     uint64_t par = va_to_par(va);
@@ -49,6 +51,20 @@ static inline void *maddr_to_virt(paddr_t ma)
 }
 #endif
 
+/* Convert between Xen-heap virtual addresses and page-info structures. */
+static inline struct page_info *virt_to_page(const void *v)
+{
+    unsigned long va = (unsigned long)v;
+    unsigned long pdx;
+
+    ASSERT(va >= XENHEAP_VIRT_START);
+    ASSERT(va < directmap_virt_end);
+
+    pdx = (va - XENHEAP_VIRT_START) >> PAGE_SHIFT;
+    pdx += mfn_to_pdx(directmap_mfn_start);
+    return frame_table + pdx - frametable_base_pdx;
+}
+
 #endif /* __ARCH_ARM_MM_MMU__ */
 
 /*
diff --git a/xen/arch/arm/include/asm/mm_mpu.h b/xen/arch/arm/include/asm/mm_mpu.h
index fe6a828a50..eebd5b5d35 100644
--- a/xen/arch/arm/include/asm/mm_mpu.h
+++ b/xen/arch/arm/include/asm/mm_mpu.h
@@ -9,6 +9,8 @@
  */
 extern void setup_static_mappings(void);
 
+extern struct page_info *frame_table;
+
 static inline paddr_t __virt_to_maddr(vaddr_t va)
 {
     /* In MPU system, VA == PA. */
@@ -22,6 +24,21 @@ static inline void *maddr_to_virt(paddr_t ma)
     return (void *)ma;
 }
 
+/* Convert a virtual address to a page-info structure. */
+static inline struct page_info *virt_to_page(const void *v)
+{
+    unsigned long va = (unsigned long)v;
+    unsigned long pdx;
+
+    /*
+     * In MPU system, VA == PA, virt_to_maddr() outputs the
+     * exact input address.
+     */
+    pdx = mfn_to_pdx(maddr_to_mfn(virt_to_maddr(va)));
+
+    return frame_table + pdx - frametable_base_pdx;
+}
+
 #endif /* __ARCH_ARM_MM_MPU__ */
 
 /*
diff --git a/xen/arch/arm/mm_mpu.c b/xen/arch/arm/mm_mpu.c
index f057ee26df..7b282be4fb 100644
--- a/xen/arch/arm/mm_mpu.c
+++ b/xen/arch/arm/mm_mpu.c
@@ -69,6 +69,8 @@ static DEFINE_SPINLOCK(xen_mpumap_lock);
 
 static paddr_t dtb_paddr;
 
+struct page_info *frame_table;
+
 /* Write a MPU protection region */
 #define WRITE_PROTECTION_REGION(sel, pr, prbar_el2, prlar_el2) ({       \
     uint64_t _sel = sel;                                                \
@@ -564,6 +566,29 @@ void __init setup_static_mappings(void)
     /* TODO: guest memory section, device memory section, boot-module section, etc */
 }
 
+/* Map a frame table to cover physical addresses ps through pe */
+void __init setup_frametable_mappings(paddr_t ps, paddr_t pe)
+{
+    mfn_t base_mfn;
+    unsigned long nr_pdxs = mfn_to_pdx(mfn_add(maddr_to_mfn(pe), -1)) -
+                            mfn_to_pdx(maddr_to_mfn(ps)) + 1;
+    unsigned long frametable_size = nr_pdxs * sizeof(struct page_info);
+
+    frametable_base_pdx = mfn_to_pdx(maddr_to_mfn(ps));
+    frametable_size = ROUNDUP(frametable_size, PAGE_SIZE);
+    /*
+     * Since VA == PA in MPU and we have already set up the Xenheap
+     * mapping in setup_staticheap_mappings(), we can easily deduce
+     * the "virtual address" of the frame table.
+     */
+    base_mfn = alloc_boot_pages(frametable_size >> PAGE_SHIFT, 1);
+    frame_table = (struct page_info *)mfn_to_virt(base_mfn);
+
+    memset(&frame_table[0], 0, nr_pdxs * sizeof(struct page_info));
+    memset(&frame_table[nr_pdxs], -1,
+           frametable_size - (nr_pdxs * sizeof(struct page_info)));
+}
+
 /* TODO: Implementation on the first usage */
 void dump_hyp_walk(vaddr_t addr)
 {
-- 
2.25.1
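For reviewers, a rough sketch of how the new setup_frametable_mappings() is expected to be wired into the MPU boot path once the boot allocator knows about RAM, as described in the commit message. The caller name setup_mm(), the single-bank RAM layout taken from bootinfo, and the call ordering are assumptions for illustration, not part of this patch:

    #include <xen/init.h>
    #include <xen/lib.h>
    #include <xen/mm.h>
    #include <asm/mm.h>
    #include <asm/setup.h>

    /* Sketch only: hypothetical MPU boot-time caller. */
    void __init setup_mm(void)
    {
        /* Placeholder: assume a single RAM bank described by bootinfo. */
        paddr_t ram_start = bootinfo.mem.bank[0].start;
        paddr_t ram_end = ram_start + bootinfo.mem.bank[0].size;
        struct page_info *pg;

        /* The boot allocator must already own this RAM. */
        init_boot_pages(ram_start, ram_end);

        /* Allocate and initialise the frametable covering [ram_start, ram_end). */
        setup_frametable_mappings(ram_start, ram_end);

        /* From here on, mfn/maddr/"virtual address" <-> page_info works. */
        pg = maddr_to_page(ram_start);
        ASSERT(page_to_maddr(pg) == ram_start);
    }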