On 24.02.2023 16:06, Oleksii Kurochko wrote:
> --- /dev/null
> +++ b/xen/arch/riscv/include/asm/page.h
> @@ -0,0 +1,90 @@
> +#ifndef _ASM_RISCV_PAGE_H
> +#define _ASM_RISCV_PAGE_H
> +
> +#include <xen/const.h>
> +#include <xen/types.h>
> +
> +#define PAGE_ENTRIES 512
> +#define VPN_BITS (9)
> +#define VPN_MASK ((unsigned long)((1 << VPN_BITS) - 1))
> +
> +#ifdef CONFIG_RISCV_64
> +/* L3 index Bit[47:39] */
> +#define THIRD_SHIFT (39)
> +#define THIRD_MASK (VPN_MASK << THIRD_SHIFT)
> +/* L2 index Bit[38:30] */
> +#define SECOND_SHIFT (30)
> +#define SECOND_MASK (VPN_MASK << SECOND_SHIFT)
> +/* L1 index Bit[29:21] */
> +#define FIRST_SHIFT (21)
> +#define FIRST_MASK (VPN_MASK << FIRST_SHIFT)
> +/* L0 index Bit[20:12] */
> +#define ZEROETH_SHIFT (12)
> +#define ZEROETH_MASK (VPN_MASK << ZEROETH_SHIFT)
> +
> +#else // CONFIG_RISCV_32
> +
> +/* L1 index Bit[31:22] */
> +#define FIRST_SHIFT (22)
> +#define FIRST_MASK (VPN_MASK << FIRST_SHIFT)
> +
> +/* L0 index Bit[21:12] */
> +#define ZEROETH_SHIFT (12)
> +#define ZEROETH_MASK (VPN_MASK << ZEROETH_SHIFT)
> +#endif
> +
> +#define THIRD_SIZE (1 << THIRD_SHIFT)
> +#define THIRD_MAP_MASK (~(THIRD_SIZE - 1))
> +#define SECOND_SIZE (1 << SECOND_SHIFT)
> +#define SECOND_MAP_MASK (~(SECOND_SIZE - 1))
> +#define FIRST_SIZE (1 << FIRST_SHIFT)
> +#define FIRST_MAP_MASK (~(FIRST_SIZE - 1))
> +#define ZEROETH_SIZE (1 << ZEROETH_SHIFT)
> +#define ZEROETH_MAP_MASK (~(ZEROETH_SIZE - 1))
> +
> +#define PTE_SHIFT 10
> +
> +#define PTE_VALID BIT(0, UL)
> +#define PTE_READABLE BIT(1, UL)
> +#define PTE_WRITABLE BIT(2, UL)
> +#define PTE_EXECUTABLE BIT(3, UL)
> +#define PTE_USER BIT(4, UL)
> +#define PTE_GLOBAL BIT(5, UL)
> +#define PTE_ACCESSED BIT(6, UL)
> +#define PTE_DIRTY BIT(7, UL)
> +#define PTE_RSW (BIT(8, UL) | BIT(9, UL))
> +
> +#define PTE_LEAF_DEFAULT (PTE_VALID | PTE_READABLE | PTE_WRITABLE | PTE_EXECUTABLE)
> +#define PTE_TABLE (PTE_VALID)
> +
> +/* Calculate the offsets into the pagetables for a given VA */
> +#define zeroeth_linear_offset(va) ((va) >> ZEROETH_SHIFT)
> +#define first_linear_offset(va) ((va) >> FIRST_SHIFT)
> +#define second_linear_offset(va) ((va) >> SECOND_SHIFT)
> +#define third_linear_offset(va) ((va) >> THIRD_SHIFT)
> +
> +#define pagetable_zeroeth_index(va) zeroeth_linear_offset((va) & ZEROETH_MASK)
> +#define pagetable_first_index(va) first_linear_offset((va) & FIRST_MASK)
> +#define pagetable_second_index(va) second_linear_offset((va) & SECOND_MASK)
> +#define pagetable_third_index(va) third_linear_offset((va) & THIRD_MASK)
> +
> +/* Page Table entry */
> +typedef struct {
> +    uint64_t pte;
> +} pte_t;
> +
> +/* Shift the VPN[x] or PPN[x] fields of a virtual or physical address
> + * to become the shifted PPN[x] fields of a page table entry */
> +#define addr_to_ppn(x) (((x) >> PAGE_SHIFT) << PTE_SHIFT)
> +
> +static inline pte_t paddr_to_pte(unsigned long paddr)
> +{
> +    return (pte_t) { .pte = addr_to_ppn(paddr) };
> +}
> +
> +static inline bool pte_is_valid(pte_t *p)

Btw - const whenever possible please, especially in such basic helpers.
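I.e. something along these lines (just a sketch - the body of the
helper isn't part of the quoted context, so the PTE_VALID check below
is merely my assumption of what it does):

static inline bool pte_is_valid(const pte_t *p)
{
    return p->pte & PTE_VALID;
}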
> --- /dev/null
> +++ b/xen/arch/riscv/mm.c
> @@ -0,0 +1,223 @@
> +#include <xen/init.h>
> +#include <xen/lib.h>
> +
> +#include <asm/csr.h>
> +#include <asm/mm.h>
> +#include <asm/page.h>
> +
> +/*
> + * xen_second_pagetable is indexed with the VPN[2] page table entry field
> + * xen_first_pagetable is accessed from the VPN[1] page table entry field
> + * xen_zeroeth_pagetable is accessed from the VPN[0] page table entry field
> + */
> +pte_t xen_second_pagetable[PAGE_ENTRIES] __attribute__((__aligned__(PAGE_SIZE)));

static?

> +static pte_t xen_first_pagetable[PAGE_ENTRIES]
> +    __attribute__((__aligned__(PAGE_SIZE)));
> +static pte_t xen_zeroeth_pagetable[PAGE_ENTRIES]
> +    __attribute__((__aligned__(PAGE_SIZE)));

Please use __aligned() instead of open-coding it. You also may want to
specify the section here explicitly, as .bss.page_aligned (as we do
elsewhere).

> +extern unsigned long _stext;
> +extern unsigned long _etext;
> +extern unsigned long __init_begin;
> +extern unsigned long __init_end;
> +extern unsigned long _srodata;
> +extern unsigned long _erodata;

Please use kernel.h and drop the then colliding declarations. For
what's left please use array types, as suggested elsewhere already.
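To illustrate (a sketch only - I'm assuming kernel.h's existing
_stext[]/_etext[] and _srodata[]/_erodata[] declarations cover most of
the above, and that the __aligned()/__section() helpers from
xen/compiler.h are usable here):

#include <xen/kernel.h>

/* Only the init markers would then remain, as array types: */
extern char __init_begin[], __init_end[];

static pte_t xen_first_pagetable[PAGE_ENTRIES]
    __aligned(PAGE_SIZE) __section(".bss.page_aligned");
static pte_t xen_zeroeth_pagetable[PAGE_ENTRIES]
    __aligned(PAGE_SIZE) __section(".bss.page_aligned");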
> +paddr_t phys_offset;
> +
> +#define resolve_early_addr(x)                                              \
> +    ({                                                                     \
> +        unsigned long * __##x;                                             \
> +        if ( load_addr_start <= x && x < load_addr_end )                   \

Nit: Mismatched indentation.

> +            __##x = (unsigned long *)x;                                    \
> +        else                                                               \
> +            __##x = (unsigned long *)(x + load_addr_start - linker_addr_start); \
> +        __##x;                                                             \
> +    })
> +
> +static void __init clear_pagetables(unsigned long load_addr_start,
> +                                   unsigned long load_addr_end,
> +                                   unsigned long linker_addr_start,
> +                                   unsigned long linker_addr_end)

Nit (style): Indentation.

> +{
> +    unsigned long *p;
> +    unsigned long page;
> +    unsigned long i;
> +
> +    page = (unsigned long)&xen_second_pagetable[0];
> +
> +    p = resolve_early_addr(page);
> +    for ( i = 0; i < ARRAY_SIZE(xen_second_pagetable); i++ )
> +    {
> +        p[i] = 0ULL;
> +    }

We typically omit braces around single-statement bodies. Here, though:
Why do you do this in the first place? These static arrays all start
out zero-initialized anyway (from when you clear .bss). Plus even if
they didn't - why not memset()?

> +    page = (unsigned long)&xen_first_pagetable[0];
> +    p = resolve_early_addr(page);
> +    for ( i = 0; i < ARRAY_SIZE(xen_first_pagetable); i++ )
> +    {
> +        p[i] = 0ULL;
> +    }
> +
> +    page = (unsigned long)&xen_zeroeth_pagetable[0];
> +    p = resolve_early_addr(page);
> +    for ( i = 0; i < ARRAY_SIZE(xen_zeroeth_pagetable); i++ )
> +    {
> +        p[i] = 0ULL;
> +    }
> +}
> +
> +/*
> + * WARNING: load_addr() and linker_addr() are to be called only when the MMU is
> + * disabled and only when executed by the primary CPU. They cannot refer to
> + * any global variable or functions.
> + */
> +
> +/*
> + * Convert an addressed layed out at link time to the address where it was loaded
> + * by the bootloader.
> + */
> +#define load_addr(linker_address)                                          \
> +    ({                                                                     \
> +        unsigned long __linker_address = (unsigned long)(linker_address);  \
> +        if ( linker_addr_start <= __linker_address &&                      \
> +             __linker_address < linker_addr_end )                          \
> +        {                                                                  \
> +            __linker_address =                                             \
> +                __linker_address - linker_addr_start + load_addr_start;    \
> +        }                                                                  \
> +        __linker_address;                                                  \
> +    })
> +
> +/* Convert boot-time Xen address from where it was loaded by the boot loader to the address it was layed out
> + * at link-time.
> + */
> +#define linker_addr(load_address)                                          \
> +    ({                                                                     \
> +        unsigned long __load_address = (unsigned long)(load_address);      \
> +        if ( load_addr_start <= __load_address &&                          \
> +             __load_address < load_addr_end )                              \
> +        {                                                                  \
> +            __load_address =                                               \
> +                __load_address - load_addr_start + linker_addr_start;      \
> +        }                                                                  \
> +        __load_address;                                                    \
> +    })
> +
> +static void __attribute__((section(".entry")))
> +_setup_initial_pagetables(pte_t *second, pte_t *first, pte_t *zeroeth,

Why the special section (also again further down)?

Jan