On 13/03/2025 12:51, Luca Fancellu wrote:
>
>
> Hi Julien,
>
>> On 13 Mar 2025, at 09:22, Julien Grall <jul...@xen.org> wrote:
>>
>> Hi,
>>
>> On 12/03/2025 13:52, Luca Fancellu wrote:
>>> Introduce the variables and functions that the MPU memory management
>>> subsystem provides for the common Arm code, supply struct page_info,
>>> and add the MPU implementation of the helpers and macros used in the
>>> common Arm code.
>>> Moving the virt_to_page helper to the mmu/mpu parts is not easy, as it
>>> needs visibility of 'struct page_info', so protect it with CONFIG_MMU
>>> and provide the MPU variant in the #else branch.
>>
>> Have you considered including "asm/{mmu,mpu}/mm.h" **after** struct
>> page_info is declared?
>
> I didn’t try that, but if we do, it solves all the issues (I don’t like
> #includes in the middle of headers, which is why I didn’t try it).
>
> This is what it looks like:
>
> diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
> index 9bf5c846c86c..b49ec9c3dd1a 100644
> --- a/xen/arch/arm/include/asm/mm.h
> +++ b/xen/arch/arm/include/asm/mm.h
> @@ -14,14 +14,6 @@
> # error "unknown ARM variant"
> #endif
>
> -#if defined(CONFIG_MMU)
> -# include <asm/mmu/mm.h>
> -#elif defined(CONFIG_MPU)
> -# include <asm/mpu/mm.h>
> -#else
> -#error "Unknown memory management layout"
> -#endif
> -
> /* Align Xen to a 2 MiB boundary. */
> #define XEN_PADDR_ALIGN (1 << 21)
>
> @@ -264,53 +256,13 @@ static inline void __iomem *ioremap_wc(paddr_t start, size_t len)
> #define paddr_to_pfn_aligned(paddr) paddr_to_pfn(PAGE_ALIGN(paddr))
>
> #if defined(CONFIG_MMU)
> -
> -#ifdef CONFIG_ARM_32
> -/**
> - * Find the virtual address corresponding to a machine address
> - *
> - * Only memory backing the XENHEAP has a corresponding virtual address to
> - * be found. This is so we can save precious virtual space, as it's in
> - * short supply on arm32. This mapping is not subject to PDX compression
> - * because XENHEAP is known to be physically contiguous and can't hence
> - * jump over the PDX hole. This means we can avoid the roundtrips
> - * converting to/from pdx.
> - *
> - * @param ma Machine address
> - * @return Virtual address mapped to `ma`
> - */
> -static inline void *maddr_to_virt(paddr_t ma)
> -{
> - ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
> - ma -= mfn_to_maddr(directmap_mfn_start);
> - return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
> -}
> +# include <asm/mmu/mm.h>
> +#elif defined(CONFIG_MPU)
> +# include <asm/mpu/mm.h>
> #else
> -/**
> - * Find the virtual address corresponding to a machine address
> - *
> - * The directmap covers all conventional memory accesible by the
> - * hypervisor. This means it's subject to PDX compression.
> - *
> - * Note there's an extra offset applied (directmap_base_pdx) on top of the
> - * regular PDX compression logic. Its purpose is to skip over the initial
> - * range of non-existing memory, should there be one.
> - *
> - * @param ma Machine address
> - * @return Virtual address mapped to `ma`
> - */
> -static inline void *maddr_to_virt(paddr_t ma)
> -{
> - ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - directmap_base_pdx) <
> - (DIRECTMAP_SIZE >> PAGE_SHIFT));
> - return (void *)(XENHEAP_VIRT_START -
> - (directmap_base_pdx << PAGE_SHIFT) +
> - maddr_to_directmapoff(ma));
> -}
> +#error "Unknown memory management layout"
> #endif
>
> -#endif /* CONFIG_MMU */
> -
> /*
> * Translate a guest virtual address to a machine address.
> * Return the fault information if the translation has failed else 0.
> diff --git a/xen/arch/arm/include/asm/mmu/mm.h b/xen/arch/arm/include/asm/mmu/mm.h
> index 5ff2071133ee..9557f632d8e6 100644
> --- a/xen/arch/arm/include/asm/mmu/mm.h
> +++ b/xen/arch/arm/include/asm/mmu/mm.h
> @@ -21,6 +21,50 @@ extern unsigned long directmap_base_pdx;
> (paddr_t)((va_to_par(va_) & PADDR_MASK & PAGE_MASK) | (va_ & ~PAGE_MASK)); \
> })
>
> +#ifdef CONFIG_ARM_32
> +/**
> + * Find the virtual address corresponding to a machine address
> + *
> + * Only memory backing the XENHEAP has a corresponding virtual address to
> + * be found. This is so we can save precious virtual space, as it's in
> + * short supply on arm32. This mapping is not subject to PDX compression
> + * because XENHEAP is known to be physically contiguous and can't hence
> + * jump over the PDX hole. This means we can avoid the roundtrips
> + * converting to/from pdx.
> + *
> + * @param ma Machine address
> + * @return Virtual address mapped to `ma`
> + */
> +static inline void *maddr_to_virt(paddr_t ma)
> +{
> + ASSERT(is_xen_heap_mfn(maddr_to_mfn(ma)));
> + ma -= mfn_to_maddr(directmap_mfn_start);
> + return (void *)(unsigned long) ma + XENHEAP_VIRT_START;
> +}
> +#else
> +/**
> + * Find the virtual address corresponding to a machine address
> + *
> + * The directmap covers all conventional memory accesible by the
> + * hypervisor. This means it's subject to PDX compression.
> + *
> + * Note there's an extra offset applied (directmap_base_pdx) on top of the
> + * regular PDX compression logic. Its purpose is to skip over the initial
> + * range of non-existing memory, should there be one.
> + *
> + * @param ma Machine address
> + * @return Virtual address mapped to `ma`
> + */
> +static inline void *maddr_to_virt(paddr_t ma)
> +{
> + ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - directmap_base_pdx) <
> + (DIRECTMAP_SIZE >> PAGE_SHIFT));
> + return (void *)(XENHEAP_VIRT_START -
> + (directmap_base_pdx << PAGE_SHIFT) +
> + maddr_to_directmapoff(ma));
> +}
> +#endif
> +
> /*
> * Print a walk of a page table or p2m
> *
>
>
>
>>> Introduce FRAMETABLE_NR that is required for 'pdx_group_valid' in
>>> pdx.c.
>>
>>
>> Maybe clarify in the commit message that the frametable will be set up at
>> a later stage?
>
> Sure
>
>>
>>> Signed-off-by: Luca Fancellu <luca.fance...@arm.com>
>>> ---
>>> xen/arch/arm/include/asm/mm.h | 18 ++++++++++++++++++
>>> xen/arch/arm/include/asm/mpu/layout.h | 3 +++
>>> xen/arch/arm/include/asm/mpu/mm.h | 3 +++
>>> xen/arch/arm/mpu/mm.c | 4 ++++
>>> 4 files changed, 28 insertions(+)
>>> diff --git a/xen/arch/arm/include/asm/mm.h b/xen/arch/arm/include/asm/mm.h
>>> index e7767cdab493..c96d33aceaf0 100644
>>> --- a/xen/arch/arm/include/asm/mm.h
>>> +++ b/xen/arch/arm/include/asm/mm.h
>>> @@ -341,6 +341,8 @@ static inline uint64_t gvirt_to_maddr(vaddr_t va,
>>> paddr_t *pa,
>>> #define virt_to_mfn(va) __virt_to_mfn(va)
>>> #define mfn_to_virt(mfn) __mfn_to_virt(mfn)
>>> +#ifdef CONFIG_MMU
>>> +
>>> /* Convert between Xen-heap virtual addresses and page-info structures. */
>>> static inline struct page_info *virt_to_page(const void *v)
>>> {
>>> @@ -355,6 +357,22 @@ static inline struct page_info *virt_to_page(const void *v)
>>> return frame_table + pdx - frametable_base_pdx;
>>> }
>>> +#else /* !CONFIG_MMU */
>>> +
>>> +/* Convert between virtual address to page-info structure. */
>>> +static inline struct page_info *virt_to_page(const void *v)
>>> +{
>>> + unsigned long pdx;
>>> +
>>> + pdx = paddr_to_pdx(virt_to_maddr(v));
>>> + ASSERT(pdx >= frametable_base_pdx);
>>> + ASSERT(pdx < frametable_pdx_end);
>>> +
>>> + return frame_table + pdx - frametable_base_pdx;
>>> +}
>>> +
>>> +#endif /* CONFIG_MMU */
>>> +
>>> static inline void *page_to_virt(const struct page_info *pg)
>>> {
>>> return mfn_to_virt(mfn_x(page_to_mfn(pg)));
>>> diff --git a/xen/arch/arm/include/asm/mpu/layout.h b/xen/arch/arm/include/asm/mpu/layout.h
>>> index 248e55f8882d..c46b634c9c15 100644
>>> --- a/xen/arch/arm/include/asm/mpu/layout.h
>>> +++ b/xen/arch/arm/include/asm/mpu/layout.h
>>> @@ -3,6 +3,9 @@
>>> #ifndef __ARM_MPU_LAYOUT_H__
>>> #define __ARM_MPU_LAYOUT_H__
>>> +#define FRAMETABLE_SIZE GB(32)
>>
>> I guess you copied the value from the MMU code for arm64. But is this value
>> still sensible for MPU? What about arm32?
>>
>> In any case, some documentation would be useful.
>
> Yes, I took the one from arm64; here I probably need some help, as there is
> not much information about how to size this.
It depends on your estimate of the maximum RAM size you want to support in the MPU case.
32GB / 56B (the size of struct page_info on Arm64) tells you how many page_info structs
you can have. Multiplying that value by 4K gives you the amount of RAM supported.
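
For illustration, a back-of-the-envelope sketch of that arithmetic (the variable
names are only for the example; the 56-byte struct page_info size and the 4K page
size are the assumptions above, not something taken from the patch):

    /* Illustrative sizing sketch, not part of the patch. */
    unsigned long long frametable_size = 32ULL << 30; /* 32 GiB, as in the patch */
    unsigned long long page_info_size  = 56;          /* sizeof(struct page_info) on Arm64 */
    unsigned long long nr_page_info    = frametable_size / page_info_size; /* ~613 M entries */
    unsigned long long max_ram         = nr_page_info * 4096;              /* ~2.3 TiB of RAM */

So a 32 GiB frametable describes roughly 2 TiB of RAM; whether that is a sensible
upper bound for MPU platforms (and for arm32) is the open question above.
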
~Michal