Hi Ayan,

On 03/11/2023 18:34, Ayan Kumar Halder wrote:
> 
> 
> All the MMU-related functionality has been clubbed together in
> enable_boot_cpu_mm() for booting the primary CPU and enable_secondary_cpu_mm()
> for booting secondary CPUs.
> This is done in preparation for moving the MMU-related code into an
> MMU-specific file and in order to support non-MMU CPUs in the future.
> 
> This is based on d2f8df5b3ede commit.
NIT: when referencing other commits, use the <12-digit SHA> ("<commit title>") format
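For example (made-up hash and title here, just to illustrate the format):
1234567890ab ("xen/arm: Example commit title")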

> 
> Signed-off-by: Ayan Kumar Halder <ayan.kumar.hal...@amd.com>
> ---
> 
> Changes from v1:
> 
> 1. Added a proper commit message.
> 2. Some style and other fixes suggested in v1.
> 
>  xen/arch/arm/arm32/head.S | 89 +++++++++++++++++++++++++++++----------
>  1 file changed, 67 insertions(+), 22 deletions(-)
> 
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index 2d7e690bf5..7004443798 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -201,13 +201,11 @@ past_zImage:
> 
>          bl    check_cpu_mode
>          bl    cpu_init
> -        bl    create_page_tables
> 
> -        /* Address in the runtime mapping to jump to after the MMU is enabled */
>          mov_w lr, primary_switched
> -        b     enable_mmu
> +        b     enable_boot_cpu_mm
> +
>  primary_switched:
> -        bl    setup_fixmap
>  #ifdef CONFIG_EARLY_PRINTK
>          /* Use a virtual address to access the UART. */
>          mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
> @@ -249,27 +247,11 @@ GLOBAL(init_secondary)
>  #endif
>          bl    check_cpu_mode
>          bl    cpu_init
> -        bl    create_page_tables
> 
> -        /* Address in the runtime mapping to jump to after the MMU is enabled */
>          mov_w lr, secondary_switched
> -        b     enable_mmu
> -secondary_switched:
> -        /*
> -         * Non-boot CPUs need to move on to the proper pagetables, which were
> -         * setup in prepare_secondary_mm.
> -         *
> -         * XXX: This is not compliant with the Arm Arm.
> -         */
> -        mov_w r4, init_ttbr          /* VA of HTTBR value stashed by CPU 0 */
> -        ldrd  r4, r5, [r4]           /* Actual value */
> -        dsb
> -        mcrr  CP64(r4, r5, HTTBR)
> -        dsb
> -        isb
> -        flush_xen_tlb_local r0
> -        pt_enforce_wxn r0
> +        b     enable_secondary_cpu_mm
> 
> +secondary_switched:
>  #ifdef CONFIG_EARLY_PRINTK
>          /* Use a virtual address to access the UART. */
>          mov_w r11, EARLY_UART_VIRTUAL_ADDRESS
> @@ -692,6 +674,69 @@ ready_to_switch:
>          mov   pc, lr
>  ENDPROC(switch_to_runtime_mapping)
> 
> +/*
> + * Enable mm (turn on the data cache and the MMU) for secondary CPUs.
> + * The function will return to the virtual address provided in LR (e.g. the
> + * runtime mapping).
> + *
> + * Inputs:
> + *   lr : Virtual address to return to.
> + *
> + * Clobbers r0 - r6
> + */
> +enable_secondary_cpu_mm:
> +        mov   r6, lr
> +
> +        bl    create_page_tables
> +
> +        /* Address in the runtime mapping to jump to after the MMU is enabled */
> +        mov_w lr, 1f
> +        b     enable_mmu
> +1:
> +        /*
> +         * Non-boot CPUs need to move on to the proper pagetables, which were
> +         * setup in prepare_secondary_mm.
> +         *
> +         * XXX: This is not compliant with the Arm Arm.
> +         */
> +        mov_w r4, init_ttbr          /* VA of HTTBR value stashed by CPU 0 */
> +        ldrd  r4, r5, [r4]           /* Actual value */
> +        dsb
> +        mcrr  CP64(r4, r5, HTTBR)
> +        dsb
> +        isb
> +        flush_xen_tlb_local r0
> +        pt_enforce_wxn r0
> +
> +        /* Return to the virtual address requested by the caller. */
> +        mov   pc, r6
> +ENDPROC(enable_secondary_cpu_mm)
> +
> +/*
> + * Enable mm (turn on the data cache and the MMU) for the boot CPU.
> + * The function will return to the virtual address provided in LR (e.g. the
> + * runtime mapping).
> + *
> + * Inputs:
> + *   lr : Virtual address to return to.
> + *
> + * Clobbers r0 - r6
> + */
> +enable_boot_cpu_mm:
> +        mov   r6, lr
> +
> +        bl    create_page_tables
> +
> +        /* Address in the runtime mapping to jump to after the MMU is enabled */
> +        mov_w lr, 1f
> +        b     enable_mmu
> +1:
> +        /* Return to the virtual address requested by the caller. */
I find this comment a bit misleading, as it reads as if this instruction were
causing a return.

Apart from that, this change LGTM. Depending on the order of other arm32 head.S 
patches:
Reviewed-by: Michal Orzel <michal.or...@amd.com>

~Michal
