On 3/30/20 4:18 PM, Greg Kurz wrote:
> On Mon, 30 Mar 2020 11:49:43 +0200
> Cédric Le Goater <c...@kaod.org> wrote:
>
>> This is moving code under a new ppc_radix64_xlate() routine shared by
>> the MMU Radix page fault handler and the get_phys_page_debug PPC
>> callback. The difference being that get_phys_page_debug does not
>> generate exceptions.
>>
>> The specific part of process-scoped Radix translation is moved under
>> ppc_radix64_process_scoped_xlate() in preparation of the future
>> support for partition-scoped Radix translation.
>>
>> It should be functionally equivalent.
>>
>> Signed-off-by: Suraj Jitindar Singh <sjitindarsi...@gmail.com>
>> Signed-off-by: Cédric Le Goater <c...@kaod.org>
>> ---
>>  target/ppc/mmu-radix64.c | 201 ++++++++++++++++++++++-----------------
>>  1 file changed, 116 insertions(+), 85 deletions(-)
>>
>> diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
>> index d2422d1c54c9..b4e6abcd2d35 100644
>> --- a/target/ppc/mmu-radix64.c
>> +++ b/target/ppc/mmu-radix64.c
>> @@ -219,17 +219,119 @@ static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
>>      return true;
>>  }
>>
>> +static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
>> +                                            vaddr eaddr, uint64_t lpid, uint64_t pid,
>> +                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
>> +                                            int *g_prot, int *g_page_size,
>> +                                            bool cause_excp)
>> +{
>> +    CPUState *cs = CPU(cpu);
>> +    uint64_t offset, size, prtbe_addr, prtbe0, pte;
>> +    int fault_cause = 0;
>> +    hwaddr pte_addr;
>> +
>> +    /* Index Process Table by PID to Find Corresponding Process Table Entry */
>> +    offset = pid * sizeof(struct prtb_entry);
>> +    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
>> +    if (offset >= size) {
>> +        /* offset exceeds size of the process table */
>> +        if (cause_excp) {
>> +            ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
>> +        }
>> +        return 1;
>> +    }
>> +    prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;
>> +    prtbe0 = ldq_phys(cs->as, prtbe_addr);
>> +
>> +    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
>> +    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
>> +    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
>> +                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
>> +                                g_raddr, g_page_size, &fault_cause, &pte_addr);
>> +
>> +    if (!(pte & R_PTE_VALID) ||
>> +        ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, g_prot)) {
>
> Current code checks !pte instead of !(pte & R_PTE_VALID)... and it seems
> that ppc_radix64_walk_tree() already handles that:
>
>     if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
>         *fault_cause |= DSISR_NOPTE;
>         return 0;
>     }

There are more changes in the following patches, when partition-scoped
translation is introduced.

>
>> +        /* No valid pte or access denied due to protection */
>> +        if (cause_excp) {
>> +            ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
>> +        }
>> +        return 1;
>> +    }
>> +
>> +    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, g_prot);
>> +
>> +    return 0;
>> +}
>> +
>> +static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
>> +                             uint64_t lpid, uint64_t pid, bool relocation,
>> +                             hwaddr *raddr, int *psizep, int *protp,
>> +                             bool cause_excp)
>> +{
>> +    ppc_v3_pate_t pate;
>> +    int psize, prot;
>> +    hwaddr g_raddr;
>> +
>> +    *psizep = INT_MAX;
>> +    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
>> +
>> +    /* Get Process Table */
>> +    if (cpu->vhyp && lpid == 0) {
>
> Current code doesn't check lpid == 0. Not sure to see what it's for...

cpu->vhyp means a pseries machine, and lpid == 0 means we are accessing
quadrant 3, so it's the kernel (see the illustrative sketch at the end
of this mail).

> especially env->spr[SPR_LPIDR] is always 0 with pseries machine types
> AFAICT... is it even possible to have lpid != 0 here ?

Under PowerNV, SPR_LPIDR can be set, but not under pseries.

C.

>
>
> Rest LGTM.
>
>> +        PPCVirtualHypervisorClass *vhc;
>> +        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
>> +        vhc->get_pate(cpu->vhyp, &pate);
>> +    } else {
>> +        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
>> +            if (cause_excp) {
>> +                ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
>> +            }
>> +            return 1;
>> +        }
>> +        if (!validate_pate(cpu, lpid, &pate)) {
>> +            if (cause_excp) {
>> +                ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
>> +            }
>> +            return 1;
>> +        }
>> +        /* We don't support guest mode yet */
>> +        if (lpid != 0) {
>> +            error_report("PowerNV guest support Unimplemented");
>> +            exit(1);
>> +        }
>> +    }
>> +
>> +    /*
>> +     * Perform process-scoped translation if relocation enabled.
>> +     *
>> +     * - Translates an effective address to a host real address in
>> +     *   quadrants 0 and 3 when HV=1.
>> +     */
>> +    if (relocation) {
>> +        int ret = ppc_radix64_process_scoped_xlate(cpu, rwx, eaddr, lpid, pid,
>> +                                                   pate, &g_raddr, &prot,
>> +                                                   &psize, cause_excp);
>> +        if (ret) {
>> +            return ret;
>> +        }
>> +        *psizep = MIN(*psizep, psize);
>> +        *protp &= prot;
>> +    } else {
>> +        g_raddr = eaddr & R_EADDR_MASK;
>> +    }
>> +
>> +    *raddr = g_raddr;
>> +    return 0;
>> +}
>> +
>>  int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
>>                                   int mmu_idx)
>>  {
>>      CPUState *cs = CPU(cpu);
>>      CPUPPCState *env = &cpu->env;
>> -    PPCVirtualHypervisorClass *vhc;
>> -    hwaddr raddr, pte_addr;
>> -    uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
>> -    int page_size, prot, fault_cause = 0;
>> -    ppc_v3_pate_t pate;
>> +    uint64_t lpid = 0, pid = 0;
>> +    int page_size, prot;
>>      bool relocation;
>> +    hwaddr raddr;
>>
>>      assert(!(msr_hv && cpu->vhyp));
>>      assert((rwx == 0) || (rwx == 1) || (rwx == 2));
>> @@ -268,48 +370,11 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
>>          return 1;
>>      }
>>
>> -    /* Get Process Table */
>> -    if (cpu->vhyp) {
>> -        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
>> -        vhc->get_pate(cpu->vhyp, &pate);
>> -    } else {
>> -        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
>> -            ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
>> -            return 1;
>> -        }
>> -        if (!validate_pate(cpu, lpid, &pate)) {
>> -            ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
>> -        }
>> -        /* We don't support guest mode yet */
>> -        if (lpid != 0) {
>> -            error_report("PowerNV guest support Unimplemented");
>> -            exit(1);
>> -        }
>> -    }
>> -
>> -    /* Index Process Table by PID to Find Corresponding Process Table Entry */
>> -    offset = pid * sizeof(struct prtb_entry);
>> -    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
>> -    if (offset >= size) {
>> -        /* offset exceeds size of the process table */
>> -        ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
>> +    /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
>> +    if (ppc_radix64_xlate(cpu, eaddr, rwx, lpid, pid, relocation, &raddr,
>> +                          &page_size, &prot, 1)) {
>>          return 1;
>>      }
>> -    prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);
>> -
>> -    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
>> -    page_size = PRTBE_R_GET_RTS(prtbe0);
>> -    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
>> -                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
>> -                                &raddr, &page_size, &fault_cause, &pte_addr);
>> -    if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
>> -        /* Couldn't get pte or access denied due to protection */
>> -        ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
>> -        return 1;
>> -    }
>> -
>> -    /* Update Reference and Change Bits */
>> -    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);
>>
>>      tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
>>                   prot, mmu_idx, 1UL << page_size);
>> @@ -318,16 +383,13 @@ int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
>>
>>  hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
>>  {
>> -    CPUState *cs = CPU(cpu);
>>      CPUPPCState *env = &cpu->env;
>> -    PPCVirtualHypervisorClass *vhc;
>> -    hwaddr raddr, pte_addr;
>> -    uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
>> -    int page_size, fault_cause = 0;
>> -    ppc_v3_pate_t pate;
>> +    uint64_t lpid = 0, pid = 0;
>> +    int psize, prot;
>> +    hwaddr raddr;
>>
>>      /* Handle Real Mode */
>> -    if (msr_dr == 0) {
>> +    if ((msr_dr == 0) && (msr_hv || (cpu->vhyp && lpid == 0))) {
>>          /* In real mode top 4 effective addr bits (mostly) ignored */
>>          return eaddr & 0x0FFFFFFFFFFFFFFFULL;
>>      }
>> @@ -337,39 +399,8 @@ hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
>>          return -1;
>>      }
>>
>> -    /* Get Process Table */
>> -    if (cpu->vhyp) {
>> -        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
>> -        vhc->get_pate(cpu->vhyp, &pate);
>> -    } else {
>> -        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
>> -            return -1;
>> -        }
>> -        if (!validate_pate(cpu, lpid, &pate)) {
>> -            return -1;
>> -        }
>> -        /* We don't support guest mode yet */
>> -        if (lpid != 0) {
>> -            error_report("PowerNV guest support Unimplemented");
>> -            exit(1);
>> -        }
>> -    }
>> -
>> -    /* Index Process Table by PID to Find Corresponding Process Table Entry */
>> -    offset = pid * sizeof(struct prtb_entry);
>> -    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
>> -    if (offset >= size) {
>> -        /* offset exceeds size of the process table */
>> -        return -1;
>> -    }
>> -    prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);
>> -
>> -    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
>> -    page_size = PRTBE_R_GET_RTS(prtbe0);
>> -    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
>> -                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
>> -                                &raddr, &page_size, &fault_cause, &pte_addr);
>> -    if (!pte) {
>> +    if (ppc_radix64_xlate(cpu, eaddr, 0, lpid, pid, msr_dr, &raddr, &psize,
>> +                          &prot, 0)) {
>>          return -1;
>>      }
>>
>
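
Here is the sketch mentioned above. It is only an illustration of the
quadrant to (lpid, pid) mapping used for the !HV case (roughly what
ppc_radix64_get_fully_qualified_addr() does, from memory); the
EA_QUADRANT macro and the helper name below are made up for the example
and are not the actual mmu-radix64.c code.

#include "qemu/osdep.h"
#include "cpu.h"                  /* CPUPPCState, SPR_LPIDR, SPR_BOOKS_PID */

/* The top two bits of the effective address select the quadrant */
#define EA_QUADRANT(ea)     (((uint64_t)(ea) >> 62) & 0x3)

/*
 * (lpid, pid) pair used for process-scoped translation when MSR[HV]=0,
 * i.e. a pseries guest, where cpu->vhyp is set and SPR_LPIDR stays 0.
 */
static bool guest_quadrant_ids(CPUPPCState *env, uint64_t eaddr,
                               uint64_t *lpid, uint64_t *pid)
{
    switch (EA_QUADRANT(eaddr)) {
    case 0:                                 /* guest process space */
        *lpid = env->spr[SPR_LPIDR];        /* always 0 on pseries */
        *pid = env->spr[SPR_BOOKS_PID];     /* current process */
        return true;
    case 3:                                 /* guest kernel space */
        *lpid = env->spr[SPR_LPIDR];        /* always 0 on pseries */
        *pid = 0;                           /* kernel's process table entry */
        return true;
    default:                                /* quadrants 1 and 2 are HV-only */
        return false;
    }
}

So on a pseries machine LPIDR stays 0, and the "cpu->vhyp && lpid == 0"
case fetches the partition table entry from the virtual hypervisor,
while quadrant 3 (PID 0) is what makes it a kernel access. Under
PowerNV, LPIDR can be non-zero and lpid then selects which partition
table entry to use.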