Greg Kurz <gk...@linux.vnet.ibm.com> writes:

> The 396bb9874 commit reworked page table lookup to support kvm.
> Unfortunately this breaks the 32-bit build:
>
> target-ppc/mmu-hash64.h: In function ‘ppc_hash64_load_hpte0’:
> target-ppc/mmu-hash64.h:90:23: error: cast to pointer from integer of different size
>
> target-ppc/mmu-hash64.h: In function ‘ppc_hash64_load_hpte1’:
> target-ppc/mmu-hash64.h:101:23: error: cast to pointer from integer of different size
>
> The problem is that we have two cases to handle here:
> - the HTAB is external and it is accessed with a pointer
> - the HTAB is emulated and it is accessed with a hwaddr
>
> Depending on the way the HTAB is controlled, we should use the appropriate
> type:
> - controlled by kvm, it is copied to an allocated buffer (pointer)
> - controlled by qemu with an allocated buffer (pointer)
> - controlled by qemu with soft mmu (hwaddr)
>
> This patch introduces an explicit distinction between the two cases in
> the new page table lookup code.
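Just to spell out the failure mode for anyone who does not build on 32-bit
hosts: hwaddr is 64 bits wide everywhere, but a host pointer is only 32 bits
there, so funnelling both through a single uint64_t token forces a cast
between integer and pointer of different sizes. A rough standalone sketch of
the idea (simplified types and names, not the actual QEMU definitions or
helpers):

    /* Sketch only: "hwaddr", "access_token" and "load_entry" are simplified
     * stand-ins for the QEMU types and helpers touched by this patch. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t hwaddr;            /* 64-bit on every host */

    typedef union {
        void   *ext_ptr;                /* HTAB lives in a host buffer */
        hwaddr  phys_addr;              /* HTAB emulated behind the soft MMU */
    } access_token;

    static uint64_t load_entry(access_token tok, int external)
    {
        if (external) {
            /* plain dereference, no integer/pointer cast needed */
            return *(const uint64_t *)tok.ext_ptr;
        }
        /* QEMU would call ldq_phys(tok.phys_addr) on this branch */
        return (uint64_t)tok.phys_addr;
    }

    int main(void)
    {
        uint64_t pte = 0x1234;
        access_token tok = { .ext_ptr = &pte };

        printf("0x%llx\n", (unsigned long long)load_entry(tok, 1));
        return 0;
    }

With the old uint64_t token, the first branch needs a (const void *) cast,
which is exactly what gcc flags in the errors quoted above.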
Reviewed-by: Aneesh Kumar K.V <aneesh.ku...@linux.vnet.ibm.com>

>
> Signed-off-by: Greg Kurz <gk...@linux.vnet.ibm.com>
> ---
>  hw/ppc/spapr_hcall.c    |    6 +++---
>  target-ppc/kvm.c        |   10 +++++-----
>  target-ppc/kvm_ppc.h    |   10 +++++-----
>  target-ppc/mmu-hash64.c |   28 +++++++++++-----------------
>  target-ppc/mmu-hash64.h |   25 +++++++++++++++++--------
>  5 files changed, 41 insertions(+), 38 deletions(-)
>
> diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
> index 55d4eef..9653774 100644
> --- a/hw/ppc/spapr_hcall.c
> +++ b/hw/ppc/spapr_hcall.c
> @@ -51,7 +51,7 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPREnvironment *spapr,
>      target_ulong page_shift = 12;
>      target_ulong raddr;
>      target_ulong index;
> -    uint64_t token;
> +    ppc_hash64_access_t token;
>
>      /* only handle 4k and 16M pages for now */
>      if (pteh & HPTE64_V_LARGE) {
> @@ -137,7 +137,7 @@ static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
>                                  target_ulong flags,
>                                  target_ulong *vp, target_ulong *rp)
>  {
> -    uint64_t token;
> +    ppc_hash64_access_t token;
>      target_ulong v, r, rb;
>
>      if ((ptex * HASH_PTE_SIZE_64) & ~env->htab_mask) {
> @@ -263,7 +263,7 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPREnvironment *spapr,
>      target_ulong flags = args[0];
>      target_ulong pte_index = args[1];
>      target_ulong avpn = args[2];
> -    uint64_t token;
> +    ppc_hash64_access_t token;
>      target_ulong v, r, rb;
>
>      if ((pte_index * HASH_PTE_SIZE_64) & ~env->htab_mask) {
> diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
> index e2b07a4..2eaf956 100644
> --- a/target-ppc/kvm.c
> +++ b/target-ppc/kvm.c
> @@ -1952,7 +1952,7 @@ struct kvm_get_htab_buf {
>      target_ulong hpte[(HPTES_PER_GROUP * 2) + 1];
>  };
>
> -uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index)
> +void *kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index)
>  {
>      int htab_fd;
>      struct kvm_get_htab_fd ghf;
> @@ -1974,20 +1974,20 @@ uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index)
>      }
>
>      close(htab_fd);
> -    return (uint64_t) hpte_buf->hpte;
> +    return hpte_buf->hpte;
>
>  out_close:
>      g_free(hpte_buf);
>      close(htab_fd);
>  error_out:
> -    return 0;
> +    return NULL;
>  }
>
> -void kvmppc_hash64_free_pteg(uint64_t token)
> +void kvmppc_hash64_free_pteg(void *pteg)
>  {
>      struct kvm_get_htab_buf *htab_buf;
>
> -    htab_buf = container_of((void *)token, struct kvm_get_htab_buf, hpte);
> +    htab_buf = container_of((void *)pteg, struct kvm_get_htab_buf, hpte);
>      g_free(htab_buf);
>      return;
>  }
> diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
> index a65d345..bf29e7c 100644
> --- a/target-ppc/kvm_ppc.h
> +++ b/target-ppc/kvm_ppc.h
> @@ -44,8 +44,8 @@ int kvmppc_get_htab_fd(bool write);
>  int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns);
>  int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
>                             uint16_t n_valid, uint16_t n_invalid);
> -uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index);
> -void kvmppc_hash64_free_pteg(uint64_t token);
> +void *kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index);
> +void kvmppc_hash64_free_pteg(void *pteg);
>
>  void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
>                               target_ulong pte0, target_ulong pte1);
> @@ -199,13 +199,13 @@ static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
>      abort();
>  }
>
> -static inline uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu,
> -                                               target_ulong pte_index)
> +static inline void *kvmppc_hash64_read_pteg(PowerPCCPU *cpu,
> +                                            target_ulong pte_index)
>  {
>      abort();
>  }
>
> -static inline void kvmppc_hash64_free_pteg(uint64_t token)
> +static inline void kvmppc_hash64_free_pteg(void *pteg)
>  {
>      abort();
>  }
> diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> index 4e574d9..98878ef 100644
> --- a/target-ppc/mmu-hash64.c
> +++ b/target-ppc/mmu-hash64.c
> @@ -315,9 +315,10 @@ static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
>      return prot;
>  }
>
> -uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
> +ppc_hash64_access_t ppc_hash64_start_access(PowerPCCPU *cpu,
> +                                            target_ulong pte_index)
>  {
> -    uint64_t token = 0;
> +    ppc_hash64_access_t token = { .phys_addr = 0UL };
>      hwaddr pte_offset;
>
>      pte_offset = pte_index * HASH_PTE_SIZE_64;
> @@ -325,32 +326,25 @@ uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
>          /*
>           * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
>           */
> -        token = kvmppc_hash64_read_pteg(cpu, pte_index);
> -        if (token) {
> -            return token;
> -        }
> -        /*
> -         * pteg read failed, even though we have allocated htab via
> -         * kvmppc_reset_htab.
> -         */
> -        return 0;
> +        token.ext_ptr = kvmppc_hash64_read_pteg(cpu, pte_index);
> +        return token;
>      }
>      /*
>       * HTAB is controlled by QEMU. Just point to the internally
>       * accessible PTEG.
>       */
>      if (cpu->env.external_htab) {
> -        token = (uint64_t) cpu->env.external_htab + pte_offset;
> +        token.ext_ptr = cpu->env.external_htab + pte_offset;
>      } else if (cpu->env.htab_base) {
> -        token = cpu->env.htab_base + pte_offset;
> +        token.phys_addr = cpu->env.htab_base + pte_offset;
>      }
>      return token;
>  }
>
> -void ppc_hash64_stop_access(uint64_t token)
> +void ppc_hash64_stop_access(ppc_hash64_access_t token)
>  {
>      if (kvmppc_kern_htab) {
> -        return kvmppc_hash64_free_pteg(token);
> +        return kvmppc_hash64_free_pteg(token.ext_ptr);
>      }
>  }
>
> @@ -359,13 +353,13 @@ static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
>                                       ppc_hash_pte64_t *pte)
>  {
>      int i;
> -    uint64_t token;
> +    ppc_hash64_access_t token;
>      target_ulong pte0, pte1;
>      target_ulong pte_index;
>
>      pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
>      token = ppc_hash64_start_access(ppc_env_get_cpu(env), pte_index);
> -    if (!token) {
> +    if (token.phys_addr == 0UL) {
>          return -1;
>      }
>      for (i = 0; i < HPTES_PER_GROUP; i++) {
> diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
> index 9c9ca1d..eb55190 100644
> --- a/target-ppc/mmu-hash64.h
> +++ b/target-ppc/mmu-hash64.h
> @@ -79,28 +79,37 @@ void ppc_hash64_store_hpte(CPUPPCState *env, target_ulong index,
>
>
>  extern bool kvmppc_kern_htab;
> -uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index);
> -void ppc_hash64_stop_access(uint64_t token);
> +
> +typedef union {
> +    void *ext_ptr;
> +    hwaddr phys_addr;
> +} ppc_hash64_access_t;
> +
> +ppc_hash64_access_t ppc_hash64_start_access(PowerPCCPU *cpu,
> +                                            target_ulong pte_index);
> +void ppc_hash64_stop_access(ppc_hash64_access_t token);
>
>  static inline target_ulong ppc_hash64_load_hpte0(CPUPPCState *env,
> -                                                  uint64_t token, int index)
> +                                                  ppc_hash64_access_t token,
> +                                                  int index)
>  {
>      index *= HASH_PTE_SIZE_64;
>      if (env->external_htab) {
> -        return ldq_p((const void *)(token + index));
> +        return ldq_p(token.ext_ptr + index);
>      } else {
> -        return ldq_phys(token + index);
> +        return ldq_phys(token.phys_addr + index);
>      }
>  }
>
>  static inline target_ulong ppc_hash64_load_hpte1(CPUPPCState *env,
> -                                                  uint64_t token, int index)
> +                                                  ppc_hash64_access_t token,
> +                                                  int index)
>  {
>      index *= HASH_PTE_SIZE_64;
>      if (env->external_htab) {
> -        return ldq_p((const void *)(token + index + HASH_PTE_SIZE_64/2));
> +        return ldq_p(token.ext_ptr + index + HASH_PTE_SIZE_64/2);
>      } else {
> -        return ldq_phys(token + index + HASH_PTE_SIZE_64/2);
> +        return ldq_phys(token.phys_addr + index + HASH_PTE_SIZE_64/2);
>      }
>  }
>