Accept that we will consume space in CPUState for CONFIG_USER_ONLY, since we cannot test CONFIG_SOFTMMU within hw/core/cpu.h.
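For a rough sense of the space being accepted, the sketch below is an illustrative, self-contained approximation only: the stand-in type names (EntryStandIn, FullStandIn, DescStandIn, FastStandIn, CommonStandIn, TLBStandIn) and their field sizes are my own, not the real QEMU definitions, and actual numbers depend on the target, the host ABI and CPU_TLB_DYN_* sizing. With NB_MMU_MODES fixed at 16 and an 8-entry victim tlb per mode, the fixed per-CPU cost is on the order of a few KiB.

/*
 * Illustrative stand-ins only -- not the real QEMU definitions.
 * CPUTLBEntry, CPUTLBEntryFull, CPUTLBDesc, CPUTLBDescFast and
 * CPUTLBCommon are approximated with plain C types to estimate
 * the fixed per-CPU footprint of the TLB structures.
 */
#include <stdint.h>
#include <stdio.h>

#define NB_MMU_MODES  16
#define CPU_VTLB_SIZE  8

/* CPUTLBEntry stand-in: three comparators plus a host addend. */
typedef struct { uint64_t addr[3]; uintptr_t addend; } EntryStandIn;

/* CPUTLBEntryFull stand-in: translation, physical address, attrs/flags. */
typedef struct { uint64_t xlat, phys; uint8_t misc[8]; } FullStandIn;

/* CPUTLBDesc stand-in: large-page tracking, resize window, victim tlb. */
typedef struct {
    uint64_t large_page_addr, large_page_mask;
    int64_t window_begin_ns;
    size_t window_max_entries, n_used_entries, vindex;
    EntryStandIn vtable[CPU_VTLB_SIZE];
    FullStandIn vfulltlb[CPU_VTLB_SIZE];
    FullStandIn *fulltlb;
} DescStandIn;

/* CPUTLBDescFast stand-in: mask plus pointer to the dynamic table. */
typedef struct { uintptr_t mask; void *table; } FastStandIn;

/* CPUTLBCommon stand-in: lock, dirty bitmap, flush statistics. */
typedef struct { int lock; uint16_t dirty; size_t stats[3]; } CommonStandIn;

/* CPUTLB stand-in: what every CPUState now carries when TCG is built in. */
typedef struct {
    CommonStandIn c;
    DescStandIn d[NB_MMU_MODES];
    FastStandIn f[NB_MMU_MODES];
} TLBStandIn;

int main(void)
{
    printf("approx per-mode cost:  %zu bytes\n", sizeof(DescStandIn));
    printf("approx sizeof(CPUTLB): %zu bytes\n", sizeof(TLBStandIn));
    return 0;
}

Compiled as-is, this prints roughly 8 KiB for the whole stand-in CPUTLB on a typical 64-bit host, which is the order of magnitude of the space now present in CPUState for user-only TCG builds.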
Reviewed-by: Anton Johansson <a...@rev.ng>
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 include/exec/cpu-defs.h | 150 ----------------------------------------
 include/hw/core/cpu.h   | 141 +++++++++++++++++++++++++++++++++++++
 2 files changed, 141 insertions(+), 150 deletions(-)

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 46d2ac570f..3915438b83 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -54,18 +54,7 @@
 
 #include "exec/target_long.h"
 
-/*
- * Fix the number of mmu modes to 16, which is also the maximum
- * supported by the softmmu tlb api.
- */
-#define NB_MMU_MODES 16
-
 #if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG)
-#include "exec/tlb-common.h"
-
-/* use a fully associative victim tlb of 8 entries */
-#define CPU_VTLB_SIZE 8
-
 #define CPU_TLB_DYN_MIN_BITS 6
 #define CPU_TLB_DYN_DEFAULT_BITS 8
 
@@ -91,143 +80,4 @@
 
 #endif /* CONFIG_SOFTMMU && CONFIG_TCG */
 
-#if defined(CONFIG_SOFTMMU)
-/*
- * The full TLB entry, which is not accessed by generated TCG code,
- * so the layout is not as critical as that of CPUTLBEntry. This is
- * also why we don't want to combine the two structs.
- */
-typedef struct CPUTLBEntryFull {
-    /*
-     * @xlat_section contains:
-     *  - For ram, an offset which must be added to the virtual address
-     *    to obtain the ram_addr_t of the target RAM
-     *  - For other memory regions,
-     *    + in the lower TARGET_PAGE_BITS, the physical section number
-     *    + with the TARGET_PAGE_BITS masked off, the offset within
-     *      the target MemoryRegion
-     */
-    hwaddr xlat_section;
-
-    /*
-     * @phys_addr contains the physical address in the address space
-     * given by cpu_asidx_from_attrs(cpu, @attrs).
-     */
-    hwaddr phys_addr;
-
-    /* @attrs contains the memory transaction attributes for the page. */
-    MemTxAttrs attrs;
-
-    /* @prot contains the complete protections for the page. */
-    uint8_t prot;
-
-    /* @lg_page_size contains the log2 of the page size. */
-    uint8_t lg_page_size;
-
-    /*
-     * Additional tlb flags for use by the slow path. If non-zero,
-     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
-     */
-    uint8_t slow_flags[MMU_ACCESS_COUNT];
-
-    /*
-     * Allow target-specific additions to this structure.
-     * This may be used to cache items from the guest cpu
-     * page tables for later use by the implementation.
-     */
-    union {
-        /*
-         * Cache the attrs and shareability fields from the page table entry.
-         *
-         * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
-         * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
-         * For shareability and guarded, as in the SH and GP fields respectively
-         * of the VMSAv8-64 PTEs.
-         */
-        struct {
-            uint8_t pte_attrs;
-            uint8_t shareability;
-            bool guarded;
-        } arm;
-    } extra;
-} CPUTLBEntryFull;
-#endif /* CONFIG_SOFTMMU */
-
-#if defined(CONFIG_SOFTMMU) && defined(CONFIG_TCG)
-/*
- * Data elements that are per MMU mode, minus the bits accessed by
- * the TCG fast path.
- */
-typedef struct CPUTLBDesc {
-    /*
-     * Describe a region covering all of the large pages allocated
-     * into the tlb.  When any page within this region is flushed,
-     * we must flush the entire tlb.  The region is matched if
-     * (addr & large_page_mask) == large_page_addr.
-     */
-    vaddr large_page_addr;
-    vaddr large_page_mask;
-    /* host time (in ns) at the beginning of the time window */
-    int64_t window_begin_ns;
-    /* maximum number of entries observed in the window */
-    size_t window_max_entries;
-    size_t n_used_entries;
-    /* The next index to use in the tlb victim table.  */
-    size_t vindex;
-    /* The tlb victim table, in two parts.  */
-    CPUTLBEntry vtable[CPU_VTLB_SIZE];
-    CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
-    CPUTLBEntryFull *fulltlb;
-} CPUTLBDesc;
-
-/*
- * Data elements that are shared between all MMU modes.
- */
-typedef struct CPUTLBCommon {
-    /* Serialize updates to f.table and d.vtable, and others as noted.  */
-    QemuSpin lock;
-    /*
-     * Within dirty, for each bit N, modifications have been made to
-     * mmu_idx N since the last time that mmu_idx was flushed.
-     * Protected by tlb_c.lock.
-     */
-    uint16_t dirty;
-    /*
-     * Statistics.  These are not lock protected, but are read and
-     * written atomically.  This allows the monitor to print a snapshot
-     * of the stats without interfering with the cpu.
-     */
-    size_t full_flush_count;
-    size_t part_flush_count;
-    size_t elide_flush_count;
-} CPUTLBCommon;
-
-/*
- * The entire softmmu tlb, for all MMU modes.
- * The meaning of each of the MMU modes is defined in the target code.
- * Since this is placed within CPUNegativeOffsetState, the smallest
- * negative offsets are at the end of the struct.
- */
-
-typedef struct CPUTLB {
-    CPUTLBCommon c;
-    CPUTLBDesc d[NB_MMU_MODES];
-    CPUTLBDescFast f[NB_MMU_MODES];
-} CPUTLB;
-
-#else
-
-typedef struct CPUTLB { } CPUTLB;
-
-#endif /* CONFIG_SOFTMMU && CONFIG_TCG */
-
-/*
- * This structure must be placed in ArchCPU immediately
- * before CPUArchState, as a field named "neg".
- */
-typedef struct CPUNegativeOffsetState {
-    CPUTLB tlb;
-    IcountDecr icount_decr;
-} CPUNegativeOffsetState;
-
 #endif
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 648b5b3586..04baa5063c 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -25,6 +25,7 @@
 #include "exec/cpu-common.h"
 #include "exec/hwaddr.h"
 #include "exec/memattrs.h"
+#include "exec/tlb-common.h"
 #include "qapi/qapi-types-run-state.h"
 #include "qemu/bitmap.h"
 #include "qemu/rcu_queue.h"
@@ -192,6 +193,137 @@ struct CPUClass {
     bool gdb_stop_before_watchpoint;
 };
 
+/*
+ * Fix the number of mmu modes to 16, which is also the maximum
+ * supported by the softmmu tlb api.
+ */
+#define NB_MMU_MODES 16
+
+/* Use a fully associative victim tlb of 8 entries. */
+#define CPU_VTLB_SIZE 8
+
+/*
+ * The full TLB entry, which is not accessed by generated TCG code,
+ * so the layout is not as critical as that of CPUTLBEntry. This is
+ * also why we don't want to combine the two structs.
+ */
+typedef struct CPUTLBEntryFull {
+    /*
+     * @xlat_section contains:
+     *  - in the lower TARGET_PAGE_BITS, a physical section number
+     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
+     *    must be added to the virtual address to obtain:
+     *     + the ram_addr_t of the target RAM (if the physical section
+     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
+     *     + the offset within the target MemoryRegion (otherwise)
+     */
+    hwaddr xlat_section;
+
+    /*
+     * @phys_addr contains the physical address in the address space
+     * given by cpu_asidx_from_attrs(cpu, @attrs).
+     */
+    hwaddr phys_addr;
+
+    /* @attrs contains the memory transaction attributes for the page. */
+    MemTxAttrs attrs;
+
+    /* @prot contains the complete protections for the page. */
+    uint8_t prot;
+
+    /* @lg_page_size contains the log2 of the page size. */
+    uint8_t lg_page_size;
+
+    /*
+     * Additional tlb flags for use by the slow path. If non-zero,
+     * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
+     */
+    uint8_t slow_flags[MMU_ACCESS_COUNT];
+
+    /*
+     * Allow target-specific additions to this structure.
+     * This may be used to cache items from the guest cpu
+     * page tables for later use by the implementation.
+     */
+    union {
+        /*
+         * Cache the attrs and shareability fields from the page table entry.
+         *
+         * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
+         * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
+         * For shareability and guarded, as in the SH and GP fields respectively
+         * of the VMSAv8-64 PTEs.
+         */
+        struct {
+            uint8_t pte_attrs;
+            uint8_t shareability;
+            bool guarded;
+        } arm;
+    } extra;
+} CPUTLBEntryFull;
+
+/*
+ * Data elements that are per MMU mode, minus the bits accessed by
+ * the TCG fast path.
+ */
+typedef struct CPUTLBDesc {
+    /*
+     * Describe a region covering all of the large pages allocated
+     * into the tlb.  When any page within this region is flushed,
+     * we must flush the entire tlb.  The region is matched if
+     * (addr & large_page_mask) == large_page_addr.
+     */
+    vaddr large_page_addr;
+    vaddr large_page_mask;
+    /* host time (in ns) at the beginning of the time window */
+    int64_t window_begin_ns;
+    /* maximum number of entries observed in the window */
+    size_t window_max_entries;
+    size_t n_used_entries;
+    /* The next index to use in the tlb victim table.  */
+    size_t vindex;
+    /* The tlb victim table, in two parts.  */
+    CPUTLBEntry vtable[CPU_VTLB_SIZE];
+    CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
+    CPUTLBEntryFull *fulltlb;
+} CPUTLBDesc;
+
+/*
+ * Data elements that are shared between all MMU modes.
+ */
+typedef struct CPUTLBCommon {
+    /* Serialize updates to f.table and d.vtable, and others as noted.  */
+    QemuSpin lock;
+    /*
+     * Within dirty, for each bit N, modifications have been made to
+     * mmu_idx N since the last time that mmu_idx was flushed.
+     * Protected by tlb_c.lock.
+     */
+    uint16_t dirty;
+    /*
+     * Statistics.  These are not lock protected, but are read and
+     * written atomically.  This allows the monitor to print a snapshot
+     * of the stats without interfering with the cpu.
+     */
+    size_t full_flush_count;
+    size_t part_flush_count;
+    size_t elide_flush_count;
+} CPUTLBCommon;
+
+/*
+ * The entire softmmu tlb, for all MMU modes.
+ * The meaning of each of the MMU modes is defined in the target code.
+ * Since this is placed within CPUNegativeOffsetState, the smallest
+ * negative offsets are at the end of the struct.
+ */
+typedef struct CPUTLB {
+#ifdef CONFIG_TCG
+    CPUTLBCommon c;
+    CPUTLBDesc d[NB_MMU_MODES];
+    CPUTLBDescFast f[NB_MMU_MODES];
+#endif
+} CPUTLB;
+
 /*
  * Low 16 bits: number of cycles left, used only in icount mode.
  * High 16 bits: Set to -1 to force TCG to stop executing linked TBs
@@ -212,6 +344,15 @@ typedef union IcountDecr {
     } u16;
 } IcountDecr;
 
+/*
+ * This structure must be placed in ArchCPU immediately
+ * before CPUArchState, as a field named "neg".
+ */
+typedef struct CPUNegativeOffsetState {
+    CPUTLB tlb;
+    IcountDecr icount_decr;
+} CPUNegativeOffsetState;
+
 typedef struct CPUBreakpoint {
     vaddr pc;
     int flags; /* BP_* */
-- 
2.34.1