Hi Simon, On Sun, Dec 28, 2014 at 10:20 AM, Simon Glass <s...@chromium.org> wrote: > Memory Type Range Registers are used to tell the CPU whether memory is > cacheable and if so the cache write mode to use. > > Clean up the existing header file to follow style, and remove the unneeded > code. > > These can speed up booting so should be supported. Add these to global_data > so they can be requested while booting. We will apply the changes during > relocation (in a later commit). > > Signed-off-by: Simon Glass <s...@chromium.org> > --- > > arch/x86/cpu/Makefile | 1 + > arch/x86/cpu/coreboot/coreboot.c | 22 +++-- > arch/x86/cpu/ivybridge/car.S | 12 +-- > arch/x86/cpu/mtrr.c | 81 ++++++++++++++++++ > arch/x86/include/asm/global_data.h | 15 ++++ > arch/x86/include/asm/mtrr.h | 163 > +++++++++++++++++-------------------- > 6 files changed, 186 insertions(+), 108 deletions(-) > create mode 100644 arch/x86/cpu/mtrr.c > > diff --git a/arch/x86/cpu/Makefile b/arch/x86/cpu/Makefile > index 5033d2b..62e43c0 100644 > --- a/arch/x86/cpu/Makefile > +++ b/arch/x86/cpu/Makefile > @@ -17,5 +17,6 @@ obj-$(CONFIG_NORTHBRIDGE_INTEL_SANDYBRIDGE) += ivybridge/ > obj-$(CONFIG_NORTHBRIDGE_INTEL_IVYBRIDGE) += ivybridge/ > obj-$(CONFIG_INTEL_QUEENSBAY) += queensbay/ > obj-y += lapic.o > +obj-y += mtrr.o > obj-$(CONFIG_PCI) += pci.o > obj-y += turbo.o > diff --git a/arch/x86/cpu/coreboot/coreboot.c > b/arch/x86/cpu/coreboot/coreboot.c > index cfacc05..6d06d5a 100644 > --- a/arch/x86/cpu/coreboot/coreboot.c > +++ b/arch/x86/cpu/coreboot/coreboot.c > @@ -15,6 +15,7 @@ > #include <asm/cache.h> > #include <asm/cpu.h> > #include <asm/io.h> > +#include <asm/mtrr.h> > #include <asm/arch/tables.h> > #include <asm/arch/sysinfo.h> > #include <asm/arch/timestamp.h> > @@ -64,11 +65,6 @@ int board_eth_init(bd_t *bis) > return pci_eth_init(bis); > } > > -#define MTRR_TYPE_WP 5 > -#define MTRRcap_MSR 0xfe > -#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg)) > -#define MTRRphysMask_MSR(reg) (0x200 + 2 * 
(reg) + 1) > - > void board_final_cleanup(void) > { > /* Un-cache the ROM so the kernel has one > @@ -77,15 +73,17 @@ void board_final_cleanup(void) > * Coreboot should have assigned this to the > * top available variable MTRR. > */ > - u8 top_mtrr = (native_read_msr(MTRRcap_MSR) & 0xff) - 1; > - u8 top_type = native_read_msr(MTRRphysBase_MSR(top_mtrr)) & 0xff; > + u8 top_mtrr = (native_read_msr(MTRR_CAP_MSR) & 0xff) - 1; > + u8 top_type = native_read_msr(MTRR_PHYS_BASE_MSR(top_mtrr)) & 0xff; > > /* Make sure this MTRR is the correct Write-Protected type */ > - if (top_type == MTRR_TYPE_WP) { > - disable_caches(); > - wrmsrl(MTRRphysBase_MSR(top_mtrr), 0); > - wrmsrl(MTRRphysMask_MSR(top_mtrr), 0); > - enable_caches(); > + if (top_type == MTRR_TYPE_WRPROT) { > + struct mtrr_state state; > + > + mtrr_open(&state); > + wrmsrl(MTRR_PHYS_BASE_MSR(top_mtrr), 0); > + wrmsrl(MTRR_PHYS_MASK_MSR(top_mtrr), 0); > + mtrr_close(&state); > } > > /* Issue SMI to Coreboot to lock down ME and registers */ > diff --git a/arch/x86/cpu/ivybridge/car.S b/arch/x86/cpu/ivybridge/car.S > index dca68e4..72b22ea 100644 > --- a/arch/x86/cpu/ivybridge/car.S > +++ b/arch/x86/cpu/ivybridge/car.S > @@ -61,7 +61,7 @@ clear_mtrrs: > > post_code(POST_CAR_MTRR) > /* Configure the default memory type to uncacheable */ > - movl $MTRRdefType_MSR, %ecx > + movl $MTRR_DEF_TYPE_MSR, %ecx > rdmsr > andl $(~0x00000cff), %eax > wrmsr > @@ -76,16 +76,16 @@ clear_mtrrs: > post_code(POST_CAR_BASE_ADDRESS) > /* Set Cache-as-RAM mask */ > movl $(MTRR_PHYS_MASK_MSR(0)), %ecx > - movl $(~(CACHE_AS_RAM_SIZE - 1) | MTRRphysMaskValid), %eax > + movl $(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax > movl $CPU_PHYSMASK_HI, %edx > wrmsr > > post_code(POST_CAR_MASK) > > /* Enable MTRR */ > - movl $MTRRdefType_MSR, %ecx > + movl $MTRR_DEF_TYPE_MSR, %ecx > rdmsr > - orl $MTRRdefTypeEn, %eax > + orl $MTRR_DEF_TYPE_EN, %eax > wrmsr > > /* Enable cache (CR0.CD = 0, CR0.NW = 0) */ > @@ -130,7 +130,7 @@ clear_mtrrs: > 
> movl $MTRR_PHYS_MASK_MSR(1), %ecx > movl $CPU_PHYSMASK_HI, %edx > - movl $(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax > + movl $(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax > wrmsr > > post_code(POST_CAR_ROM_CACHE) > @@ -141,7 +141,7 @@ clear_mtrrs: > xorl %edx, %edx > wrmsr > movl $MTRR_PHYS_MASK_MSR(2), %ecx > - movl $(CACHE_MRC_MASK | MTRRphysMaskValid), %eax > + movl $(CACHE_MRC_MASK | MTRR_PHYS_MASK_VALID), %eax > movl $CPU_PHYSMASK_HI, %edx > wrmsr > #endif > diff --git a/arch/x86/cpu/mtrr.c b/arch/x86/cpu/mtrr.c > new file mode 100644 > index 0000000..d5a825d1 > --- /dev/null > +++ b/arch/x86/cpu/mtrr.c > @@ -0,0 +1,81 @@ > +/* > + * (C) Copyright 2014 Google, Inc > + * > + * SPDX-License-Identifier: GPL-2.0+ > + * > + * Memory Type Range Regsters - these are used to tell the CPU whether > + * memory is cacheable and if so the cache write mode to use. > + * > + * These can speed up booting. See the mtrr command. > + * > + * Reference: Intel Architecture Software Developer's Manual, Volume 3: > + * System Programming > + */ > + > +#include <common.h> > +#include <asm/io.h> > +#include <asm/msr.h> > +#include <asm/mtrr.h> > + > +/* Prepare to adjust MTRRs */ > +void mtrr_open(struct mtrr_state *state) > +{ > + state->enable_cache = dcache_status(); > + > + if (state->enable_cache) > + disable_caches(); > + state->deftype = native_read_msr(MTRR_DEF_TYPE_MSR); > + wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype & ~MTRR_DEF_TYPE_EN); > +} > + > +/* Clean up after adjusting MTRRs, and enable them */ > +void mtrr_close(struct mtrr_state *state) > +{ > + wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype | MTRR_DEF_TYPE_EN); > + if (state->enable_cache) > + enable_caches(); > +} > + > +int mtrr_commit(bool do_caches) > +{ > + struct mtrr_request *req = gd->arch.mtrr_req; > + struct mtrr_state state; > + uint64_t mask; > + int i; > + > + mtrr_open(&state); > + for (i = 0; i < gd->arch.mtrr_req_count; i++, req++) { > + mask = ~(req->size - 1); > + mask &= 
(1ULL << CONFIG_CPU_ADDR_BITS) - 1; > + wrmsrl(MTRR_PHYS_BASE_MSR(i), req->start | req->type); > + wrmsrl(MTRR_PHYS_MASK_MSR(i), mask | MTRR_PHYS_MASK_VALID); > + } > + > + /* Clear the ones that are unused */ > + for (; i < MTRR_COUNT; i++) > + wrmsrl(MTRR_PHYS_MASK_MSR(i), 0); > + mtrr_close(&state); > + > + return 0; > +} > + > +int mtrr_add_request(int type, uint64_t start, uint64_t size) > +{ > + struct mtrr_request *req; > + uint64_t mask; > + > + if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS) > + return -ENOSPC; > + req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++]; > + req->type = type; > + req->start = start; > + req->size = size; > + debug("%d: type=%d, %08llx %08llx\n", gd->arch.mtrr_req_count - 1, > + req->type, req->start, req->size); > + mask = ~(req->size - 1); > + mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1; > + mask |= MTRR_PHYS_MASK_VALID; > + debug(" %016llx %016llx\n", req->start | req->type, mask); > + > + return 0; > +} > diff --git a/arch/x86/include/asm/global_data.h > b/arch/x86/include/asm/global_data.h > index 03d491a..15e76f6 100644 > --- a/arch/x86/include/asm/global_data.h > +++ b/arch/x86/include/asm/global_data.h > @@ -29,6 +29,19 @@ struct memory_info { > struct memory_area area[CONFIG_NR_DRAM_BANKS]; > }; > > +#define MAX_MTRR_REQUESTS 8 > + > +/** > + * A request for a memory region to be set up in a particular way. These > + * requests are processed before board_init_r() is called. They are generally > + * optional and can be ignored with some performance impact. > + */ > +struct mtrr_request { > + int type; /* MTRR_TYPE_... 
*/ > + uint64_t start; > + uint64_t size; > +}; > + > /* Architecture-specific global data */ > struct arch_global_data { > struct global_data *gd_addr; /* Location of Global Data */ > @@ -50,6 +63,8 @@ struct arch_global_data { > #ifdef CONFIG_HAVE_FSP > void *hob_list; /* FSP HOB list */ > #endif > + struct mtrr_request mtrr_req[MAX_MTRR_REQUESTS]; > + int mtrr_req_count; > }; > > #endif > diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h > index 5f05a48..3c11740 100644 > --- a/arch/x86/include/asm/mtrr.h > +++ b/arch/x86/include/asm/mtrr.h > @@ -9,99 +9,86 @@ > #ifndef _ASM_MTRR_H > #define _ASM_MTRR_H > > -/* These are the region types */ > -#define MTRR_TYPE_UNCACHEABLE 0 > -#define MTRR_TYPE_WRCOMB 1 > -/*#define MTRR_TYPE_ 2*/ > -/*#define MTRR_TYPE_ 3*/ > -#define MTRR_TYPE_WRTHROUGH 4 > -#define MTRR_TYPE_WRPROT 5 > -#define MTRR_TYPE_WRBACK 6 > -#define MTRR_NUM_TYPES 7 > - > -#define MTRRcap_MSR 0x0fe > -#define MTRRdefType_MSR 0x2ff > - > -#define MTRRdefTypeEn (1 << 11) > -#define MTRRdefTypeFixEn (1 << 10) > - > -#define SMRRphysBase_MSR 0x1f2 > -#define SMRRphysMask_MSR 0x1f3 > - > -#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg)) > -#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1) > - > -#define MTRRphysMaskValid (1 << 11) > - > -#define NUM_FIXED_RANGES 88 > -#define RANGES_PER_FIXED_MTRR 8 > -#define MTRRfix64K_00000_MSR 0x250 > -#define MTRRfix16K_80000_MSR 0x258 > -#define MTRRfix16K_A0000_MSR 0x259 > -#define MTRRfix4K_C0000_MSR 0x268 > -#define MTRRfix4K_C8000_MSR 0x269 > -#define MTRRfix4K_D0000_MSR 0x26a > -#define MTRRfix4K_D8000_MSR 0x26b > -#define MTRRfix4K_E0000_MSR 0x26c > -#define MTRRfix4K_E8000_MSR 0x26d > -#define MTRRfix4K_F0000_MSR 0x26e > -#define MTRRfix4K_F8000_MSR 0x26f > +/* MTRR region types */ > +#define MTRR_TYPE_UNCACHEABLE 0 > +#define MTRR_TYPE_WRCOMB 1 > +#define MTRR_TYPE_WRTHROUGH 4 > +#define MTRR_TYPE_WRPROT 5 > +#define MTRR_TYPE_WRBACK 6 > + > +#define MTRR_TYPE_COUNT 7 > + > +#define 
MTRR_CAP_MSR 0x0fe > +#define MTRR_DEF_TYPE_MSR 0x2ff > + > +#define MTRR_DEF_TYPE_EN (1 << 11) > +#define MTRR_DEF_TYPE_FIX_EN (1 << 10) > + > +#define MTRR_PHYS_BASE_MSR(reg) (0x200 + 2 * (reg)) > +#define MTRR_PHYS_MASK_MSR(reg) (0x200 + 2 * (reg) + 1) > + > +#define MTRR_PHYS_MASK_VALID (1 << 11) > + > +#define MTRR_BASE_TYPE_MASK 0x7 > + > +/* Number of MTRRs supported */ > +#define MTRR_COUNT 8 > > #if !defined(__ASSEMBLER__) > > -/* > - * The MTRR code has some side effects that the callers should be aware for. > - * 1. The call sequence matters. x86_setup_mtrrs() calls > - * x86_setup_fixed_mtrrs_no_enable() then enable_fixed_mtrrs() (equivalent > - * of x86_setup_fixed_mtrrs()) then x86_setup_var_mtrrs(). If the callers > - * want to call the components of x86_setup_mtrrs() because of other > - * rquirements the ordering should still preserved. > - * 2. enable_fixed_mtrr() will enable both variable and fixed MTRRs because > - * of the nature of the global MTRR enable flag. Therefore, all direct > - * or indirect callers of enable_fixed_mtrr() should ensure that the > - * variable MTRR MSRs do not contain bad ranges. > - * 3. If CONFIG_CACHE_ROM is selected an MTRR is allocated for enabling > - * the caching of the ROM. However, it is set to uncacheable (UC). It > - * is the responsiblity of the caller to enable it by calling > - * x86_mtrr_enable_rom_caching(). > +/** > + * Information about the previous MTRR state, set up by mtrr_open() > + * > + * @deftype: Previous value of MTRR_DEF_TYPE_MSR > + * @enable_cache: true if cache was enabled > */ > -void x86_setup_mtrrs(void); > -/* > - * x86_setup_var_mtrrs() parameters: > - * address_bits - number of physical address bits supported by cpu > - * above4gb - 2 means dynamically detect number of variable MTRRs available. > - * non-zero means handle memory ranges above 4GiB. 
> - * 0 means ignore memory ranges above 4GiB > +struct mtrr_state { > + uint64_t deftype; > + bool enable_cache; > +}; > + > +/** > + * mtrr_open() - Prepare to adjust MTRRs > + * > + * Use mtrr_open() passing in a structure - this function will init it. Then > + * when done, pass the same structure to mtrr_close() to re-enable MTRRs and > + * possibly the cache. > + * > + * @state: Empty structure to pass in to hold settings > */ > -void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb); > -void enable_fixed_mtrr(void); > -void x86_setup_fixed_mtrrs(void); > -/* Set up fixed MTRRs but do not enable them. */ > -void x86_setup_fixed_mtrrs_no_enable(void); > -int x86_mtrr_check(void); > -/* ROM caching can be used after variable MTRRs are set up. Beware that > - * enabling CONFIG_CACHE_ROM will eat through quite a few MTRRs based on > - * one's IO hole size and WRCOMB resources. Be sure to check the console > - * log when enabling CONFIG_CACHE_ROM or adding WRCOMB resources. Beware that > - * on CPUs with core-scoped MTRR registers such as hyperthreaded CPUs the > - * rom caching will be disabled if all threads run the MTRR code. Therefore, > - * one needs to call x86_mtrr_enable_rom_caching() after all threads of the > - * same core have run the MTRR code. */ > -#if CONFIG_CACHE_ROM > -void x86_mtrr_enable_rom_caching(void); > -void x86_mtrr_disable_rom_caching(void); > -/* Return the variable range MTRR index of the ROM cache. */ > -long x86_mtrr_rom_cache_var_index(void); > -#else > -static inline void x86_mtrr_enable_rom_caching(void) {} > -static inline void x86_mtrr_disable_rom_caching(void) {} > -static inline long x86_mtrr_rom_cache_var_index(void) { return -1; } > -#endif /* CONFIG_CACHE_ROM */ > +void mtrr_open(struct mtrr_state *state); > > -#endif > +/** > + * mtrr_open() - Clean up after adjusting MTRRs, and enable them > + * > + * This uses the structure containing information returned from mtrr_open(). 
> + * > + * @state: Structure from mtrr_open() > + */ > +/* */ > +void mtrr_close(struct mtrr_state *state); > + > +/** > + * mtrr_add_request() - Add a new MTRR request > + * > + * This adds a request for a memory region to be set up in a particular way. > + * > + * @type: Requested type (MTRR_TYPE_) > + * @start: Start address > + * @size: Size > + */ > +int mtrr_add_request(int type, uint64_t start, uint64_t size); > + > +/** > + * mtrr_commit() - set up the MTRR registers based on current requests > + * > + * This sets up MTRRs for the available DRAM and the requests received so > far. > + * It must be called with caches disabled. > + * > + * @do_caches: true if caches are currently on > + */ > +int mtrr_commit(bool do_caches); > > -#if !defined(CONFIG_RAMTOP) > -# error "CONFIG_RAMTOP not defined" > #endif >
Can we move the removal of CONFIG_RAMTOP to patch #2 in this series? > #if ((CONFIG_XIP_ROM_SIZE & (CONFIG_XIP_ROM_SIZE - 1)) != 0) > @@ -114,8 +101,4 @@ static inline long x86_mtrr_rom_cache_var_index(void) { > return -1; } > > #define CACHE_ROM_BASE (((1 << 20) - (CONFIG_CACHE_ROM_SIZE >> 12)) << 12) > > -#if (CONFIG_RAMTOP & (CONFIG_RAMTOP - 1)) != 0 > -# error "CONFIG_RAMTOP must be a power of 2" > -#endif > - Ditto. > #endif > -- Regards, Bin _______________________________________________ U-Boot mailing list U-Boot@lists.denx.de http://lists.denx.de/mailman/listinfo/u-boot