On Wed, Dec 17, 2008 at 11:04 PM, Corey Osgood <[email protected]>wrote:
> On Wed, Dec 17, 2008 at 8:55 PM, Peter Stuge <[email protected]> wrote:
>
>> Corey Osgood wrote:
>> > would there be any problem with calling functions to enable mtrrs
>> > and the cache (if it's not already) from the end of disable_car()?
>>
>> None whatsoever. Commit at will.
>
>
> It didn't help, disable_car() already does essentially the same thing;
> disable cache, enable mtrrs, re-enable cache. I'm comparing memtest between
> the stock bios and coreboot right now, the throughput for the stock bios is
> 6122MB/s for L1 cache and 574MB/s for memory. With v2, it's 3265 and 191,
> respectively (using ROMCC), with v3 it's 15 and 18. So something's not right
> somewhere. The other thing is that in v2 and v3, the CPU is only running at
> 800MHz in memtest, but with the stock BIOS it runs at 1.5GHz, that's
> probably the reason for the differing cache throughputs. Anyways, I'm diving
> into both v2 and v3 and trying to track down why this is running so slowly.
>
<insert happy dance here>
From the currently-running memtest86 on coreboot-v3:
L1 Cache: 128K 3265 MB/s
Memory : 480M 240 MB/s
That's right, faster than v2 :) I've managed to coerce the northbridge into
running the memory at 200MHz (DDR400) without locking up the system like it
does in v2, and also to use 1GB of ram, which the factory BIOS only sees as
512MB, and v2 for some reason trips over. However, it's a mixed blessing,
even though memtest86 now runs at an acceptable speed, coreboot is still
running fairly slowly. I'm attaching a patch that brings over mtrr.c from v2
and hacks it to work with v3, but no sign-off because IMO it's not ready to
be committed. I'll try booting a FILO payload and a kernel tomorrow, but
right now it's time for some sleep.
-Corey
Index: southbridge/via/vt8237/stage1.c
===================================================================
--- southbridge/via/vt8237/stage1.c (revision 1078)
+++ southbridge/via/vt8237/stage1.c (working copy)
@@ -155,8 +155,9 @@
*/
void enable_smbus(u16 smbus_io_base)
{
- u32 dev;
+ u32 dev = PCI_BDF(0, 17, 0);
+#if 0
/* Power management controller */
pci_conf1_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT8237R_LPC,
&dev);
@@ -178,6 +179,7 @@
printk(BIOS_DEBUG, "VT8237R Power management controller found "
"at 0x%x\n", dev);
}
+#endif
/* 7 = SMBus Clock from RTC 32.768KHz
* 5 = Internal PLL reset from susp
@@ -201,36 +203,14 @@
inb(smbus_io_base + SMBHSTCTL);
}
-/* The change from RAID to SATA in phase6 causes coreboot to lock up, so do it
- * as early as possible. Move back to stage2 later */
-static void sata_stage1(void)
-{
- u32 dev;
- u8 reg;
-
- pci_conf1_find_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_VT8237R_SATA, &dev);
-
- printk(BIOS_DEBUG, "Configuring VIA SATA controller\n");
-
- /* Class IDE Disk */
- reg = pci_conf1_read_config8(dev, SATA_MISC_CTRL);
- reg &= 0x7f; /* Sub Class Write Protect off */
- pci_conf1_write_config8(dev, SATA_MISC_CTRL, reg);
-
- /* Change the device class to SATA from RAID. */
- pci_conf1_write_config8(dev, PCI_CLASS_DEVICE, 0x1);
- reg |= 0x80; /* Sub Class Write Protect on */
- pci_conf1_write_config8(dev, SATA_MISC_CTRL, reg);
-}
-
void vt8237_stage1(u16 smbus_io_base)
{
- u32 dev;
+ u32 dev = PCI_BDF(0, 17, 0);
u32 ide_dev;
printk(BIOS_DEBUG, "Doing vt8237r/s stage1 init\n");
- pci_conf1_find_device(0x1106, 0x3227, &dev);
+ //pci_conf1_find_device(0x1106, 0x3227, &dev);
pci_conf1_find_device(0x1106, 0x0571, &ide_dev);
/* Disable GP3 timer, or else the system reboots when it runs out.
Index: mainboard/jetway/j7f2/dts
===================================================================
--- mainboard/jetway/j7f2/dts (revision 1078)
+++ mainboard/jetway/j7f2/dts (working copy)
@@ -66,7 +66,7 @@
/* How do I represent the bus and pci devices hanging here? */
p...@1,0 {
/config/("northbridge/via/cn700/pci.dts");
- p...@0,1 {
+ p...@0,0 {
/config/("northbridge/via/cn700/vga.dts");
};
};
Index: northbridge/via/cn700/pci_domain.c
===================================================================
--- northbridge/via/cn700/pci_domain.c (revision 1078)
+++ northbridge/via/cn700/pci_domain.c (working copy)
@@ -108,11 +108,12 @@
}
/* Report the memory regions. */
idx = 10;
- /* TODO: Hole needed? */
- ram_resource(dev, idx++, 0, 640); /* First 640k */
+
+ ram_resource(dev, idx++, 0, 640);
/* Leave a hole for VGA, 0xa0000 - 0xc0000 */
- ram_resource(dev, idx++, 768,
- (tolmk - 768 - (CONFIG_CN700_VIDEO_MB * 1024)));
+ /* TODO: shadow ram needs to be controlled via dts */
+ ram_resource(dev, idx++, 1024,
+ (tolmk - 1024 - (CONFIG_CN700_VIDEO_MB * 1024)));
phase4_assign_resources(&dev->link[0]);
}
@@ -129,5 +130,3 @@
.phase6_init = 0,
.ops_pci_bus = &pci_cf8_conf1,
};
-
-
Index: northbridge/via/cn700/stage1.c
===================================================================
--- northbridge/via/cn700/stage1.c (revision 1078)
+++ northbridge/via/cn700/stage1.c (working copy)
@@ -24,59 +24,6 @@
#include <config.h>
#include "cn700.h"
-static void enable_shadow_ram(void)
-{
- u8 shadowreg;
-
- printk(BIOS_DEBUG, "Enabling shadow ram\n");
- /* Enable shadow ram as normal dram */
- /* 0xc0000-0xcffff */
- pci_conf1_write_config8(PCI_BDF(0, 0, 3), 0x80, 0xff);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x61, 0xff);
- /* 0xd0000-0xdffff */
- pci_conf1_write_config8(PCI_BDF(0, 0, 3), 0x81, 0xff);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x62, 0xff);
- /* 0xe0000-0xeffff */
- pci_conf1_write_config8(PCI_BDF(0, 0, 3), 0x82, 0xff);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x64, 0xff);
-
- /* 0xf0000-0xfffff */
- shadowreg = pci_conf1_read_config8(PCI_BDF(0, 0, 3), 0x83);
- shadowreg |= 0x30;
- pci_conf1_write_config8(PCI_BDF(0, 0, 3), 0x83, shadowreg);
-
- /* Do it again for the vlink controller */
- shadowreg = pci_conf1_read_config8(PCI_BDF(0, 0, 7), 0x63);
- shadowreg |= 0x30;
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x63, shadowreg);
-}
-
-static void enable_vlink(void)
-{
- printk(BIOS_DEBUG, "Enabling Via V-Link\n");
-
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x42, 0x88);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x45, 0x44);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x46, 0x00);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x47, 0x04);
- //pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x48, 0x13);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x4b, 0x80);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x4c, 0x82);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x4d, 0x44);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x4e, 0x00);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x4f, 0x01);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0xb4, 0x35);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0xb5, 0x66);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0xb6, 0x66);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0xb7, 0x64);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0xb8, 0x45);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0xb9, 0x98);
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0xba, 0x77);
-
- /* This has to be done last, I think */
- pci_conf1_write_config8(PCI_BDF(0, 0, 7), 0x48, 0x13);
-}
-
/**
* Configure the bus between the cpu and the northbridge. This might be able
to
* be moved to post-ram code in the future. For the most part, these registers
@@ -92,6 +39,7 @@
static void c7_cpu_setup(void)
{
u32 dev = PCI_BDF(0, 0, 2);
+ u8 reg8;
/* Host bus interface registers (D0F2 0x50-0x67) */
/* Request phase control */
@@ -114,6 +62,13 @@
* 110/111 : Reserved
* bits 4:0: Reserved
*/
+
+ reg8 = pci_conf1_read_config8(dev, 0x57);
+ reg8 &= (0x7 << 5);
+ //reg8 |= (0x4 << 5);
+ reg8 |= (0x3 << 5);
+ pci_conf1_write_config8(dev, 0x57, reg8);
+
/* CPU Miscellaneous Control */
pci_conf1_write_config8(dev, 0x59, 0x44);
/* Write Policy */
@@ -179,7 +134,5 @@
pci_conf1_write_config8(PCI_BDF(0, 1, 0), 0x19, 0x1);
pci_conf1_write_config8(PCI_BDF(0, 1, 0), 0x1a, 0x1);
- enable_shadow_ram();
- enable_vlink();
c7_cpu_setup();
}
Index: arch/x86/via/c7.c
===================================================================
--- arch/x86/via/c7.c (revision 1078)
+++ arch/x86/via/c7.c (working copy)
@@ -203,8 +203,8 @@
/* Set up Memory Type Range Registers */
//these don't exist yet
- //x86_setup_mtrrs(36);
- //x86_mtrr_check();
+ x86_setup_mtrrs(36);
+ x86_mtrr_check();
/* Enable the local cpu apics */
//setup_lapic();
Index: arch/x86/mtrr.c
===================================================================
--- arch/x86/mtrr.c (revision 0)
+++ arch/x86/mtrr.c (revision 0)
@@ -0,0 +1,449 @@
+/*
+ * mtrr.c: setting MTRR to decent values for cache initialization on P6
+ *
+ * Derived from intel_set_mtrr in intel_subr.c and mtrr.c in linux kernel
+ *
+ * Copyright 2000 Silicon Integrated System Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * Reference: Intel Architecture Software Developer's Manual, Volume 3: System
Programming
+ */
+
+/*
+ 2005.1 yhlu add NC support to spare mtrrs for 64G memory above
installed
+ 2005.6 Eric add address bit in x86_setup_mtrrs
+ 2005.6 yhlu split x86_setup_var_mtrrs and x86_setup_fixed_mtrrs,
+ for AMD, it will not use x86_setup_fixed_mtrrs
+*/
+
+#include <types.h>
+#include <console.h>
+#include <arch/x86/msr.h>
+#include <arch/x86/mtrr.h>
+#include <arch/x86/cpu.h>
+
+static unsigned int mtrr_msr[] = {
+ MTRRfix64K_00000_MSR, MTRRfix16K_80000_MSR, MTRRfix16K_A0000_MSR,
+ MTRRfix4K_C0000_MSR, MTRRfix4K_C8000_MSR, MTRRfix4K_D0000_MSR,
MTRRfix4K_D8000_MSR,
+ MTRRfix4K_E0000_MSR, MTRRfix4K_E8000_MSR, MTRRfix4K_F0000_MSR,
MTRRfix4K_F8000_MSR,
+};
+
+void enable_fixed_mtrr(void)
+{
+ struct msr msr;
+
+ msr = rdmsr(MTRRdefType_MSR);
+ msr.lo |= 0xc00;
+ wrmsr(MTRRdefType_MSR, msr);
+}
+
+static void enable_var_mtrr(void)
+{
+ struct msr msr;
+
+ msr = rdmsr(MTRRdefType_MSR);
+ msr.lo |= 0x800;
+ wrmsr(MTRRdefType_MSR, msr);
+}
+
+/* setting variable mtrr, comes from linux kernel source */
+static void set_var_mtrr_stage2(
+ unsigned int reg, unsigned long basek, unsigned long sizek,
+ unsigned long type, unsigned address_bits)
+{
+ struct msr base, mask;
+ unsigned address_mask_high;
+
+ if (reg >= 8)
+ return;
+
+ // it is recommended that we disable and enable cache when we
+ // do this.
+ if (sizek == 0) {
+ disable_cache();
+
+ struct msr zero;
+ zero.lo = zero.hi = 0;
+ /* The invalid bit is kept in the mask, so we simply clear the
+ relevant mask register to disable a range. */
+ wrmsr (MTRRphysMask_MSR(reg), zero);
+
+ enable_cache();
+ return;
+ }
+
+
+ address_mask_high = ((1u << (address_bits - 32u)) - 1u);
+
+ base.hi = basek >> 22;
+ base.lo = basek << 10;
+
+ printk(BIOS_SPEW, "ADDRESS_MASK_HIGH=%#x\n", address_mask_high);
+
+ if (sizek < 4*1024*1024) {
+ mask.hi = address_mask_high;
+ mask.lo = ~((sizek << 10) -1);
+ }
+ else {
+ mask.hi = address_mask_high & (~((sizek >> 22) -1));
+ mask.lo = 0;
+ }
+
+ // it is recommended that we disable and enable cache when we
+ // do this.
+ disable_cache();
+
+ /* Bit 32-35 of MTRRphysMask should be set to 1 */
+ base.lo |= type;
+ mask.lo |= 0x800;
+ wrmsr (MTRRphysBase_MSR(reg), base);
+ wrmsr (MTRRphysMask_MSR(reg), mask);
+
+ enable_cache();
+}
+
+/* fms: find most significant bit set, stolen from Linux Kernel Source. */
+static inline unsigned int fms(unsigned int x)
+{
+ int r;
+
+ __asm__("bsrl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $0,%0\n"
+ "1:" : "=r" (r) : "g" (x));
+ return r;
+}
+
+/* fls: find least significant bit set */
+static inline unsigned int fls(unsigned int x)
+{
+ int r;
+
+ __asm__("bsfl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $32,%0\n"
+ "1:" : "=r" (r) : "g" (x));
+ return r;
+}
+
+/* setting up variable and fixed mtrr
+ *
+ * From Intel Vol. III Section 9.12.4, the Range Size and Base Alignment has
some kind of requirement:
+ * 1. The range size must be 2^N byte for N >= 12 (i.e 4KB minimum).
+ * 2. The base address must be 2^N aligned, where the N here is equal to
the N in previous
+ * requirement. So a 8K range must be 8K aligned not 4K aligned.
+ *
+ * These requirements are met by "decomposing" the ramsize into Sum(Cn * 2^n,
n = [0..N], Cn = [0, 1]).
+ * For Cm = 1, there is a WB range of 2^m size at base address Sum(Cm * 2^m, m
= [N..n]).
+ * A 124MB (128MB - 4MB SMA) example:
+ * ramsize = 124MB == 64MB (at 0MB) + 32MB (at 64MB) + 16MB (at 96MB ) +
8MB (at 112MB) + 4MB (120MB).
+ * But this wastes a lot of MTRR registers so we use another more "aggressive"
way with Uncacheable Regions.
+ *
+ * In the Uncacheable Region scheme, we try to cover the whole ramsize by one
WB region if possible.
+ * If (and only if) this cannot be done we will try to decompose the
ramsize; the mathematical formula
+ * would be ramsize = Sum(Cn * 2^n, n = [0..N], Cn = [-1, 0, 1]). For Cn =
-1, an Uncacheable Region is used.
+ * The same 124MB example:
+ * ramsize = 124MB == 128MB WB (at 0MB) + 4MB UC (at 124MB)
+ * or a 156MB (128MB + 32MB - 4MB SMA) example:
+ * ramsize = 156MB == 128MB WB (at 0MB) + 32MB WB (at 128MB) + 4MB UC (at
156MB)
+ */
+/* 2 MTRRS are reserved for the operating system */
+#if 0
+#define BIOS_MTRRS 6
+#define OS_MTRRS 2
+#else
+#define BIOS_MTRRS 8
+#define OS_MTRRS 0
+#endif
+#define MTRRS (BIOS_MTRRS + OS_MTRRS)
+
+static void set_fixed_mtrrs(unsigned int first, unsigned int last, unsigned
char type)
+{
+ unsigned int i;
+ unsigned int fixed_msr = NUM_FIXED_RANGES >> 3;
+ struct msr msr;
+ msr.lo = msr.hi = 0; /* Shut up gcc */
+ for(i = first; i < last; i++) {
+ /* When I switch to a new msr read it in */
+ if (fixed_msr != i >> 3) {
+ /* But first write out the old msr */
+ if (fixed_msr < (NUM_FIXED_RANGES >> 3)) {
+ disable_cache();
+ wrmsr(mtrr_msr[fixed_msr], msr);
+ enable_cache();
+ }
+ fixed_msr = i>>3;
+ msr = rdmsr(mtrr_msr[fixed_msr]);
+ }
+ if ((i & 7) < 4) {
+ msr.lo &= ~(0xff << ((i&3)*8));
+ msr.lo |= type << ((i&3)*8);
+ } else {
+ msr.hi &= ~(0xff << ((i&3)*8));
+ msr.hi |= type << ((i&3)*8);
+ }
+ }
+ /* Write out the final msr */
+ if (fixed_msr < (NUM_FIXED_RANGES >> 3)) {
+ disable_cache();
+ wrmsr(mtrr_msr[fixed_msr], msr);
+ enable_cache();
+ }
+}
+
+static unsigned fixed_mtrr_index(unsigned long addrk)
+{
+ unsigned index;
+ index = (addrk - 0) >> 6;
+ if (index >= 8) {
+ index = ((addrk - 8*64) >> 4) + 8;
+ }
+ if (index >= 24) {
+ index = ((addrk - (8*64 + 16*16)) >> 2) + 24;
+ }
+ if (index > NUM_FIXED_RANGES) {
+ index = NUM_FIXED_RANGES;
+ }
+ return index;
+}
+
+static unsigned int range_to_mtrr(unsigned int reg,
+ unsigned long range_startk, unsigned long range_sizek,
+ unsigned long next_range_startk, unsigned char type, unsigned
address_bits)
+{
+ if (!range_sizek || (reg >= BIOS_MTRRS)) {
+ return reg;
+ }
+ while(range_sizek) {
+ unsigned long max_align, align;
+ unsigned long sizek;
+ /* Compute the maximum size I can make a range */
+ max_align = fls(range_startk);
+ align = fms(range_sizek);
+ if (align > max_align) {
+ align = max_align;
+ }
+ sizek = 1 << align;
+ printk(BIOS_DEBUG, "Setting variable MTRR %d, base: %4dMB,
range: %4dMB, type %s\n",
+ reg, range_startk >>10, sizek >> 10,
+ (type==MTRR_TYPE_UNCACHEABLE)?"UC":
+ ((type==MTRR_TYPE_WRBACK)?"WB":"Other")
+ );
+ set_var_mtrr_stage2(reg++, range_startk, sizek, type,
address_bits);
+ range_startk += sizek;
+ range_sizek -= sizek;
+ if (reg >= BIOS_MTRRS)
+ break;
+ }
+ return reg;
+}
+
+static unsigned long resk(u64 value)
+{
+ unsigned long resultk;
+ if (value < (1ULL << 42)) {
+ resultk = value >> 10;
+ }
+ else {
+ resultk = 0xffffffff;
+ }
+ return resultk;
+}
+
+static void set_fixed_mtrr_resource(void *gp, struct device *dev, struct
resource *res)
+{
+ unsigned int start_mtrr;
+ unsigned int last_mtrr;
+ start_mtrr = fixed_mtrr_index(resk(res->base));
+ last_mtrr = fixed_mtrr_index(resk((res->base + res->size)));
+ if (start_mtrr >= NUM_FIXED_RANGES) {
+ return;
+ }
+ printk(BIOS_DEBUG, "Setting fixed MTRRs(%d-%d) Type: WB\n",
+ start_mtrr, last_mtrr);
+ set_fixed_mtrrs(start_mtrr, last_mtrr, MTRR_TYPE_WRBACK);
+
+}
+
+#ifndef CONFIG_VAR_MTRR_HOLE
+#define CONFIG_VAR_MTRR_HOLE 1
+#endif
+
+struct var_mtrr_state {
+ unsigned long range_startk, range_sizek;
+ unsigned int reg;
+#if CONFIG_VAR_MTRR_HOLE
+ unsigned long hole_startk, hole_sizek;
+#endif
+ unsigned address_bits;
+};
+
+void set_var_mtrr_resource(void *gp, struct device *dev, struct resource *res)
+{
+ struct var_mtrr_state *state = gp;
+ unsigned long basek, sizek;
+ if (state->reg >= BIOS_MTRRS)
+ return;
+ basek = resk(res->base);
+ sizek = resk(res->size);
+ /* See if I can merge with the last range
+ * Either I am below 1M and the fixed mtrrs handle it, or
+ * the ranges touch.
+ */
+ if ((basek <= 1024) || (state->range_startk + state->range_sizek ==
basek)) {
+ unsigned long endk = basek + sizek;
+ state->range_sizek = endk - state->range_startk;
+ return;
+ }
+ /* Write the range mtrrs */
+ if (state->range_sizek != 0) {
+#if CONFIG_VAR_MTRR_HOLE
+ if (state->hole_sizek == 0) {
+ /* We need to put that on to hole */
+ unsigned long endk = basek + sizek;
+ state->hole_startk = state->range_startk +
state->range_sizek;
+ state->hole_sizek = basek - state->hole_startk;
+ state->range_sizek = endk - state->range_startk;
+ return;
+ }
+#endif
+ state->reg = range_to_mtrr(state->reg, state->range_startk,
+ state->range_sizek, basek, MTRR_TYPE_WRBACK,
state->address_bits);
+#if CONFIG_VAR_MTRR_HOLE
+ state->reg = range_to_mtrr(state->reg, state->hole_startk,
+ state->hole_sizek, basek, MTRR_TYPE_UNCACHEABLE,
state->address_bits);
+#endif
+ state->range_startk = 0;
+ state->range_sizek = 0;
+#if CONFIG_VAR_MTRR_HOLE
+ state->hole_startk = 0;
+ state->hole_sizek = 0;
+#endif
+ }
+ /* Allocate an msr */
+ printk(BIOS_SPEW, " Allocate an msr - basek = %08x, sizek = %08x,\n",
basek, sizek);
+ state->range_startk = basek;
+ state->range_sizek = sizek;
+}
+
+void x86_setup_fixed_mtrrs(void)
+{
+ /* Try this the simple way of incrementally adding together
+ * mtrrs. If this doesn't work out we can get smart again
+ * and clear out the mtrrs.
+ */
+
+ printk(BIOS_DEBUG, "\n");
+ /* Initialized the fixed_mtrrs to uncached */
+ printk(BIOS_DEBUG, "Setting fixed MTRRs(%d-%d) Type: UC\n",
+ 0, NUM_FIXED_RANGES);
+ set_fixed_mtrrs(0, NUM_FIXED_RANGES, MTRR_TYPE_UNCACHEABLE);
+
+ /* Now see which of the fixed mtrrs cover ram.
+ */
+ search_global_resources(
+ IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM |
IORESOURCE_CACHEABLE,
+ set_fixed_mtrr_resource, NULL);
+ printk(BIOS_DEBUG, "DONE fixed MTRRs\n");
+
+ /* enable fixed MTRR */
+ printk(BIOS_SPEW, "call enable_fixed_mtrr()\n");
+ enable_fixed_mtrr();
+
+}
+void x86_setup_var_mtrrs(unsigned address_bits)
+/* this routine needs to know how many address bits a given processor
+ * supports. CPUs get grumpy when you set too many bits in
+ * their mtrr registers :( I would generically call cpuid here
+ * and find out how many physically supported but some cpus are
+ * buggy, and report more bits then they actually support.
+ */
+{
+ /* Try this the simple way of incrementally adding together
+ * mtrrs. If this doesn't work out we can get smart again
+ * and clear out the mtrrs.
+ */
+ struct var_mtrr_state var_state;
+
+ /* Cache as many memory areas as possible */
+ /* FIXME is there an algorithm for computing the optimal set of mtrrs?
+ * In some cases it is definitely possible to do better.
+ */
+ var_state.range_startk = 0;
+ var_state.range_sizek = 0;
+#if CONFIG_VAR_MTRR_HOLE
+ var_state.hole_startk = 0;
+ var_state.hole_sizek = 0;
+#endif
+ var_state.reg = 0;
+ var_state.address_bits = address_bits;
+ search_global_resources(
+ IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM |
IORESOURCE_CACHEABLE,
+ set_var_mtrr_resource, &var_state);
+
+ /* Write the last range */
+ var_state.reg = range_to_mtrr(var_state.reg, var_state.range_startk,
+ var_state.range_sizek, 0, MTRR_TYPE_WRBACK,
var_state.address_bits);
+#if CONFIG_VAR_MTRR_HOLE
+ var_state.reg = range_to_mtrr(var_state.reg, var_state.hole_startk,
+ var_state.hole_sizek, 0, MTRR_TYPE_UNCACHEABLE,
var_state.address_bits);
+#endif
+ printk(BIOS_DEBUG, "DONE variable MTRRs\n");
+ printk(BIOS_DEBUG, "Clear out the extra MTRR's\n");
+ /* Clear out the extra MTRR's */
+ while(var_state.reg < MTRRS) {
+ set_var_mtrr_stage2(var_state.reg++, 0, 0, 0,
var_state.address_bits);
+ }
+ printk(BIOS_SPEW, "call enable_var_mtrr()\n");
+ enable_var_mtrr();
+ printk(BIOS_SPEW, "Leave %s\n", __FUNCTION__);
+ post_code(0x6A);
+}
+
+void x86_setup_mtrrs(unsigned address_bits)
+{
+ x86_setup_fixed_mtrrs();
+ x86_setup_var_mtrrs(address_bits);
+}
+
+
+int x86_mtrr_check(void)
+{
+ /* Only Pentium Pro and later have MTRR */
+ struct msr msr;
+ printk(BIOS_DEBUG, "\nMTRR check\n");
+
+ msr = rdmsr(0x2ff);
+ msr.lo >>= 10;
+
+ printk(BIOS_DEBUG, "Fixed MTRRs : ");
+ if (msr.lo & 0x01)
+ printk(BIOS_DEBUG, "Enabled\n");
+ else
+ printk(BIOS_DEBUG, "Disabled\n");
+
+ printk(BIOS_DEBUG, "Variable MTRRs: ");
+ if (msr.lo & 0x02)
+ printk(BIOS_DEBUG, "Enabled\n");
+ else
+ printk(BIOS_DEBUG, "Disabled\n");
+
+ printk(BIOS_DEBUG, "\n");
+
+ post_code(0x93);
+ return ((int) msr.lo);
+}
Index: arch/x86/Makefile
===================================================================
--- arch/x86/Makefile (revision 1078)
+++ arch/x86/Makefile (working copy)
@@ -205,7 +205,7 @@
STAGE2_ARCH_X86_SRC = archtables.c coreboot_table.c multiboot.c udelay_io.c
STAGE2_ARCH_X86_SRC += pci_ops_auto.c pci_ops_conf1.c
-STAGE2_ARCH_X86_SRC += keyboard.c i8259.c isa-dma.c
+STAGE2_ARCH_X86_SRC += keyboard.c i8259.c isa-dma.c mtrr.c
ifeq ($(CONFIG_PIRQ_TABLE),y)
STAGE2_ARCH_X86_SRC += pirq_routing.c
--
coreboot mailing list: [email protected]
http://www.coreboot.org/mailman/listinfo/coreboot