Author: kmacy
Date: Wed Nov 25 01:52:36 2009
New Revision: 199773
URL: http://svn.freebsd.org/changeset/base/199773

Log:
  MFC Xen pmap updates and eflags fixes:

  - vm_machdep.c: initialize td_md.md_saved_flags to PSL_KERNEL | PSL_I
    unconditionally in cpu_fork() and cpu_set_upcall() instead of zeroing
    it under XEN.
  - cpufunc.h: under XEN, rename the inline read_eflags()/write_eflags()
    to _read_eflags()/_write_eflags() and declare out-of-line replacements;
    intr_disable()/intr_restore() no longer need xen_save_and_cli()/
    xen_restore_flags().
  - xen/pmap.c: replace the compile-time PAT_WORKS option with a runtime
    pat_works check keyed off the CPU vendor and model, make pg_ps_enabled
    a tunable (CTLFLAG_RDTUN), use cpumask_t for the lazy pmap shootdown
    masks, use PT_SET_MA() in pmap_kenter_temporary(), and move pagezero()
    next to pmap_zero_page().
  - xen/xen_machdep.c: implement the Xen read_eflags()/write_eflags()
    replacements, folding the vcpu event-channel upcall mask into PSL_I,
    and add KTR tracing to the interrupt enable/disable paths.
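
  The eflags half of the merge replaces the xen_save_and_cli() /
  xen_restore_flags() pair with Xen-aware read_eflags()/write_eflags()
  wrappers: the native inlines become _read_eflags()/_write_eflags()
  under XEN, and the out-of-line versions in xen_machdep.c fold the
  vcpu's event-channel upcall mask into PSL_I.  A condensed sketch of
  the merged wrappers, taken from the diff below (KTR tracing omitted):

	u_int
	read_eflags(void)
	{
		vcpu_info_t *_vcpu;
		u_int eflags;

		eflags = _read_eflags();	/* raw PUSHF value */
		_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];
		if (_vcpu->evtchn_upcall_mask)	/* events masked: report PSL_I clear */
			eflags &= ~PSL_I;
		return (eflags);
	}

	void
	write_eflags(u_int eflags)
	{
		/* Mask or unmask event delivery to match PSL_I ... */
		__restore_flags((eflags & PSL_I) == 0);
		/* ... then restore the hardware flags themselves. */
		_write_eflags(eflags);
	}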

Modified:
  stable/8/sys/i386/i386/vm_machdep.c
  stable/8/sys/i386/include/cpufunc.h
  stable/8/sys/i386/xen/pmap.c
  stable/8/sys/i386/xen/xen_machdep.c

Modified: stable/8/sys/i386/i386/vm_machdep.c
==============================================================================
--- stable/8/sys/i386/i386/vm_machdep.c Wed Nov 25 01:51:07 2009	(r199772)
+++ stable/8/sys/i386/i386/vm_machdep.c Wed Nov 25 01:52:36 2009	(r199773)
@@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/pioctl.h>
 #include <sys/proc.h>
+#include <sys/sysent.h>
 #include <sys/sf_buf.h>
 #include <sys/smp.h>
 #include <sys/sched.h>
@@ -270,11 +271,7 @@ cpu_fork(td1, p2, td2, flags)
        /*
         * XXX XEN need to check on PSL_USER is handled
         */
-#ifdef XEN
-       td2->td_md.md_saved_flags = 0;
-#else  
        td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
-#endif
        /*
         * Now, cpu_switch() can schedule the new process.
         * pcb_esp is loaded pointing to the cpu_switch() stack frame
@@ -446,11 +443,7 @@ cpu_set_upcall(struct thread *td, struct
 
        /* Setup to release spin count in fork_exit(). */
        td->td_md.md_spinlock_count = 1;
-#ifdef XEN     
-       td->td_md.md_saved_flags = 0;   
-#else
        td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
-#endif
 }
 
 /*

Modified: stable/8/sys/i386/include/cpufunc.h
==============================================================================
--- stable/8/sys/i386/include/cpufunc.h Wed Nov 25 01:51:07 2009	(r199772)
+++ stable/8/sys/i386/include/cpufunc.h Wed Nov 25 01:52:36 2009	(r199773)
@@ -49,8 +49,8 @@ extern u_int xen_rcr2(void);
 extern void xen_load_cr3(u_int data);
 extern void xen_tlb_flush(void);
 extern void xen_invlpg(u_int addr);
-extern int xen_save_and_cli(void);
-extern void xen_restore_flags(u_int eflags);
+extern void write_eflags(u_int eflags);
+extern u_int read_eflags(void);
 #endif
 
 struct region_descriptor;
@@ -293,7 +293,11 @@ ia32_pause(void)
 }
 
 static __inline u_int
+#ifdef XEN
+_read_eflags(void)
+#else  
 read_eflags(void)
+#endif
 {
        u_int   ef;
 
@@ -335,7 +339,11 @@ wbinvd(void)
 }
 
 static __inline void
+#ifdef XEN
+_write_eflags(u_int ef)
+#else
 write_eflags(u_int ef)
+#endif
 {
        __asm __volatile("pushl %0; popfl" : : "r" (ef));
 }
@@ -653,23 +661,15 @@ intr_disable(void)
 {
        register_t eflags;
 
-#ifdef XEN
-       eflags = xen_save_and_cli();
-#else  
        eflags = read_eflags();
        disable_intr();
-#endif 
        return (eflags);
 }
 
 static __inline void
 intr_restore(register_t eflags)
 {
-#ifdef XEN
-       xen_restore_flags(eflags);
-#else
        write_eflags(eflags);
-#endif
 }
 
 #else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */

Modified: stable/8/sys/i386/xen/pmap.c
==============================================================================
--- stable/8/sys/i386/xen/pmap.c        Wed Nov 25 01:51:07 2009	(r199772)
+++ stable/8/sys/i386/xen/pmap.c        Wed Nov 25 01:52:36 2009	(r199773)
@@ -223,6 +223,8 @@ static uma_zone_t pdptzone;
 #endif
 #endif
 
+static int pat_works;                  /* Is page attribute table sane? */
+
 /*
  * Data for the pv entry allocation mechanism
  */
@@ -277,7 +279,7 @@ static struct mtx PMAP2mutex;
 
 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
 static int pg_ps_enabled;
-SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RD, &pg_ps_enabled, 0,
+SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
     "Are large page mappings enabled?");
 
 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
@@ -311,6 +313,7 @@ static vm_offset_t pmap_kmem_choose(vm_o
 static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr);
 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 
+static __inline void pagezero(void *page);
 
 #if defined(PAE) && !defined(XEN)
 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
@@ -328,22 +331,6 @@ CTASSERT(KERNBASE % (1 << 24) == 0);
 
 
 
-static __inline void
-pagezero(void *page)
-{
-#if defined(I686_CPU)
-       if (cpu_class == CPUCLASS_686) {
-#if defined(CPU_ENABLE_SSE)
-               if (cpu_feature & CPUID_SSE2)
-                       sse2_pagezero(page);
-               else
-#endif
-                       i686_pagezero(page);
-       } else
-#endif
-               bzero(page, PAGE_SIZE);
-}
-
 void 
 pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type)
 {
@@ -529,33 +516,36 @@ pmap_init_pat(void)
        if (!(cpu_feature & CPUID_PAT))
                return;
 
-#ifdef PAT_WORKS
-       /*
-        * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
-        * Program 4 and 5 as WP and WC.
-        * Leave 6 and 7 as UC and UC-.
-        */
-       pat_msr = rdmsr(MSR_PAT);
-       pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
-       pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
-           PAT_VALUE(5, PAT_WRITE_COMBINING);
-#else
-       /*
-        * Due to some Intel errata, we can only safely use the lower 4
-        * PAT entries.  Thus, just replace PAT Index 2 with WC instead
-        * of UC-.
-        *
-        *   Intel Pentium III Processor Specification Update
-        * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
-        * or Mode C Paging)
-        *
-        *   Intel Pentium IV  Processor Specification Update
-        * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
-        */
-       pat_msr = rdmsr(MSR_PAT);
-       pat_msr &= ~PAT_MASK(2);
-       pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
-#endif
+       if (cpu_vendor_id != CPU_VENDOR_INTEL ||
+           (CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) {
+               /*
+                * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
+                * Program 4 and 5 as WP and WC.
+                * Leave 6 and 7 as UC and UC-.
+                */
+               pat_msr = rdmsr(MSR_PAT);
+               pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
+               pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
+                   PAT_VALUE(5, PAT_WRITE_COMBINING);
+               pat_works = 1;
+       } else {
+               /*
+                * Due to some Intel errata, we can only safely use the lower 4
+                * PAT entries.  Thus, just replace PAT Index 2 with WC instead
+                * of UC-.
+                *
+                *   Intel Pentium III Processor Specification Update
+                * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
+                * or Mode C Paging)
+                *
+                *   Intel Pentium IV  Processor Specification Update
+                * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
+                */
+               pat_msr = rdmsr(MSR_PAT);
+               pat_msr &= ~PAT_MASK(2);
+               pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
+               pat_works = 0;
+       }
        wrmsr(MSR_PAT, pat_msr);
 }
 
@@ -784,44 +774,48 @@ pmap_cache_bits(int mode, boolean_t is_p
        }
        
        /* Map the caching mode to a PAT index. */
-       switch (mode) {
-#ifdef PAT_WORKS
-       case PAT_UNCACHEABLE:
-               pat_index = 3;
-               break;
-       case PAT_WRITE_THROUGH:
-               pat_index = 1;
-               break;
-       case PAT_WRITE_BACK:
-               pat_index = 0;
-               break;
-       case PAT_UNCACHED:
-               pat_index = 2;
-               break;
-       case PAT_WRITE_COMBINING:
-               pat_index = 5;
-               break;
-       case PAT_WRITE_PROTECTED:
-               pat_index = 4;
-               break;
-#else
-       case PAT_UNCACHED:
-       case PAT_UNCACHEABLE:
-       case PAT_WRITE_PROTECTED:
-               pat_index = 3;
-               break;
-       case PAT_WRITE_THROUGH:
-               pat_index = 1;
-               break;
-       case PAT_WRITE_BACK:
-               pat_index = 0;
-               break;
-       case PAT_WRITE_COMBINING:
-               pat_index = 2;
-               break;
-#endif
-       default:
-               panic("Unknown caching mode %d\n", mode);
+       if (pat_works) {
+               switch (mode) {
+                       case PAT_UNCACHEABLE:
+                               pat_index = 3;
+                               break;
+                       case PAT_WRITE_THROUGH:
+                               pat_index = 1;
+                               break;
+                       case PAT_WRITE_BACK:
+                               pat_index = 0;
+                               break;
+                       case PAT_UNCACHED:
+                               pat_index = 2;
+                               break;
+                       case PAT_WRITE_COMBINING:
+                               pat_index = 5;
+                               break;
+                       case PAT_WRITE_PROTECTED:
+                               pat_index = 4;
+                               break;
+                       default:
+                               panic("Unknown caching mode %d\n", mode);
+               }
+       } else {
+               switch (mode) {
+                       case PAT_UNCACHED:
+                       case PAT_UNCACHEABLE:
+                       case PAT_WRITE_PROTECTED:
+                               pat_index = 3;
+                               break;
+                       case PAT_WRITE_THROUGH:
+                               pat_index = 1;
+                               break;
+                       case PAT_WRITE_BACK:
+                               pat_index = 0;
+                               break;
+                       case PAT_WRITE_COMBINING:
+                               pat_index = 2;
+                               break;
+                       default:
+                               panic("Unknown caching mode %d\n", mode);
+               }
        }       
 
        /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
@@ -1735,7 +1729,7 @@ retry:
  * Deal with a SMP shootdown of other users of the pmap that we are
  * trying to dispose of.  This can be a bit hairy.
  */
-static u_int *lazymask;
+static cpumask_t *lazymask;
 static u_int lazyptd;
 static volatile u_int lazywait;
 
@@ -1744,7 +1738,7 @@ void pmap_lazyfix_action(void);
 void
 pmap_lazyfix_action(void)
 {
-       u_int mymask = PCPU_GET(cpumask);
+       cpumask_t mymask = PCPU_GET(cpumask);
 
 #ifdef COUNT_IPIS
        (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
@@ -1756,7 +1750,7 @@ pmap_lazyfix_action(void)
 }
 
 static void
-pmap_lazyfix_self(u_int mymask)
+pmap_lazyfix_self(cpumask_t mymask)
 {
 
        if (rcr3() == lazyptd)
@@ -1768,8 +1762,7 @@ pmap_lazyfix_self(u_int mymask)
 static void
 pmap_lazyfix(pmap_t pmap)
 {
-       u_int mymask;
-       u_int mask;
+       cpumask_t mymask, mask;
        u_int spins;
 
        while ((mask = pmap->pm_active) != 0) {
@@ -3110,7 +3103,7 @@ pmap_kenter_temporary(vm_paddr_t pa, int
        vm_offset_t va;
 
        va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
-       pmap_kenter(va, pa);
+       PT_SET_MA(va, (pa & ~PAGE_MASK) | PG_V | pgeflag);
        invlpg(va);
        return ((void *)crashdumpmap);
 }
@@ -3343,6 +3336,22 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
        PMAP_UNLOCK(dst_pmap);
 }      
 
+static __inline void
+pagezero(void *page)
+{
+#if defined(I686_CPU)
+       if (cpu_class == CPUCLASS_686) {
+#if defined(CPU_ENABLE_SSE)
+               if (cpu_feature & CPUID_SSE2)
+                       sse2_pagezero(page);
+               else
+#endif
+                       i686_pagezero(page);
+       } else
+#endif
+               bzero(page, PAGE_SIZE);
+}
+
 /*
  *     pmap_zero_page zeros the specified hardware page by mapping 
  *     the page into KVM and using bzero to clear its contents.
@@ -4162,7 +4171,6 @@ pmap_activate(struct thread *td)
        td->td_pcb->pcb_cr3 = cr3;
        PT_UPDATES_FLUSH();
        load_cr3(cr3);
-               
        PCPU_SET(curpmap, pmap);
        critical_exit();
 }

Modified: stable/8/sys/i386/xen/xen_machdep.c
==============================================================================
--- stable/8/sys/i386/xen/xen_machdep.c Wed Nov 25 01:51:07 2009	(r199772)
+++ stable/8/sys/i386/xen/xen_machdep.c Wed Nov 25 01:52:36 2009	(r199773)
@@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/bus.h>
+#include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/mount.h>
 #include <sys/malloc.h>
@@ -101,6 +102,7 @@ void ni_sti(void);
 void
 ni_cli(void)
 {
+       CTR0(KTR_SPARE2, "ni_cli disabling interrupts");
        __asm__("pushl %edx;"
                "pushl %eax;"
                );
@@ -345,33 +347,53 @@ xen_load_cr3(u_int val)
        PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void
-xen_restore_flags(u_int eflags)
+#ifdef KTR
+static __inline u_int
+rebp(void)
 {
-       if (eflags > 1)
-               eflags = ((eflags & PSL_I) == 0);
+       u_int   data;
 
-       __restore_flags(eflags);
+       __asm __volatile("movl 4(%%ebp),%0" : "=r" (data));     
+       return (data);
 }
+#endif
 
-int
-xen_save_and_cli(void)
+u_int
+read_eflags(void)
 {
-       int eflags;
-       
-       __save_and_cli(eflags);
+        vcpu_info_t *_vcpu;
+       u_int eflags;
+
+       eflags = _read_eflags();
+        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; 
+       if (_vcpu->evtchn_upcall_mask)
+               eflags &= ~PSL_I;
+
        return (eflags);
 }
 
 void
+write_eflags(u_int eflags)
+{
+       u_int intr;
+
+       CTR2(KTR_SPARE2, "%x xen_restore_flags eflags %x", rebp(), eflags);
+       intr = ((eflags & PSL_I) == 0);
+       __restore_flags(intr);
+       _write_eflags(eflags);
+}
+
+void
 xen_cli(void)
 {
+       CTR1(KTR_SPARE2, "%x xen_cli disabling interrupts", rebp());
        __cli();
 }
 
 void
 xen_sti(void)
 {
+       CTR1(KTR_SPARE2, "%x xen_sti enabling interrupts", rebp());
        __sti();
 }
 