This patch makes vsmp a paravirt client.  It now uses the
infrastructure provided by pvops: when we detect that we are running
on a vSMP box, we replace the irq-related paravirt operations (which
is why this has to happen quite early in boot) as well as the
patching function.

Signed-off-by: Glauber de Oliveira Costa <[EMAIL PROTECTED]>
Signed-off-by: Steven Rostedt <[EMAIL PROTECTED]>
Acked-by: Jeremy Fitzhardinge <[EMAIL PROTECTED]>
---
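
As a review aid, here is a stand-alone user-space sketch of the
"paravirt client" pattern this patch applies: a table of function
pointers starts out with native defaults and is overridden by an early
platform-detection routine.  The struct layout, detect_vsmp_box() and
the printfs below are illustrative stand-ins, not the kernel's actual
pv_ops definitions.

/*
 * User-space model of the paravirt-client pattern (compile with gcc).
 */
#include <stdio.h>
#include <stdbool.h>

struct irq_ops_model {
	void (*irq_disable)(void);
	void (*irq_enable)(void);
};

static void native_irq_disable(void)     { puts("native: cli"); }
static void native_irq_enable(void)      { puts("native: sti"); }
static void model_vsmp_irq_disable(void) { puts("vsmp: clear IF, set AC"); }
static void model_vsmp_irq_enable(void)  { puts("vsmp: set IF, clear AC"); }

static struct irq_ops_model irq_ops = {
	.irq_disable = native_irq_disable,
	.irq_enable  = native_irq_enable,
};

/* Stands in for the PCI vendor/device check done by vsmp_init(). */
static bool detect_vsmp_box(void) { return true; }

static void model_vsmp_init(void)
{
	if (!detect_vsmp_box())
		return;
	irq_ops.irq_disable = model_vsmp_irq_disable;
	irq_ops.irq_enable  = model_vsmp_irq_enable;
}

int main(void)
{
	model_vsmp_init();        /* must run before the ops are first used */
	irq_ops.irq_disable();    /* every caller goes through the table    */
	irq_ops.irq_enable();
	return 0;
}
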
 arch/x86/Kconfig.x86_64    |    3 +-
 arch/x86/kernel/setup_64.c |    3 ++
 arch/x86/kernel/vsmp_64.c  |   72 +++++++++++++++++++++++++++++++++++++++----
 include/asm-x86/setup.h    |    3 +-
 4 files changed, 71 insertions(+), 10 deletions(-)
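
Also for reference, a user-space model of the EFLAGS handling the new
irq operations implement.  native_save_fl()/native_restore_fl() are
replaced by a plain variable (fake_eflags), and reading "AC set" as the
signal to the vSMP platform that interrupts are off is an
interpretation of the code in this patch, not taken from ScaleMP
documentation.

/*
 * User-space model of the flag handling in vsmp_restore_fl().
 * The constants match asm/processor-flags.h.
 */
#include <stdio.h>

#define X86_EFLAGS_IF 0x00000200	/* interrupt enable flag */
#define X86_EFLAGS_AC 0x00040000	/* alignment check flag, reused by vSMP */

static unsigned long fake_eflags;

static unsigned long model_native_save_fl(void)      { return fake_eflags; }
static void model_native_restore_fl(unsigned long f) { fake_eflags = f; }

/* Same logic as vsmp_restore_fl(): enabling clears AC, disabling sets it. */
static void model_vsmp_restore_fl(unsigned long flags)
{
	if (flags & X86_EFLAGS_IF)
		flags &= ~X86_EFLAGS_AC;
	else
		flags |= X86_EFLAGS_AC;
	model_native_restore_fl(flags);
}

int main(void)
{
	unsigned long flags;

	flags = model_native_save_fl();
	model_vsmp_restore_fl(flags | X86_EFLAGS_IF);	/* irq_enable path  */
	printf("enabled:  IF=%d AC=%d\n",
	       !!(fake_eflags & X86_EFLAGS_IF), !!(fake_eflags & X86_EFLAGS_AC));

	flags = model_native_save_fl();
	model_vsmp_restore_fl(flags & ~X86_EFLAGS_IF);	/* irq_disable path */
	printf("disabled: IF=%d AC=%d\n",
	       !!(fake_eflags & X86_EFLAGS_IF), !!(fake_eflags & X86_EFLAGS_AC));
	return 0;
}
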

diff --git a/arch/x86/Kconfig.x86_64 b/arch/x86/Kconfig.x86_64
index 04734dd..544bad5 100644
--- a/arch/x86/Kconfig.x86_64
+++ b/arch/x86/Kconfig.x86_64
@@ -148,15 +148,14 @@ config X86_PC
        bool "PC-compatible"
        help
          Choose this option if your computer is a standard PC or compatible.
-
 config X86_VSMP
        bool "Support for ScaleMP vSMP"
        depends on PCI
+       select PARAVIRT
         help
          Support for ScaleMP vSMP systems.  Say 'Y' here if this kernel is
          supposed to run on these EM64T-based machines.  Only choose this option
          if you have one of these machines.
-
 endchoice
 
 choice
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 44a11e3..c522549 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -335,6 +335,9 @@ void __init setup_arch(char **cmdline_p)
 
        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
 
+#ifdef CONFIG_VSMP
+       vsmp_init();
+#endif
        dmi_scan_machine();
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 414caf0..547d3b3 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -8,18 +8,70 @@
  *
  * Ravikiran Thirumalai <[EMAIL PROTECTED]>,
  * Shai Fultheim <[EMAIL PROTECTED]>
+ * Paravirt ops integration: Glauber de Oliveira Costa <[EMAIL PROTECTED]>
  */
-
 #include <linux/init.h>
 #include <linux/pci_ids.h>
 #include <linux/pci_regs.h>
 #include <asm/pci-direct.h>
 #include <asm/io.h>
+#include <asm/paravirt.h>
+
+/*
+ * Interrupt control for the VSMP architecture:
+ */
+
+static inline unsigned long vsmp_save_fl(void)
+{
+       unsigned long flags = native_save_fl();
+
+       if (flags & X86_EFLAGS_IF)
+               return X86_EFLAGS_IF;
+       return 0;
+}
 
-static int __init vsmp_init(void)
+static inline void vsmp_restore_fl(unsigned long flags)
+{
+       if (flags & X86_EFLAGS_IF)
+               flags &= ~X86_EFLAGS_AC;
+       if (!(flags & X86_EFLAGS_IF))
+               flags |= X86_EFLAGS_AC;
+       native_restore_fl(flags);
+}
+
+static inline void vsmp_irq_disable(void)
+{
+       unsigned long flags = native_save_fl();
+
+       vsmp_restore_fl((flags & ~X86_EFLAGS_IF));
+}
+
+static inline void vsmp_irq_enable(void)
+{
+       unsigned long flags = native_save_fl();
+
+       vsmp_restore_fl((flags | X86_EFLAGS_IF));
+}
+
+static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
+                                 unsigned long addr, unsigned len)
+{
+       switch (type) {
+       case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
+       case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
+       case PARAVIRT_PATCH(pv_irq_ops.save_fl):
+       case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
+               return paravirt_patch_default(type, clobbers, ibuf, addr, len);
+       default:
+               return native_patch(type, clobbers, ibuf, addr, len);
+       }
+
+}
+
+int __init vsmp_init(void)
 {
        void *address;
-       unsigned int cap, ctl;
+       unsigned int cap, ctl, cfg;
 
        if (!early_pci_allowed())
                return 0;
@@ -29,8 +81,16 @@ static int __init vsmp_init(void)
            (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) != PCI_DEVICE_ID_SCALEMP_VSMP_CTL))
                return 0;
 
+       /* If we are, switch to the vSMP irq ops and patching function */
+       pv_irq_ops.irq_disable = vsmp_irq_disable;
+       pv_irq_ops.irq_enable  = vsmp_irq_enable;
+       pv_irq_ops.save_fl  = vsmp_save_fl;
+       pv_irq_ops.restore_fl  = vsmp_restore_fl;
+       pv_init_ops.patch = vsmp_patch;
+
        /* set vSMP magic bits to indicate vSMP capable kernel */
-       address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8);
+       cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
+       address = early_ioremap(cfg, 8);
        cap = readl(address);
        ctl = readl(address + 4);
        printk("vSMP CTL: capabilities:0x%08x  control:0x%08x\n", cap, ctl);
@@ -42,8 +102,6 @@ static int __init vsmp_init(void)
                printk("vSMP CTL: control set to:0x%08x\n", ctl);
        }
 
-       iounmap(address);
+       early_iounmap(address, 8);
        return 0;
 }
-
-core_initcall(vsmp_init);
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h
index 071e054..dd7996c 100644
--- a/include/asm-x86/setup.h
+++ b/include/asm-x86/setup.h
@@ -58,7 +58,8 @@ void __init add_memory_region(unsigned long long start,
 
 extern unsigned long init_pg_tables_end;
 
-
+/* For EM64T-based VSMP machines */
+int vsmp_init(void);
 
 #endif /* __i386__ */
 #endif /* _SETUP */
-- 
1.4.4.2
