nils toedtmann <[EMAIL PROTECTED]> writes:

> Hi all,
> 
> this is about combining SKAS & XEN without having any clue.

Just did that last week.  The patch below is to be applied on top of
2.6.11-rc<recent>, plus the 2.6.11-v8-rc2 skas patch, plus a fix for
that from bodo (fished from the devel list), plus a batch of xen
patches maintained by someone else ;)

The port from arch/i386 to arch/xen was mostly straightforward; the
only tricky part is entry.S, where you have to fix up the cli
instructions, which aren't allowed in arch/xen.
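
Background, in case the cli bit is unclear: the guest kernel doesn't
run in ring 0 under xen, so the privileged cli/sti instructions are
off limits, and "disabling interrupts" means masking event delivery
through the page shared with the hypervisor instead.  Roughly, as a
sketch of the idea only (not the literal macro from the xen tree;
the field names varied between versions):

	/* conceptually, what XEN_BLOCK_EVENTS boils down to */
	static inline void xen_block_events(void)
	{
		/* ask xen to hold back event (interrupt) upcalls */
		HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
		barrier();	/* mask must be visible before we go on */
	}

The real macro shows up in the syscall_skip hunk at the end of the
patch.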

Enjoy,

  Gerd

Subject: skas for arch/xen
From: [EMAIL PROTECTED]
Patch-mainline: never

Virtualization unlimited: run uml machines within xen domains ;)

---
 arch/xen/i386/Kconfig                  |    4 +
 arch/xen/i386/kernel/entry.S           |   15 +++++-
 arch/xen/i386/kernel/ldt.c             |   54 ++++++++++++++-----------
 include/asm-xen/asm-i386/desc.h        |    3 +
 include/asm-xen/asm-i386/mmu_context.h |   19 +++++++-
 include/asm-xen/asm-i386/ptrace.h      |   22 ++++++++++
 6 files changed, 90 insertions(+), 27 deletions(-)

Index: linux-2.6.10/arch/xen/i386/Kconfig
===================================================================
--- linux-2.6.10.orig/arch/xen/i386/Kconfig     2005-02-23 08:25:31.000000000 +0100
+++ linux-2.6.10/arch/xen/i386/Kconfig  2005-02-23 14:48:54.000000000 +0100
@@ -958,4 +958,8 @@ config PC
        depends on X86 && !EMBEDDED
        default y
 
+config PROC_MM
+       bool "/proc/mm support"
+       default y
+
 endmenu
Index: linux-2.6.10/include/asm-xen/asm-i386/ptrace.h
===================================================================
--- linux-2.6.10.orig/include/asm-xen/asm-i386/ptrace.h 2005-02-23 08:25:31.000000000 +0100
+++ linux-2.6.10/include/asm-xen/asm-i386/ptrace.h      2005-02-23 14:48:54.000000000 +0100
@@ -64,4 +64,26 @@ extern unsigned long profile_pc(struct p
 #endif
 #endif
 
+/*For SKAS3 support.*/
+#ifndef _LINUX_PTRACE_STRUCT_DEF
+#define _LINUX_PTRACE_STRUCT_DEF
+
+#define PTRACE_FAULTINFO         52
+#define PTRACE_SIGPENDING        53
+#define PTRACE_LDT               54
+#define PTRACE_SWITCH_MM         55
+
+struct ptrace_faultinfo {
+       int is_write;
+       unsigned long addr;
+};
+
+struct ptrace_ldt {
+       int func;
+       void *ptr;
+       unsigned long bytecount;
+};
+
+#endif /*ifndef _LINUX_PTRACE_STRUCT_DEF*/
+
 #endif
Index: linux-2.6.10/arch/xen/i386/kernel/ldt.c
===================================================================
--- linux-2.6.10.orig/arch/xen/i386/kernel/ldt.c        2005-02-23 08:25:31.000000000 +0100
+++ linux-2.6.10/arch/xen/i386/kernel/ldt.c     2005-02-23 14:58:10.000000000 +0100
@@ -18,6 +18,7 @@
 #include <asm/system.h>
 #include <asm/ldt.h>
 #include <asm/desc.h>
+#include <asm/mmu_context.h>
 
 #ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
 static void flush_ldt(void *null)
@@ -29,11 +30,12 @@ static void flush_ldt(void *null)
 }
 #endif
 
-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+static int alloc_ldt(struct mm_struct *mm, int mincount, int reload)
 {
        void *oldldt;
        void *newldt;
        int oldsize;
+       mm_context_t * pc = &mm->context;
 
        if (mincount <= pc->size)
                return 0;
@@ -63,11 +65,12 @@ static int alloc_ldt(mm_context_t *pc, i
 #endif
                make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
                                    PAGE_SIZE);
-               load_LDT(pc);
+               if (&current->active_mm->context == pc)
+                       load_LDT(pc);
                flush_page_update_queue();
 #ifdef CONFIG_SMP
                mask = cpumask_of_cpu(smp_processor_id());
-               if (!cpus_equal(current->mm->cpu_vm_mask, mask))
+               if (!cpus_equal(mm->cpu_vm_mask, mask))
                        smp_call_function(flush_ldt, NULL, 1, 1);
                preempt_enable();
 #endif
@@ -84,13 +87,13 @@ static int alloc_ldt(mm_context_t *pc, i
        return 0;
 }
 
-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+static inline int copy_ldt(struct mm_struct *new, struct mm_struct *old)
 {
-       int err = alloc_ldt(new, old->size, 0);
+       int err = alloc_ldt(new, old->context.size, 0);
        if (err < 0)
                return err;
-       memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
-       make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
+       memcpy(new->context.ldt, old->context.ldt, old->context.size*LDT_ENTRY_SIZE);
+       make_pages_readonly(new->context.ldt, (new->context.size * LDT_ENTRY_SIZE) /
                            PAGE_SIZE);
        flush_page_update_queue();
        return 0;
@@ -100,22 +103,24 @@ static inline int copy_ldt(mm_context_t 
  * we do not have to muck with descriptors here, that is
  * done in switch_mm() as needed.
  */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+int copy_context(struct mm_struct *mm, struct mm_struct *old_mm)
 {
-       struct mm_struct * old_mm;
        int retval = 0;
 
-       init_MUTEX(&mm->context.sem);
-       mm->context.size = 0;
-       old_mm = current->mm;
        if (old_mm && old_mm->context.size > 0) {
                down(&old_mm->context.sem);
-               retval = copy_ldt(&mm->context, &old_mm->context);
+               retval = copy_ldt(mm, old_mm);
                up(&old_mm->context.sem);
        }
        return retval;
 }
 
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+       init_new_empty_context(mm);
+       return copy_context(mm, current->mm);
+}
+
 /*
  * No need to lock the MM as we are the last user
  */
@@ -136,11 +141,11 @@ void destroy_context(struct mm_struct *m
        }
 }
 
-static int read_ldt(void __user * ptr, unsigned long bytecount)
+static int read_ldt(struct mm_struct * mm, void __user * ptr,
+                   unsigned long bytecount)
 {
        int err;
        unsigned long size;
-       struct mm_struct * mm = current->mm;
 
        if (!mm->context.size)
                return 0;
@@ -189,9 +194,8 @@ static int read_default_ldt(void __user 
        return err;
 }
 
-static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
+static int write_ldt(struct mm_struct * mm, void __user * ptr, unsigned long bytecount, int oldmode)
 {
-       struct mm_struct * mm = current->mm;
        __u32 entry_1, entry_2, *lp;
        unsigned long mach_lp;
        int error;
@@ -216,7 +220,7 @@ static int write_ldt(void __user * ptr, 
 
        down(&mm->context.sem);
        if (ldt_info.entry_number >= mm->context.size) {
-               error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
+               error = alloc_ldt(mm, ldt_info.entry_number+1, 1);
                if (error < 0)
                        goto out_unlock;
        }
@@ -248,23 +252,29 @@ out:
        return error;
 }
 
-asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+int __modify_ldt(struct mm_struct * mm, int func, void __user *ptr,
+              unsigned long bytecount)
 {
        int ret = -ENOSYS;
 
        switch (func) {
        case 0:
-               ret = read_ldt(ptr, bytecount);
+               ret = read_ldt(mm, ptr, bytecount);
                break;
        case 1:
-               ret = write_ldt(ptr, bytecount, 1);
+               ret = write_ldt(mm, ptr, bytecount, 1);
                break;
        case 2:
                ret = read_default_ldt(ptr, bytecount);
                break;
        case 0x11:
-               ret = write_ldt(ptr, bytecount, 0);
+               ret = write_ldt(mm, ptr, bytecount, 0);
                break;
        }
        return ret;
 }
+
+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+{
+       return __modify_ldt(current->mm, func, ptr, bytecount);
+}
Index: linux-2.6.10/include/asm-xen/asm-i386/mmu_context.h
===================================================================
--- linux-2.6.10.orig/include/asm-xen/asm-i386/mmu_context.h    2005-02-23 08:25:31.000000000 +0100
+++ linux-2.6.10/include/asm-xen/asm-i386/mmu_context.h 2005-02-23 15:00:27.000000000 +0100
@@ -6,13 +6,25 @@
 #include <asm/atomic.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
+#include <asm/semaphore.h>
 
 /*
- * Used for LDT copy/destruction.
+ * Used for LDT initialization/destruction. You cannot copy an LDT with
+ * init_new_context, since it thinks you are passing it a new LDT and won't
+ * deallocate its old content.
  */
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
+/* LDT initialization for a clean environment - needed for SKAS.*/
+static inline void init_new_empty_context(struct mm_struct *mm)
+{
+       init_MUTEX(&mm->context.sem);
+       mm->context.size = 0;
+}
+
+/* LDT copy for SKAS - for the above problem.*/
+int copy_context(struct mm_struct *mm, struct mm_struct *old_mm);
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
@@ -29,6 +41,10 @@ static inline void switch_mm(struct mm_s
 {
        int cpu = smp_processor_id();
 
+#ifdef CONFIG_SMP
+       prev = per_cpu(cpu_tlbstate, cpu).active_mm;
+#endif
+
        if (likely(prev != next)) {
                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
@@ -50,7 +66,6 @@ static inline void switch_mm(struct mm_s
 #ifdef CONFIG_SMP
        else {
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-               BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
 
                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled 
Index: linux-2.6.10/include/asm-xen/asm-i386/desc.h
===================================================================
--- linux-2.6.10.orig/include/asm-xen/asm-i386/desc.h   2005-02-23 08:25:31.000000000 +0100
+++ linux-2.6.10/include/asm-xen/asm-i386/desc.h        2005-02-23 15:03:45.000000000 +0100
@@ -128,6 +128,9 @@ static inline void load_LDT(mm_context_t
        put_cpu();
 }
 
+extern int __modify_ldt(struct mm_struct * mm, int func, void __user *ptr,
+                     unsigned long bytecount);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif
Index: linux-2.6.10/arch/xen/i386/kernel/entry.S
===================================================================
--- linux-2.6.10.orig/arch/xen/i386/kernel/entry.S      2005-02-23 08:25:31.000000000 +0100
+++ linux-2.6.10/arch/xen/i386/kernel/entry.S   2005-02-23 15:51:32.000000000 +0100
@@ -244,7 +244,7 @@ sysenter_past_esp:
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
 
-       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+       testb $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
@@ -267,8 +267,8 @@ ENTRY(system_call)
        pushl %eax                      # save orig_eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
-                                       # system call tracing in operation
-       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+                                       # system call tracing in operation / emulation
+       testb $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
@@ -344,10 +344,19 @@ syscall_trace_entry:
        movl %esp, %eax
        xorl %edx,%edx
        call do_syscall_trace
+       cmpl $0, %eax
+       jne syscall_skip                # ret != 0 -> running under PTRACE_SYSEMU,
+                                       # so must skip actual syscall
        movl ORIG_EAX(%esp), %eax
        cmpl $(nr_syscalls), %eax
        jnae syscall_call
        jmp syscall_exit
+syscall_skip:
+       XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
+                                       # setting need_resched or sigpending
+                                       # between sampling and the iret
+       movl TI_flags(%ebp), %ecx
+       jmp work_pending
 
        # perform syscall exit tracing
        ALIGN
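
Not part of the patch, just for orientation: the SKAS3 interface this
exports (/proc/mm plus the PTRACE_* extensions above) is driven from
userspace by the UML tracing thread.  A hedged sketch of the host
side, assuming the usual SKAS3 semantics, i.e. PTRACE_FAULTINFO
fetches the last page fault of a stopped child and PTRACE_SWITCH_MM
makes the child run in an address space named by an fd obtained from
/proc/mm:

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <fcntl.h>

	/* same values/layout as in the ptrace.h hunk above */
	#define PTRACE_FAULTINFO	52
	#define PTRACE_SWITCH_MM	55

	struct ptrace_faultinfo {
		int is_write;
		unsigned long addr;
	};

	/* child stopped with SIGSEGV: ask the kernel where it faulted
	 * instead of digging the address out of the signal frame */
	static int get_fault(pid_t child, struct ptrace_faultinfo *fi)
	{
		return ptrace(PTRACE_FAULTINFO, child, 0, fi);
	}

	/* create a fresh address space and switch the child into it;
	 * each open of /proc/mm yields an fd naming a new, empty mm */
	static int switch_mm(pid_t child)
	{
		int fd = open("/proc/mm", O_RDWR);

		if (fd < 0)
			return -1;
		return ptrace(PTRACE_SWITCH_MM, child, 0, (void *)(long)fd);
	}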

