[v1][PATCH 0/7] powerpc/book3e: support kexec and kdump

2013-02-26 Thread Tiejun Chen
This patchset is used to support kexec and kdump on book3e.

Tested on fsl-p5040 DS.

v1:
--
* improve some patch descriptions
* rebase on next branch with patch 7

Tiejun Chen (7):
  powerpc/book3e: support CONFIG_RELOCATABLE
  book3e/kexec/kdump: enable kexec for kernel
  book3e/kexec/kdump: create a 1:1 TLB mapping
  book3e/kexec/kdump: introduce a kexec kernel flag
  book3e/kexec/kdump: implement ppc64 kexec specific
  book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET
  book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB

 arch/powerpc/Kconfig |2 +-
 arch/powerpc/include/asm/exception-64e.h |8 
 arch/powerpc/include/asm/page.h  |2 +
 arch/powerpc/include/asm/smp.h   |3 ++
 arch/powerpc/kernel/exceptions-64e.S |   15 ++-
 arch/powerpc/kernel/head_64.S|   47 +++--
 arch/powerpc/kernel/machine_kexec_64.c   |6 +++
 arch/powerpc/kernel/misc_64.S|   67 +-
 arch/powerpc/lib/feature-fixups.c|7 
 arch/powerpc/platforms/85xx/smp.c|   27 
 10 files changed, 178 insertions(+), 6 deletions(-)

Thanks
Tiejun


[v1][PATCH 6/7] book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET

2013-02-26 Thread Tiejun Chen
Book3e always uses a 1GB-aligned TLB mapping, so we should
use (KERNELBASE - MEMORY_START) as VIRT_PHYS_OFFSET so that
__pa()/__va() translate properly while booting a kdump kernel.
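As a rough illustration (not part of the patch), __va()/__pa() depend on
this constant roughly as follows; the real kernel macros differ slightly,
so treat this as a sketch:

	#define __va(paddr) ((void *)((unsigned long)(paddr) + VIRT_PHYS_OFFSET))
	#define __pa(vaddr) ((unsigned long)(vaddr) - VIRT_PHYS_OFFSET)

With a 1GB-aligned kdump mapping, MEMORY_START rather than PHYSICAL_START
is the correct physical base to subtract.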

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/page.h |2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f072e97..2cba08a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -112,6 +112,8 @@ extern long long virt_phys_offset;
 /* See Description below for VIRT_PHYS_OFFSET */
 #ifdef CONFIG_RELOCATABLE_PPC32
 #define VIRT_PHYS_OFFSET virt_phys_offset
+#elif defined(CONFIG_PPC_BOOK3E_64)
+#define VIRT_PHYS_OFFSET (KERNELBASE - MEMORY_START)
 #else
 #define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
 #endif
-- 
1.7.9.5



[v1][PATCH 5/7] book3e/kexec/kdump: implement ppc64 kexec specific

2013-02-26 Thread Tiejun Chen
The ppc64 kexec mechanism has a different implementation from ppc32.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/platforms/85xx/smp.c |   13 +
 1 file changed, 13 insertions(+)

diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index 8beef93..af2a7e8 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -280,6 +280,7 @@ struct smp_ops_t smp_85xx_ops = {
 };
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC32
 atomic_t kexec_down_cpus = ATOMIC_INIT(0);
 
 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
@@ -298,6 +299,14 @@ static void mpc85xx_smp_kexec_down(void *arg)
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0,1);
 }
+#else
+void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+   local_irq_disable();
+   hard_irq_disable();
+   mpic_teardown_this_cpu(secondary);
+}
+#endif
 
 static void map_and_flush(unsigned long paddr)
 {
@@ -349,11 +358,14 @@ static void mpc85xx_smp_flush_dcache_kexec(struct kimage 
*image)
 
 static void mpc85xx_smp_machine_kexec(struct kimage *image)
 {
+#ifdef CONFIG_PPC32
int timeout = INT_MAX;
int i, num_cpus = num_present_cpus();
+#endif
 
mpc85xx_smp_flush_dcache_kexec(image);
 
+#ifdef CONFIG_PPC32
if (image->type == KEXEC_TYPE_DEFAULT)
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
 
@@ -371,6 +383,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
if ( i == smp_processor_id() ) continue;
mpic_reset_core(i);
}
+#endif
 
default_machine_kexec(image);
 }
-- 
1.7.9.5



[v1][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag

2013-02-26 Thread Tiejun Chen
We need to introduce a flag indicating that we're already running
a kexec kernel, so that we can take the proper path. For example, we
shouldn't access the spin_table from the bootloader to bring up any
secondary cpu for a kexec kernel, since the kexec kernel already knows
how to jump to generic_secondary_smp_init.
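A C-level sketch of the intended decision in the kick-cpu path follows
(the real change is in the smp.c hunk below; write_spin_table_entry() is a
hypothetical helper standing in for the flush/out_be32/out_be64 sequence):

	/* Sketch only: mirrors the logic added to smp_85xx_kick_cpu(). */
	if (!__run_at_kexec) {
		/* First boot: release the cpu via the bootloader spin table. */
		write_spin_table_entry(spin_table, hw_cpu,
				       generic_secondary_smp_init);
	}
	/* else: kexec'ed kernel, the secondary already spins inside this
	 * kernel image and reaches generic_secondary_smp_init on its own,
	 * so the spin table must not be touched. */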

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/smp.h|3 +++
 arch/powerpc/kernel/head_64.S |   12 
 arch/powerpc/kernel/misc_64.S |6 ++
 arch/powerpc/platforms/85xx/smp.c |   14 ++
 4 files changed, 35 insertions(+)

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 195ce2a..161a5912 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -198,6 +198,9 @@ extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+extern unsigned long __run_at_kexec;
+#endif
 
 extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index a93ca67..038e81d 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -89,6 +89,12 @@ __secondary_hold_spinloop:
 __secondary_hold_acknowledge:
.llong  0x0
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   .globl  __run_at_kexec
+__run_at_kexec:
+   .llong  0x0 /* Flag for the secondary kernel from kexec. */
+#endif
+
 #ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
 * at the loaded address instead of the linked address.  This
@@ -417,6 +423,12 @@ _STATIC(__after_prom_start)
 #if defined(CONFIG_PPC_BOOK3E)
tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
 #endif
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   /* If relocated we need to restore this flag on that relocated address. 
*/
+   ld  r7,__run_at_kexec-_stext(r26)
+   std r7,__run_at_kexec-_stext(r26)
+#endif
+
lwz r7,__run_at_load-_stext(r26)
 #if defined(CONFIG_PPC_BOOK3E)
tophys(r26,r26) /* Restore for the remains. */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ffe6043..b81f8ac 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -608,6 +608,12 @@ _GLOBAL(kexec_sequence)
bl  .copy_and_flush /* (dest, src, copy limit, start offset) */
 1: /* assume normal blr return */
 
+   /* notify we're going into kexec kernel for SMP. */
+   LOAD_REG_ADDR(r3,__run_at_kexec)
+   li  r4,1
+   std r4,0(r3)
+   sync
+
/* release other cpus to the new kernel secondary start at 0x60 */
mflrr5
li  r6,1
diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index 148c2f2..8beef93 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -150,6 +150,9 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
int hw_cpu = get_hard_smp_processor_id(nr);
int ioremappable;
int ret = 0;
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   unsigned long *ptr;
+#endif
 
WARN_ON(nr < 0 || nr >= NR_CPUS);
WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -238,11 +241,22 @@ out:
 #else
smp_generic_kick_cpu(nr);
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   ptr  = (unsigned long *)((unsigned long)&__run_at_kexec);
+   /* We shouldn't access spin_table from the bootloader to up any
+* secondary cpu for kexec kernel, and kexec kernel already
+* know how to jump to generic_secondary_smp_init.
+*/
+   if (!*ptr) {
+#endif
flush_spin_table(spin_table);
out_be32(&spin_table->pir, hw_cpu);
out_be64((u64 *)(&spin_table->addr_h),
  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
flush_spin_table(spin_table);
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   }
+#endif
 #endif
 
local_irq_restore(flags);
-- 
1.7.9.5



[v1][PATCH 3/7] book3e/kexec/kdump: create a 1:1 TLB mapping

2013-02-26 Thread Tiejun Chen
book3e has no real MMU mode, so we have to create a 1:1 TLB
mapping to make sure we can access the real physical address.
Also correct a few things to support this pseudo real mode on book3e.
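As a rough picture (illustration only, using the MAS field macros from
asm/mmu-book3e.h), the new kexec_create_tlb helper below programs the last
TLB1 entry with values equivalent to:

	/* Map virtual 0 -> physical 0 as one 1GB, protected, RWX entry. */
	u32 mas1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(BOOK3E_PAGESZ_1GB);
	u64 mas2 = 0 | (IS_ENABLED(CONFIG_SMP) ? MAS2_M : 0);	/* EPN = 0 */
	u64 mas3 = 0 | MAS3_SR | MAS3_SW | MAS3_SX;		/* RPN = 0 */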

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/head_64.S |9 ---
 arch/powerpc/kernel/misc_64.S |   55 -
 2 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b07ed784..a93ca67 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -444,12 +444,12 @@ _STATIC(__after_prom_start)
tovirt(r3,r3)   /* on booke, we already run at 
PAGE_OFFSET */
 #endif
mr. r4,r26  /* In some cases the loader may  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r4,r4)
+#endif
beq 9f  /* have already put us at zero */
li  r6,0x100/* Start offset, the first 0x100 */
/* bytes were copied earlier.*/
-#ifdef CONFIG_PPC_BOOK3E
-   tovirt(r6,r6)   /* on booke, we already run at 
PAGE_OFFSET */
-#endif
 
 #ifdef CONFIG_RELOCATABLE
 /*
@@ -492,6 +492,9 @@ _STATIC(__after_prom_start)
 p_end: .llong  _end - _stext
 
 4: /* Now copy the rest of the kernel up to _end */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26)
+#endif
addis   r5,r26,(p_end - _stext)@ha
ld  r5,(p_end - _stext)@l(r5)   /* get _end */
 5: bl  .copy_and_flush /* copy the rest */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c2acf8c..ffe6043 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -449,6 +449,49 @@ kexec_flag:
 
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC_BOOK3E
+/* BOOK3E have no a real MMU mode so we have to setup the initial TLB
+ * for a core to map v:0 to p:0 as 1:1. This current implementation
+ * assume that 1G is enough for kexec.
+ */
+#include 
+kexec_create_tlb:
+   /* Invalidate all TLBs to avoid any TLB conflict. */
+   PPC_TLBILX_ALL(0,R0)
+   sync
+   isync
+
+   mfspr   r10,SPRN_TLB1CFG
+   andi.   r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */
+   subir10,r10,1   /* Often its always safe to use last */
+   lis r9,MAS0_TLBSEL(1)@h
+   rlwimi  r9,r10,16,4,15  /* Setup MAS0 = TLBSEL | ESEL(r9) */
+
+/* Setup a temp mapping v:0 to p:0 as 1:1 and return to it.
+ */
+#ifdef CONFIG_SMP
+#define M_IF_SMP   MAS2_M
+#else
+#define M_IF_SMP   0
+#endif
+   mtspr   SPRN_MAS0,r9
+
+   lis r9,(MAS1_VALID|MAS1_IPROT)@h
+   ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+   mtspr   SPRN_MAS1,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_SMP)
+   mtspr   SPRN_MAS2,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
+   mtspr   SPRN_MAS3,r9
+   li  r9,0
+   mtspr   SPRN_MAS7,r9
+
+   tlbwe
+   isync
+   blr
+#endif
 
 /* kexec_smp_wait(void)
  *
@@ -462,6 +505,10 @@ kexec_flag:
  */
 _GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
+#ifdef CONFIG_PPC_BOOK3E
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
+#endif
bl  real_mode
 
li  r4,KEXEC_STATE_REAL_MODE
@@ -478,6 +525,7 @@ _GLOBAL(kexec_smp_wait)
  * don't overwrite r3 here, it is live for kexec_wait above.
  */
 real_mode: /* assume normal blr return */
+#ifndef CONFIG_PPC_BOOK3E
 1: li  r9,MSR_RI
li  r10,MSR_DR|MSR_IR
mflrr11 /* return address to SRR0 */
@@ -489,7 +537,10 @@ real_mode: /* assume normal blr return */
mtspr   SPRN_SRR1,r10
mtspr   SPRN_SRR0,r11
rfid
-
+#else
+   /* the real mode is nothing for book3e. */
+   blr
+#endif
 
 /*
  * kexec_sequence(newstack, start, image, control, clear_all())
@@ -538,6 +589,8 @@ _GLOBAL(kexec_sequence)
mtmsrd  r3,1
 #else
wrteei  0
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
 #endif
 
/* copy dest pages, flush whole dest image */
-- 
1.7.9.5



[v1][PATCH 7/7] book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB

2013-02-26 Thread Tiejun Chen
In commit 96f013f, "powerpc/kexec: Add kexec "hold" support for Book3e
processors", requires that GPR4 survive the "hold" process, for IBM Blue
Gene/Q with with some very strange firmware. But for FSL Book3E, r4 = 1
to indicate that the initial TLB entry for this core already exists so
we still should set r4 with 0 to create that initial TLB.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/head_64.S |4 
 1 file changed, 4 insertions(+)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 038e81d..e60f078 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -129,6 +129,10 @@ __secondary_hold:
/* Grab our physical cpu number */
mr  r24,r3
/* stash r4 for book3e */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+   /* we need to setup initial TLB entry. */
+   li  r4,0
+#endif
mr  r25,r4
 
/* Tell the master cpu we're here */
-- 
1.7.9.5



[v1][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel

2013-02-26 Thread Tiejun Chen
We need to enable KEXEC for book3e and bypass or convert the non-book3e
code in the kexec paths.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/Kconfig   |2 +-
 arch/powerpc/kernel/machine_kexec_64.c |6 ++
 arch/powerpc/kernel/misc_64.S  |6 ++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 85ff3a0..d6d1a02 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -370,7 +370,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
bool "kexec system call (EXPERIMENTAL)"
-   depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) && EXPERIMENTAL
+   depends on (PPC_BOOK3S || FSL_BOOKE || PPC_BOOK3E || (44x && !SMP)) && 
EXPERIMENTAL
help
  kexec is a system call that implements the ability to shutdown your
  current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/powerpc/kernel/machine_kexec_64.c 
b/arch/powerpc/kernel/machine_kexec_64.c
index 466a290..a1222c8 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -32,6 +32,7 @@
 int default_machine_kexec_prepare(struct kimage *image)
 {
int i;
+#ifndef CONFIG_PPC_BOOK3E
unsigned long begin, end;   /* limits of segment */
unsigned long low, high;/* limits of blocked memory range */
struct device_node *node;
@@ -40,6 +41,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 
if (!ppc_md.hpte_clear_all)
return -ENOENT;
+#endif
 
/*
 * Since we use the kernel fault handlers and paging code to
@@ -50,6 +52,7 @@ int default_machine_kexec_prepare(struct kimage *image)
if (image->segment[i].mem < __pa(_end))
return -ETXTBSY;
 
+#ifndef CONFIG_PPC_BOOK3E
/*
 * For non-LPAR, we absolutely can not overwrite the mmu hash
 * table, since we are still using the bolted entries in it to
@@ -91,6 +94,7 @@ int default_machine_kexec_prepare(struct kimage *image)
return -ETXTBSY;
}
}
+#endif
 
return 0;
 }
@@ -363,6 +367,7 @@ void default_machine_kexec(struct kimage *image)
/* NOTREACHED */
 }
 
+#ifndef CONFIG_PPC_BOOK3E
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
 
@@ -407,3 +412,4 @@ static int __init export_htab_values(void)
return 0;
 }
 late_initcall(export_htab_values);
+#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 5cfa800..c2acf8c 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -532,9 +532,13 @@ _GLOBAL(kexec_sequence)
lhz r25,PACAHWCPUID(r13)/* get our phys cpu from paca */
 
/* disable interrupts, we are overwriting kernel data next */
+#ifndef CONFIG_PPC_BOOK3E
mfmsr   r3
rlwinm  r3,r3,0,17,15
mtmsrd  r3,1
+#else
+   wrteei  0
+#endif
 
/* copy dest pages, flush whole dest image */
mr  r3,r29
@@ -556,10 +560,12 @@ _GLOBAL(kexec_sequence)
li  r6,1
stw r6,kexec_flag-1b(5)
 
+#ifndef CONFIG_PPC_BOOK3E
/* clear out hardware hash page table and tlb */
ld  r5,0(r27)   /* deref function descriptor */
mtctr   r5
bctrl   /* ppc_md.hpte_clear_all(void); */
+#endif
 
 /*
  *   kexec image calling is:
-- 
1.7.9.5



[v1][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE

2013-02-26 Thread Tiejun Chen
book3e is different from book3s: book3s includes the exception
vectors code in head_64.S because it relies on absolute addressing,
which is only possible within that compilation unit. So we have
to obtain that label address via the GOT.

Also, when booting a relocated kernel, we should set IVPR properly
again after .relocate.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/exception-64e.h |8 
 arch/powerpc/kernel/exceptions-64e.S |   15 ++-
 arch/powerpc/kernel/head_64.S|   22 ++
 arch/powerpc/lib/feature-fixups.c|7 +++
 4 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/exception-64e.h 
b/arch/powerpc/include/asm/exception-64e.h
index 51fa43e..89e940d 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -214,10 +214,18 @@ exc_##label##_book3e:
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED
 #endif
 
+#ifndef CONFIG_RELOCATABLE
 #define SET_IVOR(vector_number, vector_offset) \
li  r3,vector_offset@l; \
ori r3,r3,interrupt_base_book3e@l;  \
mtspr   SPRN_IVOR##vector_number,r3;
+#else
+#define SET_IVOR(vector_number, vector_offset) \
+   LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+   rlwinm  r3,r3,0,15,0;   \
+   ori r3,r3,vector_offset@l;  \
+   mtspr   SPRN_IVOR##vector_number,r3;
+#endif
 
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 
diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index ae54553..1e7782b 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1055,7 +1055,15 @@ skpinv:  addir6,r6,1 /* 
Increment */
  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
  */
/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+   /* We have to find out address from lr. */
+   bl  1f  /* Find our address */
+1: mflrr6
+   addir6,r6,(2f - 1b)
+   tovirt(r6,r6)
+#else
LOAD_REG_IMMEDIATE(r6,2f)
+#endif
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
mtspr   SPRN_SRR0,r6
@@ -1306,9 +1314,14 @@ _GLOBAL(book3e_secondary_thread_init)
mflrr28
b   3b
 
-_STATIC(init_core_book3e)
+_GLOBAL(init_core_book3e)
/* Establish the interrupt vector base */
+#ifdef CONFIG_RELOCATABLE
+   tovirt(r2,r2)
+   LOAD_REG_ADDR(r3, interrupt_base_book3e)
+#else
LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
+#endif
mtspr   SPRN_IVPR,r3
sync
blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 0886ae6..b07ed784 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -414,12 +414,22 @@ _STATIC(__after_prom_start)
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldir25,r25,32
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1/* flagged to stay where we are ? */
bne 1f
add r25,r25,r26
 1: mr  r3,r25
bl  .relocate
+#if defined(CONFIG_PPC_BOOK3E)
+   /* We should set ivpr again after .relocate. */
+   bl  .init_core_book3e
+#endif
 #endif
 
 /*
@@ -447,12 +457,24 @@ _STATIC(__after_prom_start)
  * variable __run_at_load, if it is set the kernel is treated as relocatable
  * kernel, otherwise it will be moved to PHYSICAL_START
  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1
bne 3f
 
+#ifdef CONFIG_PPC_BOOK3E
+   LOAD_REG_ADDR(r5, interrupt_end_book3e)
+   LOAD_REG_ADDR(r11, _stext)
+   sub r5,r5,r11
+#else
/* just copy interrupts */
LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
+#endif
b   5f
 3:
 #endif
diff --git a/arch/powerpc/lib/feature-fixups.c 
b/arch/powerpc/lib/feature-fixups.c
index 7a8a748..13f20ed 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -135,13 +135,20 @@ void do_final_fixups(void)
 #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
int *src, *dest;
unsigned long length;
+#ifdef CONFIG_PPC_BOOK3E
+   extern char interrupt_end_book3e[];
+#endif
 
if (PHYSICAL_START == 0)
return;
 

[v3][PATCH 0/6] powerpc/book3e: make kgdb to work well

2013-02-26 Thread Tiejun Chen
This patchset is used to support kgdb/gdb on book3e.

Validated on p4080ds and p5040ds with single-step and breakpoint tests

v3:

* make this work when CONFIG_RELOCATABLE is enabled
* fix one typo in the patch
  "powerpc/book3e: store critical/machine/debug exception thread info":
ld  r1,PACAKSAVE(r13);
->  ld  r14,PACAKSAVE(r13);
* remove copying the thread_info, since booke and book3e now always copy
  the thread_info when we enter the debug exception, and so drop
  the v2 patch "book3e/kgdb: Fix a single step case of lazy IRQ"

v2:

* Make sure we cover CONFIG_PPC_BOOK3E_64 safely
* Use LOAD_REG_IMMEDIATE() to properly load the value of the constant
  expression when loading the debug exception stack
* Copy thread info from the kernel stack when coming from user mode
* Rebase on the latest powerpc git tree

v1:
* Copy thread info only when we come from kernel mode, since we already get
  the kernel stack directly when coming from user mode.
* remove the save/restore of EX_R14/EX_R15 since DBG_EXCEPTION_PROLOG already
  covers this.
* use CURRENT_THREAD_INFO() to conveniently get the thread_info.
* fix some typos
* add a patch to make sure gdb can generate a single step properly to invoke a
  kgdb state.
* add a patch so that, if we need to replay an interrupt, we don't restore the
  previously backed-up thread info, ensuring we can later replay the interrupt
  with the proper thread info.
* rebase on the latest powerpc git tree

v0:
This patchset is used to support kgdb for book3e.

--
Tiejun Chen (6):
  powerpc/book3e: load critical/machine/debug exception stack
  powerpc/book3e: store critical/machine/debug exception thread info
  book3e/kgdb: update thread's dbcr0
  powerpc/book3e: support kgdb for kernel space
  kgdb/kgdbts: support ppc64
  powerpc/kgdb: remove copying the thread_info

 arch/powerpc/kernel/exceptions-64e.S |   69 --
 arch/powerpc/kernel/kgdb.c   |   41 +---
 drivers/misc/kgdbts.c|2 +
 3 files changed, 77 insertions(+), 35 deletions(-)

Tiejun


[v3][PATCH 6/6] powerpc/kgdb: remove copying the thread_info

2013-02-26 Thread Tiejun Chen
Currently BookE and Book3E always copy the thread_info from
the kernel stack when we enter the debug exception, so we can
remove this copying here to avoid doing it twice.
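For context, the copy was only ever needed because current_thread_info()
derives the thread_info from the stack pointer, roughly as in this sketch
(assumed 64-bit layout, helper name made up):

	static inline struct thread_info *ti_from_sp(unsigned long sp)
	{
		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
	}

On a separate exception stack that lookup lands on the wrong page unless the
thread_info was copied there first; with the exception entry code now doing
that copy, the workaround in kgdb_singlestep() is redundant.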

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |   28 
 1 file changed, 28 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 1a57307..e954888 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -153,39 +153,11 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
 
 static int kgdb_singlestep(struct pt_regs *regs)
 {
-   struct thread_info *thread_info, *exception_thread_info;
-   struct thread_info *backup_current_thread_info;
-
if (user_mode(regs))
return 0;
 
-   backup_current_thread_info = (struct thread_info 
*)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
-   /*
-* On Book E and perhaps other processors, singlestep is handled on
-* the critical exception stack.  This causes current_thread_info()
-* to fail, since it it locates the thread_info by masking off
-* the low bits of the current stack pointer.  We work around
-* this issue by copying the thread_info from the kernel stack
-* before calling kgdb_handle_exception, and copying it back
-* afterwards.  On most processors the copy is avoided since
-* exception_thread_info == thread_info.
-*/
-   thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
-   exception_thread_info = current_thread_info();
-
-   if (thread_info != exception_thread_info) {
-   /* Save the original current_thread_info. */
-   memcpy(backup_current_thread_info, exception_thread_info, 
sizeof *thread_info);
-   memcpy(exception_thread_info, thread_info, sizeof *thread_info);
-   }
-
kgdb_handle_exception(0, SIGTRAP, 0, regs);
 
-   if (thread_info != exception_thread_info)
-   /* Restore current_thread_info lastly. */
-   memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
-
-   kfree(backup_current_thread_info);
return 1;
 }
 
-- 
1.7.9.5



[v3][PATCH 3/6] book3e/kgdb: update thread's dbcr0

2013-02-26 Thread Tiejun Chen
gdb always needs to generate a single step properly to invoke
a kgdb state. But with lazy interrupts, book3e can't always
trigger a debug exception from a single step, since the current
task is blocked handling pending exceptions, and we then miss
the dbcr configuration needed to generate a debug exception.

So here we also update the thread's dbcr0 to make sure the current
task returns with that otherwise missed dbcr0 configuration.
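The resulting single-step setup in kgdb_arch_handle_exception() then looks
roughly like this sketch (it simply mirrors the diff below):

	unsigned long dbcr0 = mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM;

	mtspr(SPRN_DBCR0, dbcr0);
#ifdef CONFIG_PPC_BOOK3E_64
	current->thread.dbcr0 = dbcr0;	/* survives a replayed interrupt */
#endif
	linux_regs->msr |= MSR_DE;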

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |   13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 5ca82cd..1a57307 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -410,7 +410,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int 
err_code,
   struct pt_regs *linux_regs)
 {
char *ptr = &remcom_in_buffer[1];
-   unsigned long addr;
+   unsigned long addr, dbcr0;
 
switch (remcom_in_buffer[0]) {
/*
@@ -427,8 +427,15 @@ int kgdb_arch_handle_exception(int vector, int signo, int 
err_code,
/* set the trace bit if we're stepping */
if (remcom_in_buffer[0] == 's') {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-   mtspr(SPRN_DBCR0,
- mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+   dbcr0 = mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM;
+   mtspr(SPRN_DBCR0, dbcr0);
+#ifdef CONFIG_PPC_BOOK3E_64
+   /* With lazy interrut we have to update thread dbcr0 
here
+* to make sure we can set debug properly at last to 
invoke
+* kgdb again to work well.
+*/
+   current->thread.dbcr0 = dbcr0;
+#endif
linux_regs->msr |= MSR_DE;
 #else
linux_regs->msr |= MSR_SE;
-- 
1.7.9.5



[v3][PATCH 5/6] kgdb/kgdbts: support ppc64

2013-02-26 Thread Tiejun Chen
We can't look up the address of the entry point of a function simply
via that function symbol on all architectures.

For the PPC64 ABI, there is actually a function descriptor structure.

A function descriptor is a three-doubleword data structure that contains
the following values:
* The first doubleword contains the address of the entry point of
the function.
* The second doubleword contains the TOC base address for
the function.
* The third doubleword contains the environment pointer for
languages such as Pascal and PL/I.

So we should call the wrapper dereference_function_descriptor() to get
the address of the entry point of the function.

Note this is also safe for other architectures, as can be seen in
"include/asm-generic/sections.h":

dereference_function_descriptor(p) is simply (p) when no arch-specific
definition exists.
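A small sketch of the descriptor layout and of what the dereference amounts
to on PPC64 (struct and helper names here are illustrative, not the kernel's):

	struct func_desc_sketch {
		unsigned long entry;	/* address of the function's code */
		unsigned long toc;	/* TOC base loaded into r2 */
		unsigned long env;	/* environment pointer (Pascal, PL/I) */
	};

	static inline void *deref_func_desc_sketch(void *ptr)
	{
		return (void *)((struct func_desc_sketch *)ptr)->entry;
	}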

Signed-off-by: Tiejun Chen 
---
 drivers/misc/kgdbts.c |2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 3aa9a96..4799e1f 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -103,6 +103,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define v1printk(a...) do { \
if (verbose) \
@@ -222,6 +223,7 @@ static unsigned long lookup_addr(char *arg)
addr = (unsigned long)do_fork;
else if (!strcmp(arg, "hw_break_val"))
addr = (unsigned long)&hw_break_val;
+   addr = (unsigned long )dereference_function_descriptor((void *)addr);
return addr;
 }
 
-- 
1.7.9.5



[v3][PATCH 1/6] powerpc/book3e: load critical/machine/debug exception stack

2013-02-26 Thread Tiejun Chen
We always allocate separate stacks for the critical/machine check/debug
exceptions. These differ from the normal exception stack, so we should
load these exception stacks properly, like we did for booke.
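In C terms, the BOOK3E_LOAD_EXC_LEVEL_STACK macro added below does roughly
the following per CPU (a sketch; the ctx arrays are the existing
critirq_ctx/dbgirq_ctx/mcheckirq_ctx, indexed by SPRN_PIR):

	/* Pick this CPU's pre-allocated exception stack and return its top. */
	static unsigned long exc_stack_top(struct thread_info **ctx, unsigned int pir)
	{
		return (unsigned long)ctx[pir] + THREAD_SIZE;	/* stack grows down */
	}

The result is cached in the PACA so the exception prolog can switch to it.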

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   49 +++---
 1 file changed, 46 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 1e7782b..7fd6af0 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -36,6 +36,37 @@
  */
 #defineSPECIAL_EXC_FRAME_SIZE  INT_FRAME_SIZE
 
+/* only on book3e */
+#define DBG_STACK_BASE dbgirq_ctx
+#define MC_STACK_BASE  mcheckirq_ctx
+#define CRIT_STACK_BASEcritirq_ctx
+
+#ifdef CONFIG_RELOCATABLE
+#define LOAD_STACK_BASE(reg, level)\
+   tovirt(r2,r2);  \
+   LOAD_REG_ADDR(reg, level##_STACK_BASE);
+#else
+#define LOAD_STACK_BASE(reg, level)\
+   LOAD_REG_IMMEDIATE(reg, level##_STACK_BASE);
+#endif
+
+#ifdef CONFIG_SMP
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   mfspr   r14,SPRN_PIR;   \
+   slwir14,r14,3;  \
+   LOAD_STACK_BASE(r10, level);\
+   add r10,r10,r14;\
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_##level##_STACK(r13);
+#else
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   LOAD_STACK_BASE(r10, level);\
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_##level##_STACK(r13);
+#endif
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -68,20 +99,32 @@
 #define SPRN_GDBELL_SRR1   SPRN_GSRR1
 
 #define CRIT_SET_KSTACK
\
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
 
 #define DBG_SET_KSTACK \
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
 
 #define MC_SET_KSTACK  \
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
 
-- 
1.7.9.5



[v3][PATCH 4/6] powerpc/book3e: support kgdb for kernel space

2013-02-26 Thread Tiejun Chen
Currently we need to skip the userspace-only restriction in the debug
exception path to support KGDB on kernel code.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 7df9a1f..800e2a3 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -598,11 +598,14 @@ kernel_dbg_exc:
rfdi
 
/* Normal debug exception */
+1:
+#ifndef CONFIG_KGDB
/* XXX We only handle coming from userspace for now since we can't
 * quite save properly an interrupted kernel state yet
 */
-1: andi.   r14,r11,MSR_PR; /* check for userspace again */
+   andi.   r14,r11,MSR_PR; /* check for userspace again */
beq kernel_dbg_exc; /* if from kernel mode */
+#endif
 
/* Now we mash up things to make it look like we are coming on a
 * normal exception
-- 
1.7.9.5



[v3][PATCH 2/6] powerpc/book3e: store critical/machine/debug exception thread info

2013-02-26 Thread Tiejun Chen
We need to store the thread info into these exception-level thread_info
areas, as we already do for PPC32.
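Expressed in C, the assembly macro added below copies a few live thread_info
fields from the kernel stack onto the special exception stack, roughly like
this sketch (field names per the struct thread_info of that era):

	static void copy_exc_thread_info(struct thread_info *exc_ti,
					 const struct thread_info *kernel_ti)
	{
		exc_ti->flags         = kernel_ti->flags;
		exc_ti->preempt_count = kernel_ti->preempt_count;
		exc_ti->task          = kernel_ti->task;
	}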

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   15 +++
 1 file changed, 15 insertions(+)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 7fd6af0..7df9a1f 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -67,6 +67,18 @@
std r10,PACA_##level##_STACK(r13);
 #endif
 
+/* Store something to exception thread info */
+#defineBOOK3E_STORE_EXC_LEVEL_THEAD_INFO(type) 
\
+   ld  r14,PACAKSAVE(r13); 
\
+   CURRENT_THREAD_INFO(r14, r14);  
\
+   CURRENT_THREAD_INFO(r15, r1);   
\
+   ld  r10,TI_FLAGS(r14);  
\
+   std r10,TI_FLAGS(r15);  
\
+   ld  r10,TI_PREEMPT(r14);
\
+   std r10,TI_PREEMPT(r15);
\
+   ld  r10,TI_TASK(r14);   
\
+   std r10,TI_TASK(r15);
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -104,6 +116,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(CRIT);
\
 1:
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
@@ -114,6 +127,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(DBG); 
\
 1:
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
@@ -124,6 +138,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(MC);  
\
 1:
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
-- 
1.7.9.5



[PATCH 2/3] powerpc/book3e: support kgdb for kernel space

2012-10-24 Thread Tiejun Chen
Currently we need to skip the userspace-only restriction in the debug
exception path to support KGDB on kernel code.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 43b654a..c5564d4 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -566,11 +566,14 @@ kernel_dbg_exc:
rfdi
 
/* Normal debug exception */
+1:
+#ifndef CONFIG_KGDB
/* XXX We only handle coming from userspace for now since we can't
 * quite save properly an interrupted kernel state yet
 */
-1: andi.   r14,r11,MSR_PR; /* check for userspace again */
+   andi.   r14,r11,MSR_PR; /* check for userspace again */
beq kernel_dbg_exc; /* if from kernel mode */
+#endif
 
/* Now we mash up things to make it look like we are coming on a
 * normal exception
-- 
1.7.9.5



[PATCH 1/3] powerpc/book3e: load critical/machine/debug exception stack

2012-10-24 Thread Tiejun Chen
We always allocate separate stacks for the critical/machine check/debug
exceptions. These differ from the normal exception stack, so we should
load these exception stacks properly, like we did for booke.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   27 +++
 1 file changed, 27 insertions(+)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 4684e33..43b654a 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -36,6 +36,30 @@
  */
 #defineSPECIAL_EXC_FRAME_SIZE  INT_FRAME_SIZE
 
+/* only on book3e */
+#define DBG_STACK_BASE dbgirq_ctx
+#define MC_STACK_BASE  mcheckirq_ctx
+#define CRIT_STACK_BASEcritirq_ctx
+
+#ifdef CONFIG_SMP
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   std r14,PACA_EX##level+EX_R14(r13); \
+   mfspr   r14,SPRN_PIR;   \
+   slwir14,r14,3;  \
+   LOAD_REG_ADDR(r10, level##_STACK_BASE); \
+   add r10,r10,r14;\
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_DBG_STACK(r13);\
+   ld  r14,PACA_EX##level+EX_R14(r13);
+#else
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   LOAD_REG_ADDR(r10, level##_STACK_BASE); \
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_DBG_STACK(r13);
+#endif
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -68,18 +92,21 @@
 #define SPRN_GDBELL_SRR1   SPRN_GSRR1
 
 #define CRIT_SET_KSTACK
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
subir1,r1,SPECIAL_EXC_FRAME_SIZE;
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
 
 #define DBG_SET_KSTACK \
+   BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
 
 #define MC_SET_KSTACK  \
+   BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;
 #define SPRN_MC_SRR0   SPRN_MCSRR0
-- 
1.7.9.5



[PATCH 3/3] kgdb/kgdbts: support ppc64

2012-10-24 Thread Tiejun Chen
We can't look up the address of the entry point of a function simply
via that function symbol on all architectures.

For the PPC64 ABI, there is actually a function descriptor structure.

A function descriptor is a three-doubleword data structure that contains
the following values:
* The first doubleword contains the address of the entry point of
the function.
* The second doubleword contains the TOC base address for
the function.
* The third doubleword contains the environment pointer for
languages such as Pascal and PL/I.

So we should call the wrapper dereference_function_descriptor() to get
the address of the entry point of the function.

Note this is also safe for other architectures, as can be seen in
"include/asm-generic/sections.h":

dereference_function_descriptor(p) is simply (p) when no arch-specific
definition exists.

Signed-off-by: Tiejun Chen 
---
 drivers/misc/kgdbts.c |2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 3aa9a96..4799e1f 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -103,6 +103,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define v1printk(a...) do { \
if (verbose) \
@@ -222,6 +223,7 @@ static unsigned long lookup_addr(char *arg)
addr = (unsigned long)do_fork;
else if (!strcmp(arg, "hw_break_val"))
addr = (unsigned long)&hw_break_val;
+   addr = (unsigned long )dereference_function_descriptor((void *)addr);
return addr;
 }
 
-- 
1.7.9.5



[PATCH 1/1] powerpc/book3e: store critical/machine/debug exception thread info

2012-10-26 Thread Tiejun Chen
We need to store the thread info into these exception-level thread_info
areas, as we already do for PPC32.

Signed-off-by: Tiejun Chen 
---
This patch is followed on my three patches I send recently:

[PATCH 1/3] powerpc/book3e: load critical/machine/debug exception stack
[PATCH 2/3] powerpc/book3e: support kgdb for kernel space
[PATCH 3/3] kgdb/kgdbts: support ppc64

Tiejun

 arch/powerpc/kernel/exceptions-64e.S |   20 
 1 file changed, 20 insertions(+)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index c5564d4..4e7083e 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -91,10 +91,28 @@
 #define SPRN_GDBELL_SRR0   SPRN_GSRR0
 #define SPRN_GDBELL_SRR1   SPRN_GSRR1
 
+/* Store something to exception thread info */
+#defineBOOK3E_STORE_EXC_LEVEL_THEAD_INFO(type) 
\
+   std r14,PACA_EX##type+EX_R14(r13);  
\
+   std r15,PACA_EX##type+EX_R15(r13);  
\
+   ld  r14,PACA_EX##type+EX_R1(r13);   
\
+   clrrdi  r14,r14,THREAD_SHIFT;   
\
+   clrrdi  r15,r1,THREAD_SHIFT;
\
+   ld  r10,TI_FLAGS(r14);  
\
+   std r10,TI_FLAGS(r15);  
\
+   ld  r10,TI_PREEMPT(r14);
\
+   std r10,TI_PREEMPT(r1); 
\
+   ld  r10,TI_TASK(r14);   
\
+   std r10,TI_TASK(r1);
\
+   ld  r14,PACA_EX##type+EX_R14(r13);  
\
+   ld  r15,PACA_EX##type+EX_R15(r13);  
\
+1:
+
 #define CRIT_SET_KSTACK
\
BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(CRIT);
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
 
@@ -102,6 +120,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(DBG);
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
 
@@ -109,6 +128,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(MC);
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
 
-- 
1.7.9.5



[v4][PATCH 4/8] book3e/kexec/kdump: create a 1:1 TLB mapping

2013-08-25 Thread Tiejun Chen
book3e has no real MMU mode, so we have to create a 1:1 TLB
mapping to make sure we can access the real physical address.
Also correct a few things to support this pseudo real mode on book3e.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/head_64.S |9 ---
 arch/powerpc/kernel/misc_64.S |   55 -
 2 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 27cfbcd..fd42f8a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -447,12 +447,12 @@ _STATIC(__after_prom_start)
tovirt(r3,r3)   /* on booke, we already run at 
PAGE_OFFSET */
 #endif
mr. r4,r26  /* In some cases the loader may  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r4,r4)
+#endif
beq 9f  /* have already put us at zero */
li  r6,0x100/* Start offset, the first 0x100 */
/* bytes were copied earlier.*/
-#ifdef CONFIG_PPC_BOOK3E
-   tovirt(r6,r6)   /* on booke, we already run at 
PAGE_OFFSET */
-#endif
 
 #ifdef CONFIG_RELOCATABLE
 /*
@@ -495,6 +495,9 @@ _STATIC(__after_prom_start)
 p_end: .llong  _end - _stext
 
 4: /* Now copy the rest of the kernel up to _end */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26)
+#endif
addis   r5,r26,(p_end - _stext)@ha
ld  r5,(p_end - _stext)@l(r5)   /* get _end */
 5: bl  .copy_and_flush /* copy the rest */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 2d3fd07..049be29 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -480,6 +480,49 @@ kexec_flag:
 
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC_BOOK3E
+/* BOOK3E have no a real MMU mode so we have to setup the initial TLB
+ * for a core to map v:0 to p:0 as 1:1. This current implementation
+ * assume that 1G is enough for kexec.
+ */
+#include 
+kexec_create_tlb:
+   /* Invalidate all TLBs to avoid any TLB conflict. */
+   PPC_TLBILX_ALL(0,R0)
+   sync
+   isync
+
+   mfspr   r10,SPRN_TLB1CFG
+   andi.   r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */
+   subir10,r10,1   /* Often its always safe to use last */
+   lis r9,MAS0_TLBSEL(1)@h
+   rlwimi  r9,r10,16,4,15  /* Setup MAS0 = TLBSEL | ESEL(r9) */
+
+/* Setup a temp mapping v:0 to p:0 as 1:1 and return to it.
+ */
+#ifdef CONFIG_SMP
+#define M_IF_SMP   MAS2_M
+#else
+#define M_IF_SMP   0
+#endif
+   mtspr   SPRN_MAS0,r9
+
+   lis r9,(MAS1_VALID|MAS1_IPROT)@h
+   ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+   mtspr   SPRN_MAS1,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_SMP)
+   mtspr   SPRN_MAS2,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
+   mtspr   SPRN_MAS3,r9
+   li  r9,0
+   mtspr   SPRN_MAS7,r9
+
+   tlbwe
+   isync
+   blr
+#endif
 
 /* kexec_smp_wait(void)
  *
@@ -493,6 +536,10 @@ kexec_flag:
  */
 _GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
+#ifdef CONFIG_PPC_BOOK3E
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
+#endif
bl  real_mode
 
li  r4,KEXEC_STATE_REAL_MODE
@@ -509,6 +556,7 @@ _GLOBAL(kexec_smp_wait)
  * don't overwrite r3 here, it is live for kexec_wait above.
  */
 real_mode: /* assume normal blr return */
+#ifndef CONFIG_PPC_BOOK3E
 1: li  r9,MSR_RI
li  r10,MSR_DR|MSR_IR
mflrr11 /* return address to SRR0 */
@@ -520,7 +568,10 @@ real_mode: /* assume normal blr return */
mtspr   SPRN_SRR1,r10
mtspr   SPRN_SRR0,r11
rfid
-
+#else
+   /* the real mode is nothing for book3e. */
+   blr
+#endif
 
 /*
  * kexec_sequence(newstack, start, image, control, clear_all())
@@ -569,6 +620,8 @@ _GLOBAL(kexec_sequence)
mtmsrd  r3,1
 #else
wrteei  0
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
 #endif
 
/* copy dest pages, flush whole dest image */
-- 
1.7.9.5



[v4][PATCH 0/8] powerpc/book3e: support kexec and kdump

2013-08-25 Thread Tiejun Chen
Ben,

I don't see any further comments, so could you kindly merge this?

This patchset is used to support kexec and kdump on book3e.

Tested on fsl-p5040 DS.

v4:

* rebase on next branch

v3:

* add one patch to rename interrupt_end_book3e to __end_interrupts so
  we have a unique label for book3e and book3s.
* add some comments for "book3e/kexec/kdump: enable kexec for kernel"
* clean "book3e/kexec/kdump: introduce a kexec kernel flag"

v2:
* rebase on merge branch

v1:
* improve some patch descriptions
* rebase on next branch with patch 7

----
Tiejun Chen (8):
  powerpc/book3e: rename interrupt_end_book3e with __end_interrupts
  powerpc/book3e: support CONFIG_RELOCATABLE
  book3e/kexec/kdump: enable kexec for kernel
  book3e/kexec/kdump: create a 1:1 TLB mapping
  book3e/kexec/kdump: introduce a kexec kernel flag
  book3e/kexec/kdump: implement ppc64 kexec specific
  book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET
  book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB

 arch/powerpc/Kconfig |2 +-
 arch/powerpc/include/asm/exception-64e.h |   11 +++
 arch/powerpc/include/asm/page.h  |2 +
 arch/powerpc/include/asm/smp.h   |1 +
 arch/powerpc/kernel/exceptions-64e.S |   26 +-
 arch/powerpc/kernel/head_64.S|   48 +-
 arch/powerpc/kernel/machine_kexec_64.c   |  148 +-
 arch/powerpc/kernel/misc_64.S|   67 +-
 arch/powerpc/platforms/85xx/smp.c|   33 ++-
 9 files changed, 257 insertions(+), 81 deletions(-)

Tiejun


[v4][PATCH 6/8] book3e/kexec/kdump: implement ppc64 kexec specific

2013-08-25 Thread Tiejun Chen
The ppc64 kexec mechanism has a different implementation from ppc32.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/platforms/85xx/smp.c |   13 +
 1 file changed, 13 insertions(+)

diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index 549948a..137ad10 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -277,6 +277,7 @@ struct smp_ops_t smp_85xx_ops = {
 };
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC32
 atomic_t kexec_down_cpus = ATOMIC_INIT(0);
 
 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
@@ -295,6 +296,14 @@ static void mpc85xx_smp_kexec_down(void *arg)
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0,1);
 }
+#else
+void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+   local_irq_disable();
+   hard_irq_disable();
+   mpic_teardown_this_cpu(secondary);
+}
+#endif
 
 static void map_and_flush(unsigned long paddr)
 {
@@ -346,11 +355,14 @@ static void mpc85xx_smp_flush_dcache_kexec(struct kimage 
*image)
 
 static void mpc85xx_smp_machine_kexec(struct kimage *image)
 {
+#ifdef CONFIG_PPC32
int timeout = INT_MAX;
int i, num_cpus = num_present_cpus();
+#endif
 
mpc85xx_smp_flush_dcache_kexec(image);
 
+#ifdef CONFIG_PPC32
if (image->type == KEXEC_TYPE_DEFAULT)
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
 
@@ -368,6 +380,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
if ( i == smp_processor_id() ) continue;
mpic_reset_core(i);
}
+#endif
 
default_machine_kexec(image);
 }
-- 
1.7.9.5



[v4][PATCH 7/8] book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET

2013-08-25 Thread Tiejun Chen
Book3e always uses a 1GB-aligned TLB mapping, so we should
use (KERNELBASE - MEMORY_START) as VIRT_PHYS_OFFSET so that
__pa()/__va() translate properly while booting a kdump kernel.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/page.h |2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812..5b00081 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -112,6 +112,8 @@ extern long long virt_phys_offset;
 /* See Description below for VIRT_PHYS_OFFSET */
 #ifdef CONFIG_RELOCATABLE_PPC32
 #define VIRT_PHYS_OFFSET virt_phys_offset
+#elif defined(CONFIG_PPC_BOOK3E_64)
+#define VIRT_PHYS_OFFSET (KERNELBASE - MEMORY_START)
 #else
 #define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
 #endif
-- 
1.7.9.5



[v4][PATCH 5/8] book3e/kexec/kdump: introduce a kexec kernel flag

2013-08-25 Thread Tiejun Chen
We need to introduce a flag indicating that we're already running
a kexec kernel, so that we can take the proper path. For example, we
shouldn't access the spin_table from the bootloader to bring up any
secondary cpu for a kexec kernel, since the kexec kernel already knows
how to jump to generic_secondary_smp_init.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/smp.h|1 +
 arch/powerpc/kernel/head_64.S |   10 ++
 arch/powerpc/kernel/misc_64.S |6 ++
 arch/powerpc/platforms/85xx/smp.c |   20 +++-
 4 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 98da78e..92f7e61 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -207,6 +207,7 @@ extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
+extern unsigned long __run_at_kexec;
 
 extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index fd42f8a..fa74d20 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -89,6 +89,10 @@ __secondary_hold_spinloop:
 __secondary_hold_acknowledge:
.llong  0x0
 
+   .globl  __run_at_kexec
+__run_at_kexec:
+   .llong  0x0 /* Flag for the secondary kernel from kexec. */
+
 #ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
 * at the loaded address instead of the linked address.  This
@@ -426,6 +430,7 @@ _STATIC(__after_prom_start)
add r25,r25,r26
 1: mr  r3,r25
bl  .relocate
+
 #if defined(CONFIG_PPC_BOOK3E)
/* In relocatable case we always have to load the address of label 
'name'
 * to set IVPR. So after .relocate we have to update IVPR with current
@@ -463,6 +468,11 @@ _STATIC(__after_prom_start)
 #if defined(CONFIG_PPC_BOOK3E)
tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
 #endif
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   /* If relocated we need to restore this flag on that relocated address. 
*/
+   ld  r7,__run_at_kexec-_stext(r3)
+   std r7,__run_at_kexec-_stext(r26)
+#endif
lwz r7,__run_at_load-_stext(r26)
 #if defined(CONFIG_PPC_BOOK3E)
tophys(r26,r26) /* Restore for the remains. */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 049be29..adf60b6 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -639,6 +639,12 @@ _GLOBAL(kexec_sequence)
bl  .copy_and_flush /* (dest, src, copy limit, start offset) */
 1: /* assume normal blr return */
 
+   /* notify we're going into kexec kernel for SMP. */
+   LOAD_REG_ADDR(r3,__run_at_kexec)
+   li  r4,1
+   std r4,0(r3)
+   sync
+
/* release other cpus to the new kernel secondary start at 0x60 */
mflrr5
li  r6,1
diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index ea9c626..549948a 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -150,6 +150,9 @@ static int smp_85xx_kick_cpu(int nr)
int hw_cpu = get_hard_smp_processor_id(nr);
int ioremappable;
int ret = 0;
+#ifdef CONFIG_PPC64
+   unsigned long *ptr = NULL;
+#endif
 
WARN_ON(nr < 0 || nr >= NR_CPUS);
WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -238,11 +241,18 @@ out:
 #else
smp_generic_kick_cpu(nr);
 
-   flush_spin_table(spin_table);
-   out_be32(&spin_table->pir, hw_cpu);
-   out_be64((u64 *)(&spin_table->addr_h),
- __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
-   flush_spin_table(spin_table);
+   ptr  = (unsigned long *)((unsigned long)&__run_at_kexec);
+   /* We shouldn't access spin_table from the bootloader to up any
+* secondary cpu for kexec kernel, and kexec kernel already
+* know how to jump to generic_secondary_smp_init.
+*/
+   if (!*ptr) {
+   flush_spin_table(spin_table);
+   out_be32(&spin_table->pir, hw_cpu);
+   out_be64((u64 *)(&spin_table->addr_h),
+__pa((u64)*((unsigned long long 
*)generic_secondary_smp_init)));
+   flush_spin_table(spin_table);
+   }
 #endif
 
local_irq_restore(flags);
-- 
1.7.9.5



[v4][PATCH 8/8] book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB

2013-08-25 Thread Tiejun Chen
Commit 96f013f, "powerpc/kexec: Add kexec "hold" support for Book3e
processors", requires that GPR4 survive the "hold" process, for IBM
Blue Gene/Q with its very strange firmware. But on FSL Book3E, r4 = 1
indicates that the initial TLB entry for this core already exists, so
we should still set r4 to 0 to create that initial TLB.
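
A rough, standalone illustration of the register convention described
above (plain C with invented names; not the actual head_64.S flow):

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: r4 == 1 would mean "the initial TLB entry for this core
 * already exists"; the added "li r4,0" forces FSL Book3E to (re)create
 * the entry when a core comes back through the hold loop after kexec. */
static bool need_initial_tlb(unsigned long r4, bool is_fsl_book3e)
{
        if (is_fsl_book3e)
                r4 = 0;                 /* what the new "li r4,0" does */
        return r4 == 0;                 /* 0 => create the initial TLB entry */
}

int main(void)
{
        printf("FSL Book3E, firmware passed r4=1: create TLB? %d\n",
               need_initial_tlb(1, true));
        return 0;
}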

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/head_64.S |4 
 1 file changed, 4 insertions(+)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index fa74d20..001b112 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -127,6 +127,10 @@ __secondary_hold:
/* Grab our physical cpu number */
mr  r24,r3
/* stash r4 for book3e */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+   /* we need to setup initial TLB entry. */
+   li  r4,0
+#endif
mr  r25,r4
 
/* Tell the master cpu we're here */
-- 
1.7.9.5



[v4][PATCH 2/8] powerpc/book3e: support CONFIG_RELOCATABLE

2013-08-25 Thread Tiejun Chen
book3e is different from book3s since book3s includes the exception
vectors code in head_64.S, as it relies on absolute addressing, which
is only possible within this compilation unit. So we have to get that
label address via the GOT.

And when booting a relocated kernel, we should reset IVPR properly
again after .relocate.
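
As a loose illustration only (plain C with made-up addresses; the exact
mask used by the rlwinm in the patch is not modelled): the
non-relocatable SET_IVOR builds IVORn as
"vector_offset@l | interrupt_base_book3e@l", and the relocatable
variant derives the same low bits from the run-time base loaded via the
GOT:

#include <stdio.h>

/* Sketch, not the kernel macro: combine the low 16 bits of the
 * (run-time) interrupt base with the per-vector offset, which is what
 * the ori in SET_IVOR effectively produces. */
static unsigned int ivor_value(unsigned int runtime_interrupt_base,
                               unsigned int vector_offset)
{
        return (runtime_interrupt_base & 0xffffu) | (vector_offset & 0xffffu);
}

int main(void)
{
        /* pretend the relocated vectors ended up at an address ending in 0x2000 */
        printf("IVOR for offset 0x1a0: 0x%04x\n", ivor_value(0x2000u, 0x1a0u));
        return 0;
}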

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/exception-64e.h |   11 +++
 arch/powerpc/kernel/exceptions-64e.S |   18 +-
 arch/powerpc/kernel/head_64.S|   25 +
 3 files changed, 53 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/exception-64e.h 
b/arch/powerpc/include/asm/exception-64e.h
index 51fa43e..371a77f 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -214,10 +214,21 @@ exc_##label##_book3e:
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED
 #endif
 
+#ifndef CONFIG_RELOCATABLE
 #define SET_IVOR(vector_number, vector_offset) \
li  r3,vector_offset@l; \
ori r3,r3,interrupt_base_book3e@l;  \
mtspr   SPRN_IVOR##vector_number,r3;
+#else /* !CONFIG_RELOCATABLE */
+/* In relocatable case the value of the constant expression 'expr' is only
+ * offset. So instead, we should loads the address of label 'name'.
+ */
+#define SET_IVOR(vector_number, vector_offset) \
+   LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+   rlwinm  r3,r3,0,15,0;   \
+   ori r3,r3,vector_offset@l;  \
+   mtspr   SPRN_IVOR##vector_number,r3;
+#endif /* CONFIG_RELOCATABLE */
 
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 
diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 99cb68e..e71511c 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1097,7 +1097,15 @@ skpinv:  addir6,r6,1 /* 
Increment */
  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
  */
/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+   /* We have to find out address from lr. */
+   bl  1f  /* Find our address */
+1: mflrr6
+   addir6,r6,(2f - 1b)
+   tovirt(r6,r6)
+#else
LOAD_REG_IMMEDIATE(r6,2f)
+#endif
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
mtspr   SPRN_SRR0,r6
@@ -1348,9 +1356,17 @@ _GLOBAL(book3e_secondary_thread_init)
mflrr28
b   3b
 
-_STATIC(init_core_book3e)
+_GLOBAL(init_core_book3e)
/* Establish the interrupt vector base */
+#ifdef CONFIG_RELOCATABLE
+/* In relocatable case the value of the constant expression 'expr' is only
+ * offset. So instead, we should loads the address of label 'name'.
+ */
+   tovirt(r2,r2)
+   LOAD_REG_ADDR(r3, interrupt_base_book3e)
+#else
LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
+#endif
mtspr   SPRN_IVPR,r3
sync
blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3d11d80..27cfbcd 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -414,12 +414,25 @@ _STATIC(__after_prom_start)
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldir25,r25,32
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1/* flagged to stay where we are ? */
bne 1f
add r25,r25,r26
 1: mr  r3,r25
bl  .relocate
+#if defined(CONFIG_PPC_BOOK3E)
+   /* In relocatable case we always have to load the address of label 
'name'
+* to set IVPR. So after .relocate we have to update IVPR with current
+* address of label.
+*/
+   bl  .init_core_book3e
+#endif
 #endif
 
 /*
@@ -447,12 +460,24 @@ _STATIC(__after_prom_start)
  * variable __run_at_load, if it is set the kernel is treated as relocatable
  * kernel, otherwise it will be moved to PHYSICAL_START
  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1
bne 3f
 
+#ifdef CONFIG_PPC_BOOK3E
+   LOAD_REG_ADDR(r5, __end_interrupts)
+   LOAD_REG_ADDR(r11, _stext)
+   sub r5,r5,r11
+#else
/* just copy interrupts */
LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
+#endif
b   5f
 3:
 #endif
-- 
1.

[v4][PATCH 3/8] book3e/kexec/kdump: enable kexec for kernel

2013-08-25 Thread Tiejun Chen
We need to activate KEXEC for book3e and bypass or convert the
non-book3e code covered by kexec.
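
For flavour, a standalone sketch of the only check the Book3E variant
of default_machine_kexec_prepare() keeps (invented types and values;
not the kernel code):

#include <stdio.h>

/* Sketch only: on Book3E there is no hash table or TCE table to protect,
 * so the prepare hook just rejects any kexec segment that would overlap
 * the running kernel's static data/bss. */
struct seg { unsigned long mem; unsigned long memsz; };

static int check_segments(const struct seg *segs, int n,
                          unsigned long pa_kernel_end)
{
        int i;

        for (i = 0; i < n; i++)
                if (segs[i].mem < pa_kernel_end)
                        return -1;      /* stands in for -ETXTBSY */
        return 0;
}

int main(void)
{
        struct seg segs[] = { { 0x04000000UL, 0x100000UL } };

        printf("segments ok: %d\n", check_segments(segs, 1, 0x01800000UL) == 0);
        return 0;
}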

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/Kconfig   |2 +-
 arch/powerpc/kernel/machine_kexec_64.c |  148 ++--
 arch/powerpc/kernel/misc_64.S  |6 ++
 3 files changed, 89 insertions(+), 67 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 7205989..3c91ad0 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -378,7 +378,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
bool "kexec system call"
-   depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
+   depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
help
  kexec is a system call that implements the ability to shutdown your
  current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/powerpc/kernel/machine_kexec_64.c 
b/arch/powerpc/kernel/machine_kexec_64.c
index 611acdf..ee153a8 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -30,72 +30,6 @@
 #include 
 #include 
 
-int default_machine_kexec_prepare(struct kimage *image)
-{
-   int i;
-   unsigned long begin, end;   /* limits of segment */
-   unsigned long low, high;/* limits of blocked memory range */
-   struct device_node *node;
-   const unsigned long *basep;
-   const unsigned int *sizep;
-
-   if (!ppc_md.hpte_clear_all)
-   return -ENOENT;
-
-   /*
-* Since we use the kernel fault handlers and paging code to
-* handle the virtual mode, we must make sure no destination
-* overlaps kernel static data or bss.
-*/
-   for (i = 0; i < image->nr_segments; i++)
-   if (image->segment[i].mem < __pa(_end))
-   return -ETXTBSY;
-
-   /*
-* For non-LPAR, we absolutely can not overwrite the mmu hash
-* table, since we are still using the bolted entries in it to
-* do the copy.  Check that here.
-*
-* It is safe if the end is below the start of the blocked
-* region (end <= low), or if the beginning is after the
-* end of the blocked region (begin >= high).  Use the
-* boolean identity !(a || b)  === (!a && !b).
-*/
-   if (htab_address) {
-   low = __pa(htab_address);
-   high = low + htab_size_bytes;
-
-   for (i = 0; i < image->nr_segments; i++) {
-   begin = image->segment[i].mem;
-   end = begin + image->segment[i].memsz;
-
-   if ((begin < high) && (end > low))
-   return -ETXTBSY;
-   }
-   }
-
-   /* We also should not overwrite the tce tables */
-   for_each_node_by_type(node, "pci") {
-   basep = of_get_property(node, "linux,tce-base", NULL);
-   sizep = of_get_property(node, "linux,tce-size", NULL);
-   if (basep == NULL || sizep == NULL)
-   continue;
-
-   low = *basep;
-   high = low + (*sizep);
-
-   for (i = 0; i < image->nr_segments; i++) {
-   begin = image->segment[i].mem;
-   end = begin + image->segment[i].memsz;
-
-   if ((begin < high) && (end > low))
-   return -ETXTBSY;
-   }
-   }
-
-   return 0;
-}
-
 #define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)
 
 static void copy_segments(unsigned long ind)
@@ -367,6 +301,87 @@ void default_machine_kexec(struct kimage *image)
/* NOTREACHED */
 }
 
+#ifdef CONFIG_PPC_BOOK3E
+int default_machine_kexec_prepare(struct kimage *image)
+{
+   int i;
+   /*
+* Since we use the kernel fault handlers and paging code to
+* handle the virtual mode, we must make sure no destination
+* overlaps kernel static data or bss.
+*/
+   for (i = 0; i < image->nr_segments; i++)
+   if (image->segment[i].mem < __pa(_end))
+   return -ETXTBSY;
+   return 0;
+}
+#else /* CONFIG_PPC_BOOK3E */
+int default_machine_kexec_prepare(struct kimage *image)
+{
+   int i;
+   unsigned long begin, end;   /* limits of segment */
+   unsigned long low, high;/* limits of blocked memory range */
+   struct device_node *node;
+   const unsigned long *basep;
+   const unsigned int *sizep;
+
+   if (!ppc_md.hpte_clear_all)
+   return -ENOENT;
+
+   /*
+* Since we use the kernel fault handlers and paging code to
+* handle the virtual mode, we must make sure no destination
+*

[v4][PATCH 1/8] powerpc/book3e: rename interrupt_end_book3e with __end_interrupts

2013-08-25 Thread Tiejun Chen
We can rename 'interrupt_end_book3e' to '__end_interrupts' so that
book3s and book3e share this single label, which we can then use
conveniently from common code.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 2d06704..99cb68e 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -309,8 +309,8 @@ interrupt_base_book3e:  
/* fake trap */
EXCEPTION_STUB(0x300, hypercall)
EXCEPTION_STUB(0x320, ehpriv)
 
-   .globl interrupt_end_book3e
-interrupt_end_book3e:
+   .globl __end_interrupts
+__end_interrupts:
 
 /* Critical Input Interrupt */
START_EXCEPTION(critical_input);
@@ -493,7 +493,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
beq+1f
 
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
-   LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
+   LOAD_REG_IMMEDIATE(r15,__end_interrupts)
cmpld   cr0,r10,r14
cmpld   cr1,r10,r15
blt+cr0,1f
@@ -559,7 +559,7 @@ kernel_dbg_exc:
beq+1f
 
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
-   LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
+   LOAD_REG_IMMEDIATE(r15,__end_interrupts)
cmpld   cr0,r10,r14
cmpld   cr1,r10,r15
blt+cr0,1f
-- 
1.7.9.5



[v3][PATCH 8/8] book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB

2013-07-09 Thread Tiejun Chen
Commit 96f013f, "powerpc/kexec: Add kexec "hold" support for Book3e
processors", requires that GPR4 survive the "hold" process, for IBM
Blue Gene/Q with its very strange firmware. But on FSL Book3E, r4 = 1
indicates that the initial TLB entry for this core already exists, so
we should still set r4 to 0 to create that initial TLB.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/head_64.S |4 
 1 file changed, 4 insertions(+)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 0b46c9d..d546c5e 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -127,6 +127,10 @@ __secondary_hold:
/* Grab our physical cpu number */
mr  r24,r3
/* stash r4 for book3e */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+   /* we need to setup initial TLB entry. */
+   li  r4,0
+#endif
mr  r25,r4
 
/* Tell the master cpu we're here */
-- 
1.7.9.5



[v3][PATCH 7/8] book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET

2013-07-09 Thread Tiejun Chen
Book3e always creates its TLB mappings aligned to 1GB, so we should
use (KERNELBASE - MEMORY_START) as VIRT_PHYS_OFFSET to get __pa/__va
right while booting the kdump kernel.
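
A minimal standalone sketch of why the offset matters for kdump
(invented values; not the kernel's page.h):

#include <stdio.h>

/* Sketch only: __va()/__pa() on this path reduce to adding/subtracting
 * VIRT_PHYS_OFFSET.  A kdump kernel runs from a non-zero, 1GB-aligned
 * MEMORY_START, so the offset must be KERNELBASE - MEMORY_START rather
 * than KERNELBASE - PHYSICAL_START.  All values here are made up. */
#define EX_KERNELBASE           0xc000000000000000ULL
#define EX_MEMORY_START         0x40000000ULL   /* 1GB-aligned crash region */
#define EX_VIRT_PHYS_OFFSET     (EX_KERNELBASE - EX_MEMORY_START)

static unsigned long long ex_va(unsigned long long pa)
{
        return pa + EX_VIRT_PHYS_OFFSET;
}

static unsigned long long ex_pa(unsigned long long va)
{
        return va - EX_VIRT_PHYS_OFFSET;
}

int main(void)
{
        unsigned long long pa = 0x42000000ULL;  /* inside the kdump region */

        printf("__va(0x%llx) = 0x%llx, back to __pa = 0x%llx\n",
               pa, ex_va(pa), ex_pa(ex_va(pa)));
        return 0;
}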

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/page.h |2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812..5b00081 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -112,6 +112,8 @@ extern long long virt_phys_offset;
 /* See Description below for VIRT_PHYS_OFFSET */
 #ifdef CONFIG_RELOCATABLE_PPC32
 #define VIRT_PHYS_OFFSET virt_phys_offset
+#elif defined(CONFIG_PPC_BOOK3E_64)
+#define VIRT_PHYS_OFFSET (KERNELBASE - MEMORY_START)
 #else
 #define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
 #endif
-- 
1.7.9.5



[v3][PATCH 1/8] powerpc/book3e: rename interrupt_end_book3e with __end_interrupts

2013-07-09 Thread Tiejun Chen
We can rename 'interrupt_end_book3e' to '__end_interrupts' so that
book3s and book3e share this single label, which we can then use
conveniently from common code.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 645170a..a518e48 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -309,8 +309,8 @@ interrupt_base_book3e:  
/* fake trap */
EXCEPTION_STUB(0x300, hypercall)
EXCEPTION_STUB(0x320, ehpriv)
 
-   .globl interrupt_end_book3e
-interrupt_end_book3e:
+   .globl __end_interrupts
+__end_interrupts:
 
 /* Critical Input Interrupt */
START_EXCEPTION(critical_input);
@@ -493,7 +493,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
beq+1f
 
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
-   LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
+   LOAD_REG_IMMEDIATE(r15,__end_interrupts)
cmpld   cr0,r10,r14
cmpld   cr1,r10,r15
blt+cr0,1f
@@ -559,7 +559,7 @@ kernel_dbg_exc:
beq+1f
 
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
-   LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
+   LOAD_REG_IMMEDIATE(r15,__end_interrupts)
cmpld   cr0,r10,r14
cmpld   cr1,r10,r15
blt+cr0,1f
-- 
1.7.9.5



[v3][PATCH 0/8] powerpc/book3e: support kexec and kdump

2013-07-09 Thread Tiejun Chen
This patchset is used to support kexec and kdump on book3e.

Tested on fsl-p5040 DS.

v3:

* add one patch to rename interrupt_end_book3e to __end_interrupts
  so we have a unique label for book3e and book3s.
* add some comments for "book3e/kexec/kdump: enable kexec for kernel"
* clean up "book3e/kexec/kdump: introduce a kexec kernel flag"

v2:
* rebase on merge branch

v1:
* improve some patch head
* rebase on next branch with patch 7

----
Tiejun Chen (8):
  powerpc/book3e: rename interrupt_end_book3e with __end_interrupts
  powerpc/book3e: support CONFIG_RELOCATABLE
  book3e/kexec/kdump: enable kexec for kernel
  book3e/kexec/kdump: create a 1:1 TLB mapping
  book3e/kexec/kdump: introduce a kexec kernel flag
  book3e/kexec/kdump: implement ppc64 kexec specific
  book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET
  book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB

 arch/powerpc/Kconfig |2 +-
 arch/powerpc/include/asm/exception-64e.h |   11 +++
 arch/powerpc/include/asm/page.h  |2 +
 arch/powerpc/include/asm/smp.h   |1 +
 arch/powerpc/kernel/exceptions-64e.S |   26 +-
 arch/powerpc/kernel/head_64.S|   48 +-
 arch/powerpc/kernel/machine_kexec_64.c   |  148 +-
 arch/powerpc/kernel/misc_64.S|   67 +-
 arch/powerpc/platforms/85xx/smp.c|   33 ++-
 9 files changed, 257 insertions(+), 81 deletions(-)

 arch/powerpc/kernel/exceptions-64e.S |8 
 1 file changed, 4 insertions(+), 4 deletions(-)

Tiejun


[v3][PATCH 6/8] book3e/kexec/kdump: implement ppc64 kexec specific

2013-07-09 Thread Tiejun Chen
The ppc64 kexec mechanism has a different implementation from ppc32.
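
A rough sketch of the 64-bit CPU-down path added below (plain C; the
MPIC call is just a stub here, and the names are invented for the
example):

#include <stdio.h>

/* Sketch only: on the 64-bit path each CPU going down simply masks
 * interrupts and tears down its MPIC state; the ppc32-only round-up of
 * other cores via kexec_down_cpus is not used. */
static void mpic_teardown_stub(int secondary)
{
        printf("tearing down MPIC state (secondary=%d)\n", secondary);
}

static void kexec_cpu_down_64(int crash_shutdown, int secondary)
{
        (void)crash_shutdown;
        /* local_irq_disable(); hard_irq_disable(); in the real code */
        mpic_teardown_stub(secondary);
}

int main(void)
{
        kexec_cpu_down_64(0, 1);
        return 0;
}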

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/platforms/85xx/smp.c |   13 +
 1 file changed, 13 insertions(+)

diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index 14d461b..d862808 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -276,6 +276,7 @@ struct smp_ops_t smp_85xx_ops = {
 };
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC32
 atomic_t kexec_down_cpus = ATOMIC_INIT(0);
 
 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
@@ -294,6 +295,14 @@ static void mpc85xx_smp_kexec_down(void *arg)
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0,1);
 }
+#else
+void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+   local_irq_disable();
+   hard_irq_disable();
+   mpic_teardown_this_cpu(secondary);
+}
+#endif
 
 static void map_and_flush(unsigned long paddr)
 {
@@ -345,11 +354,14 @@ static void mpc85xx_smp_flush_dcache_kexec(struct kimage 
*image)
 
 static void mpc85xx_smp_machine_kexec(struct kimage *image)
 {
+#ifdef CONFIG_PPC32
int timeout = INT_MAX;
int i, num_cpus = num_present_cpus();
+#endif
 
mpc85xx_smp_flush_dcache_kexec(image);
 
+#ifdef CONFIG_PPC32
if (image->type == KEXEC_TYPE_DEFAULT)
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
 
@@ -367,6 +379,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
if ( i == smp_processor_id() ) continue;
mpic_reset_core(i);
}
+#endif
 
default_machine_kexec(image);
 }
-- 
1.7.9.5



[v3][PATCH 5/8] book3e/kexec/kdump: introduce a kexec kernel flag

2013-07-09 Thread Tiejun Chen
We need to introduce a flag to indicate we're already running a
kexec kernel, so that we can take the proper path. For example, we
shouldn't access the bootloader's spin_table to bring up any secondary
cpu for a kexec kernel, and the kexec kernel already knows how to jump
to generic_secondary_smp_init.
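
A rough standalone sketch of the resulting control flow in
smp_85xx_kick_cpu() (plain C, heavily simplified; the spin-table write
is reduced to a stub):

#include <stdio.h>

/* Simplified sketch, not the kernel code: kexec_sequence sets the flag
 * just before jumping to the new kernel; the new kernel's kick-cpu path
 * then skips the bootloader spin table because its secondaries already
 * know how to reach generic_secondary_smp_init. */
static unsigned long run_at_kexec;      /* stands in for __run_at_kexec */

static void spin_table_kick(int hw_cpu)
{
        printf("cpu %d: kicked via bootloader spin table\n", hw_cpu);
}

static void kick_cpu(int hw_cpu)
{
        if (!run_at_kexec)
                spin_table_kick(hw_cpu);
        else
                printf("cpu %d: kexec kernel, spin table skipped\n", hw_cpu);
}

int main(void)
{
        kick_cpu(1);            /* cold boot: use the spin table */
        run_at_kexec = 1;       /* what kexec_sequence's "std r4,0(r3)" records */
        kick_cpu(1);            /* kexec'd kernel: skip it */
        return 0;
}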

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/smp.h|1 +
 arch/powerpc/kernel/head_64.S |   10 ++
 arch/powerpc/kernel/misc_64.S |6 ++
 arch/powerpc/platforms/85xx/smp.c |   20 +++-
 4 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabe..59165a3 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -200,6 +200,7 @@ extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
+extern unsigned long __run_at_kexec;
 
 extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 7dc56be..0b46c9d 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -89,6 +89,10 @@ __secondary_hold_spinloop:
 __secondary_hold_acknowledge:
.llong  0x0
 
+   .globl  __run_at_kexec
+__run_at_kexec:
+   .llong  0x0 /* Flag for the secondary kernel from kexec. */
+
 #ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
 * at the loaded address instead of the linked address.  This
@@ -417,6 +421,12 @@ _STATIC(__after_prom_start)
 #if defined(CONFIG_PPC_BOOK3E)
tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
 #endif
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   /* If relocated we need to restore this flag on that relocated address. 
*/
+   ld  r7,__run_at_kexec-_stext(r26)
+   std r7,__run_at_kexec-_stext(r26)
+#endif
+
lwz r7,__run_at_load-_stext(r26)
 #if defined(CONFIG_PPC_BOOK3E)
tophys(r26,r26) /* Restore for the remains. */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 20cbb98..c89aead 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -619,6 +619,12 @@ _GLOBAL(kexec_sequence)
bl  .copy_and_flush /* (dest, src, copy limit, start offset) */
 1: /* assume normal blr return */
 
+   /* notify we're going into kexec kernel for SMP. */
+   LOAD_REG_ADDR(r3,__run_at_kexec)
+   li  r4,1
+   std r4,0(r3)
+   sync
+
/* release other cpus to the new kernel secondary start at 0x60 */
mflrr5
li  r6,1
diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index 5ced4f5..14d461b 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -150,6 +150,9 @@ static int smp_85xx_kick_cpu(int nr)
int hw_cpu = get_hard_smp_processor_id(nr);
int ioremappable;
int ret = 0;
+#ifdef CONFIG_PPC64
+   unsigned long *ptr = NULL;
+#endif
 
WARN_ON(nr < 0 || nr >= NR_CPUS);
WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -238,11 +241,18 @@ out:
 #else
smp_generic_kick_cpu(nr);
 
-   flush_spin_table(spin_table);
-   out_be32(&spin_table->pir, hw_cpu);
-   out_be64((u64 *)(&spin_table->addr_h),
- __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
-   flush_spin_table(spin_table);
+   ptr  = (unsigned long *)((unsigned long)&__run_at_kexec);
+   /* We shouldn't access spin_table from the bootloader to up any
+* secondary cpu for kexec kernel, and kexec kernel already
+* know how to jump to generic_secondary_smp_init.
+*/
+   if (!*ptr) {
+   flush_spin_table(spin_table);
+   out_be32(&spin_table->pir, hw_cpu);
+   out_be64((u64 *)(&spin_table->addr_h),
+__pa((u64)*((unsigned long long 
*)generic_secondary_smp_init)));
+   flush_spin_table(spin_table);
+   }
 #endif
 
local_irq_restore(flags);
-- 
1.7.9.5



[v3][PATCH 4/8] book3e/kexec/kdump: create a 1:1 TLB mapping

2013-07-09 Thread Tiejun Chen
book3e has no real mode (the MMU is always on), so we have to create
a 1:1 TLB mapping to make sure we can access the real physical address.
Also correct a few things to support this pseudo real mode on book3e.
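
As a tiny standalone sketch of what the 1GB 1:1 entry buys (plain C;
not the MAS programming itself, which is in the patch below):

#include <stdio.h>

/* Sketch only: under the single 1GB 1:1 TLB entry, any address below 1GB
 * translates to itself, which is what lets the kexec code touch physical
 * memory on Book3E even though there is no true MMU-off real mode. */
static long identity_translate(unsigned long va)
{
        if (va >= (1UL << 30))
                return -1;              /* outside the single 1GB entry */
        return (long)va;                /* virt == phys under the mapping */
}

int main(void)
{
        printf("0x1000 -> %ld, 0x50000000 -> %ld\n",
               identity_translate(0x1000UL), identity_translate(0x50000000UL));
        return 0;
}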

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/head_64.S |9 ---
 arch/powerpc/kernel/misc_64.S |   55 -
 2 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 550f8fb..7dc56be 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -447,12 +447,12 @@ _STATIC(__after_prom_start)
tovirt(r3,r3)   /* on booke, we already run at 
PAGE_OFFSET */
 #endif
mr. r4,r26  /* In some cases the loader may  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r4,r4)
+#endif
beq 9f  /* have already put us at zero */
li  r6,0x100/* Start offset, the first 0x100 */
/* bytes were copied earlier.*/
-#ifdef CONFIG_PPC_BOOK3E
-   tovirt(r6,r6)   /* on booke, we already run at 
PAGE_OFFSET */
-#endif
 
 #ifdef CONFIG_RELOCATABLE
 /*
@@ -495,6 +495,9 @@ _STATIC(__after_prom_start)
 p_end: .llong  _end - _stext
 
 4: /* Now copy the rest of the kernel up to _end */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26)
+#endif
addis   r5,r26,(p_end - _stext)@ha
ld  r5,(p_end - _stext)@l(r5)   /* get _end */
 5: bl  .copy_and_flush /* copy the rest */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index f1a7ce7..20cbb98 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -460,6 +460,49 @@ kexec_flag:
 
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC_BOOK3E
+/* BOOK3E have no a real MMU mode so we have to setup the initial TLB
+ * for a core to map v:0 to p:0 as 1:1. This current implementation
+ * assume that 1G is enough for kexec.
+ */
+#include 
+kexec_create_tlb:
+   /* Invalidate all TLBs to avoid any TLB conflict. */
+   PPC_TLBILX_ALL(0,R0)
+   sync
+   isync
+
+   mfspr   r10,SPRN_TLB1CFG
+   andi.   r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */
+   subir10,r10,1   /* Often its always safe to use last */
+   lis r9,MAS0_TLBSEL(1)@h
+   rlwimi  r9,r10,16,4,15  /* Setup MAS0 = TLBSEL | ESEL(r9) */
+
+/* Setup a temp mapping v:0 to p:0 as 1:1 and return to it.
+ */
+#ifdef CONFIG_SMP
+#define M_IF_SMP   MAS2_M
+#else
+#define M_IF_SMP   0
+#endif
+   mtspr   SPRN_MAS0,r9
+
+   lis r9,(MAS1_VALID|MAS1_IPROT)@h
+   ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+   mtspr   SPRN_MAS1,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_SMP)
+   mtspr   SPRN_MAS2,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
+   mtspr   SPRN_MAS3,r9
+   li  r9,0
+   mtspr   SPRN_MAS7,r9
+
+   tlbwe
+   isync
+   blr
+#endif
 
 /* kexec_smp_wait(void)
  *
@@ -473,6 +516,10 @@ kexec_flag:
  */
 _GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
+#ifdef CONFIG_PPC_BOOK3E
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
+#endif
bl  real_mode
 
li  r4,KEXEC_STATE_REAL_MODE
@@ -489,6 +536,7 @@ _GLOBAL(kexec_smp_wait)
  * don't overwrite r3 here, it is live for kexec_wait above.
  */
 real_mode: /* assume normal blr return */
+#ifndef CONFIG_PPC_BOOK3E
 1: li  r9,MSR_RI
li  r10,MSR_DR|MSR_IR
mflrr11 /* return address to SRR0 */
@@ -500,7 +548,10 @@ real_mode: /* assume normal blr return */
mtspr   SPRN_SRR1,r10
mtspr   SPRN_SRR0,r11
rfid
-
+#else
+   /* the real mode is nothing for book3e. */
+   blr
+#endif
 
 /*
  * kexec_sequence(newstack, start, image, control, clear_all())
@@ -549,6 +600,8 @@ _GLOBAL(kexec_sequence)
mtmsrd  r3,1
 #else
wrteei  0
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
 #endif
 
/* copy dest pages, flush whole dest image */
-- 
1.7.9.5



[v3][PATCH 2/8] powerpc/book3e: support CONFIG_RELOCATABLE

2013-07-09 Thread Tiejun Chen
book3e is different from book3s since book3s includes the exception
vectors code in head_64.S, as it relies on absolute addressing, which
is only possible within this compilation unit. So we have to get that
label address via the GOT.

And when booting a relocated kernel, we should reset IVPR properly
again after .relocate.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/exception-64e.h |   11 +++
 arch/powerpc/kernel/exceptions-64e.S |   18 +-
 arch/powerpc/kernel/head_64.S|   25 +
 3 files changed, 53 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/exception-64e.h 
b/arch/powerpc/include/asm/exception-64e.h
index 51fa43e..371a77f 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -214,10 +214,21 @@ exc_##label##_book3e:
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED
 #endif
 
+#ifndef CONFIG_RELOCATABLE
 #define SET_IVOR(vector_number, vector_offset) \
li  r3,vector_offset@l; \
ori r3,r3,interrupt_base_book3e@l;  \
mtspr   SPRN_IVOR##vector_number,r3;
+#else /* !CONFIG_RELOCATABLE */
+/* In relocatable case the value of the constant expression 'expr' is only
+ * offset. So instead, we should loads the address of label 'name'.
+ */
+#define SET_IVOR(vector_number, vector_offset) \
+   LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+   rlwinm  r3,r3,0,15,0;   \
+   ori r3,r3,vector_offset@l;  \
+   mtspr   SPRN_IVOR##vector_number,r3;
+#endif /* CONFIG_RELOCATABLE */
 
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 
diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index a518e48..be3b4b1 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1097,7 +1097,15 @@ skpinv:  addir6,r6,1 /* 
Increment */
  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
  */
/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+   /* We have to find out address from lr. */
+   bl  1f  /* Find our address */
+1: mflrr6
+   addir6,r6,(2f - 1b)
+   tovirt(r6,r6)
+#else
LOAD_REG_IMMEDIATE(r6,2f)
+#endif
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
mtspr   SPRN_SRR0,r6
@@ -1348,9 +1356,17 @@ _GLOBAL(book3e_secondary_thread_init)
mflrr28
b   3b
 
-_STATIC(init_core_book3e)
+_GLOBAL(init_core_book3e)
/* Establish the interrupt vector base */
+#ifdef CONFIG_RELOCATABLE
+/* In relocatable case the value of the constant expression 'expr' is only
+ * offset. So instead, we should loads the address of label 'name'.
+ */
+   tovirt(r2,r2)
+   LOAD_REG_ADDR(r3, interrupt_base_book3e)
+#else
LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
+#endif
mtspr   SPRN_IVPR,r3
sync
blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b61363d..550f8fb 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -414,12 +414,25 @@ _STATIC(__after_prom_start)
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldir25,r25,32
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1/* flagged to stay where we are ? */
bne 1f
add r25,r25,r26
 1: mr  r3,r25
bl  .relocate
+#if defined(CONFIG_PPC_BOOK3E)
+   /* In relocatable case we always have to load the address of label 
'name'
+* to set IVPR. So after .relocate we have to update IVPR with current
+* address of label.
+*/
+   bl  .init_core_book3e
+#endif
 #endif
 
 /*
@@ -447,12 +460,24 @@ _STATIC(__after_prom_start)
  * variable __run_at_load, if it is set the kernel is treated as relocatable
  * kernel, otherwise it will be moved to PHYSICAL_START
  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1
bne 3f
 
+#ifdef CONFIG_PPC_BOOK3E
+   LOAD_REG_ADDR(r5, __end_interrupts)
+   LOAD_REG_ADDR(r11, _stext)
+   sub r5,r5,r11
+#else
/* just copy interrupts */
LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
+#endif
b   5f
 3:
 #endif
-- 
1.

[v3][PATCH 3/8] book3e/kexec/kdump: enable kexec for kernel

2013-07-09 Thread Tiejun Chen
We need to activate KEXEC for book3e and bypass or convert the
non-book3e code covered by kexec.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/Kconfig   |2 +-
 arch/powerpc/kernel/machine_kexec_64.c |  148 ++--
 arch/powerpc/kernel/misc_64.S  |6 ++
 3 files changed, 89 insertions(+), 67 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5374776..d945435 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -357,7 +357,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
bool "kexec system call"
-   depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
+   depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
help
  kexec is a system call that implements the ability to shutdown your
  current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/powerpc/kernel/machine_kexec_64.c 
b/arch/powerpc/kernel/machine_kexec_64.c
index 611acdf..ee153a8 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -30,72 +30,6 @@
 #include 
 #include 
 
-int default_machine_kexec_prepare(struct kimage *image)
-{
-   int i;
-   unsigned long begin, end;   /* limits of segment */
-   unsigned long low, high;/* limits of blocked memory range */
-   struct device_node *node;
-   const unsigned long *basep;
-   const unsigned int *sizep;
-
-   if (!ppc_md.hpte_clear_all)
-   return -ENOENT;
-
-   /*
-* Since we use the kernel fault handlers and paging code to
-* handle the virtual mode, we must make sure no destination
-* overlaps kernel static data or bss.
-*/
-   for (i = 0; i < image->nr_segments; i++)
-   if (image->segment[i].mem < __pa(_end))
-   return -ETXTBSY;
-
-   /*
-* For non-LPAR, we absolutely can not overwrite the mmu hash
-* table, since we are still using the bolted entries in it to
-* do the copy.  Check that here.
-*
-* It is safe if the end is below the start of the blocked
-* region (end <= low), or if the beginning is after the
-* end of the blocked region (begin >= high).  Use the
-* boolean identity !(a || b)  === (!a && !b).
-*/
-   if (htab_address) {
-   low = __pa(htab_address);
-   high = low + htab_size_bytes;
-
-   for (i = 0; i < image->nr_segments; i++) {
-   begin = image->segment[i].mem;
-   end = begin + image->segment[i].memsz;
-
-   if ((begin < high) && (end > low))
-   return -ETXTBSY;
-   }
-   }
-
-   /* We also should not overwrite the tce tables */
-   for_each_node_by_type(node, "pci") {
-   basep = of_get_property(node, "linux,tce-base", NULL);
-   sizep = of_get_property(node, "linux,tce-size", NULL);
-   if (basep == NULL || sizep == NULL)
-   continue;
-
-   low = *basep;
-   high = low + (*sizep);
-
-   for (i = 0; i < image->nr_segments; i++) {
-   begin = image->segment[i].mem;
-   end = begin + image->segment[i].memsz;
-
-   if ((begin < high) && (end > low))
-   return -ETXTBSY;
-   }
-   }
-
-   return 0;
-}
-
 #define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)
 
 static void copy_segments(unsigned long ind)
@@ -367,6 +301,87 @@ void default_machine_kexec(struct kimage *image)
/* NOTREACHED */
 }
 
+#ifdef CONFIG_PPC_BOOK3E
+int default_machine_kexec_prepare(struct kimage *image)
+{
+   int i;
+   /*
+* Since we use the kernel fault handlers and paging code to
+* handle the virtual mode, we must make sure no destination
+* overlaps kernel static data or bss.
+*/
+   for (i = 0; i < image->nr_segments; i++)
+   if (image->segment[i].mem < __pa(_end))
+   return -ETXTBSY;
+   return 0;
+}
+#else /* CONFIG_PPC_BOOK3E */
+int default_machine_kexec_prepare(struct kimage *image)
+{
+   int i;
+   unsigned long begin, end;   /* limits of segment */
+   unsigned long low, high;/* limits of blocked memory range */
+   struct device_node *node;
+   const unsigned long *basep;
+   const unsigned int *sizep;
+
+   if (!ppc_md.hpte_clear_all)
+   return -ENOENT;
+
+   /*
+* Since we use the kernel fault handlers and paging code to
+* handle the virtual mode, we must make sure no destination
+*

[PATCH 2/6] book3e/kexec/kdump: enable kexec for kernel

2012-11-15 Thread Tiejun Chen
We need to activate KEXEC for book3e and bypass or convert the
non-book3e code covered by kexec.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/Kconfig   |2 +-
 arch/powerpc/kernel/machine_kexec_64.c |6 ++
 arch/powerpc/kernel/misc_64.S  |6 ++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a902a5c..3000cab8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -357,7 +357,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
bool "kexec system call (EXPERIMENTAL)"
-   depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) && EXPERIMENTAL
+   depends on (PPC_BOOK3S || FSL_BOOKE || PPC_BOOK3E || (44x && !SMP)) && 
EXPERIMENTAL
help
  kexec is a system call that implements the ability to shutdown your
  current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/powerpc/kernel/machine_kexec_64.c 
b/arch/powerpc/kernel/machine_kexec_64.c
index d7f6090..2c0cbf0 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -32,6 +32,7 @@
 int default_machine_kexec_prepare(struct kimage *image)
 {
int i;
+#ifndef CONFIG_PPC_BOOK3E
unsigned long begin, end;   /* limits of segment */
unsigned long low, high;/* limits of blocked memory range */
struct device_node *node;
@@ -40,6 +41,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 
if (!ppc_md.hpte_clear_all)
return -ENOENT;
+#endif
 
/*
 * Since we use the kernel fault handlers and paging code to
@@ -50,6 +52,7 @@ int default_machine_kexec_prepare(struct kimage *image)
if (image->segment[i].mem < __pa(_end))
return -ETXTBSY;
 
+#ifndef CONFIG_PPC_BOOK3E
/*
 * For non-LPAR, we absolutely can not overwrite the mmu hash
 * table, since we are still using the bolted entries in it to
@@ -91,6 +94,7 @@ int default_machine_kexec_prepare(struct kimage *image)
return -ETXTBSY;
}
}
+#endif
 
return 0;
 }
@@ -358,6 +362,7 @@ void default_machine_kexec(struct kimage *image)
/* NOTREACHED */
 }
 
+#ifndef CONFIG_PPC_BOOK3E
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
 
@@ -402,3 +407,4 @@ static int __init export_htab_values(void)
return 0;
 }
 late_initcall(export_htab_values);
+#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 5cfa800..c2acf8c 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -532,9 +532,13 @@ _GLOBAL(kexec_sequence)
lhz r25,PACAHWCPUID(r13)/* get our phys cpu from paca */
 
/* disable interrupts, we are overwriting kernel data next */
+#ifndef CONFIG_PPC_BOOK3E
mfmsr   r3
rlwinm  r3,r3,0,17,15
mtmsrd  r3,1
+#else
+   wrteei  0
+#endif
 
/* copy dest pages, flush whole dest image */
mr  r3,r29
@@ -556,10 +560,12 @@ _GLOBAL(kexec_sequence)
li  r6,1
stw r6,kexec_flag-1b(5)
 
+#ifndef CONFIG_PPC_BOOK3E
/* clear out hardware hash page table and tlb */
ld  r5,0(r27)   /* deref function descriptor */
mtctr   r5
bctrl   /* ppc_md.hpte_clear_all(void); */
+#endif
 
 /*
  *   kexec image calling is:
-- 
1.7.9.5



[PATCH 6/6] book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET

2012-11-15 Thread Tiejun Chen
Book3e always creates its TLB mappings aligned to 1GB, so we should
use (KERNELBASE - MEMORY_START) as VIRT_PHYS_OFFSET to get __pa/__va
right while booting the kdump kernel.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/page.h |2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f072e97..2cba08a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -112,6 +112,8 @@ extern long long virt_phys_offset;
 /* See Description below for VIRT_PHYS_OFFSET */
 #ifdef CONFIG_RELOCATABLE_PPC32
 #define VIRT_PHYS_OFFSET virt_phys_offset
+#elif defined(CONFIG_PPC_BOOK3E_64)
+#define VIRT_PHYS_OFFSET (KERNELBASE - MEMORY_START)
 #else
 #define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
 #endif
-- 
1.7.9.5



[PATCH 4/6] book3e/kexec/kdump: introduce a kexec kernel flag

2012-11-15 Thread Tiejun Chen
We need to introduce a flag to indicate we're already running a
kexec kernel, so that we can take the proper path. For example, we
shouldn't access the bootloader's spin_table to bring up any secondary
cpu for a kexec kernel, and the kexec kernel already knows how to jump
to generic_secondary_smp_init.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/smp.h|3 +++
 arch/powerpc/kernel/head_64.S |   12 
 arch/powerpc/kernel/misc_64.S |6 ++
 arch/powerpc/platforms/85xx/smp.c |   14 ++
 4 files changed, 35 insertions(+)

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index e807e9d..aadbe9b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -190,6 +190,9 @@ extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
+#ifdef CONFIG_KEXEC
+extern unsigned long __run_at_kexec;
+#endif
 
 extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d51ffc0..9c30d9f 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -89,6 +89,12 @@ __secondary_hold_spinloop:
 __secondary_hold_acknowledge:
.llong  0x0
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   .globl  __run_at_kexec
+__run_at_kexec:
+   .llong  0x0 /* Flag for the secondary kernel from kexec. */
+#endif
+
 #ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
 * at the loaded address instead of the linked address.  This
@@ -441,6 +447,12 @@ _STATIC(__after_prom_start)
 #if defined(CONFIG_PPC_BOOK3E)
tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
 #endif
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   /* If relocated we need to restore this flag on that relocated address. 
*/
+   ld  r7,__run_at_kexec-_stext(r3)
+   std r7,__run_at_kexec-_stext(r26)
+#endif
+
lwz r7,__run_at_load-_stext(r26)
 #if defined(CONFIG_PPC_BOOK3E)
tophys(r26,r26) /* Restore for the remains. */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ffe6043..b81f8ac 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -608,6 +608,12 @@ _GLOBAL(kexec_sequence)
bl  .copy_and_flush /* (dest, src, copy limit, start offset) */
 1: /* assume normal blr return */
 
+   /* notify we're going into kexec kernel for SMP. */
+   LOAD_REG_ADDR(r3,__run_at_kexec)
+   li  r4,1
+   std r4,0(r3)
+   sync
+
/* release other cpus to the new kernel secondary start at 0x60 */
mflrr5
li  r6,1
diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index 6fcfa12..c7febd5 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -137,6 +137,9 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
int hw_cpu = get_hard_smp_processor_id(nr);
int ioremappable;
int ret = 0;
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   unsigned long *ptr;
+#endif
 
WARN_ON(nr < 0 || nr >= NR_CPUS);
WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -213,6 +216,14 @@ out:
 #else
smp_generic_kick_cpu(nr);
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   ptr  = (unsigned long *)((unsigned long)&__run_at_kexec);
+   /* We shouldn't access spin_table from the bootloader to up any
+* secondary cpu for kexec kernel, and kexec kernel already
+* know how to jump to generic_secondary_smp_init.
+*/
+   if (!*ptr) {
+#endif
out_be32(&spin_table->pir, hw_cpu);
out_be64((u64 *)(&spin_table->addr_h),
  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
@@ -220,6 +231,9 @@ out:
if (!ioremappable)
flush_dcache_range((ulong)spin_table,
(ulong)spin_table + sizeof(struct epapr_spin_table));
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   }
+#endif
 #endif
 
local_irq_restore(flags);
-- 
1.7.9.5



[PATCH 0/6] powerpc/book3e: support kexec and kdump

2012-11-15 Thread Tiejun Chen
This patchset is used to support kexec and kdump on book3e.

Tested on fsl-p5040 DS.

Tiejun Chen (6):
  powerpc/book3e: support CONFIG_RELOCATABLE
  book3e/kexec/kdump: enable kexec for kernel
  book3e/kexec/kdump: create a 1:1 TLB mapping
  book3e/kexec/kdump: introduce a kexec kernel flag
  book3e/kexec/kdump: skip ppc32 kexec specific
  book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET

 arch/powerpc/Kconfig |2 +-
 arch/powerpc/include/asm/exception-64e.h |8 
 arch/powerpc/include/asm/page.h  |2 +
 arch/powerpc/include/asm/smp.h   |3 ++
 arch/powerpc/kernel/exceptions-64e.S |   15 ++-
 arch/powerpc/kernel/head_64.S|   43 +--
 arch/powerpc/kernel/machine_kexec_64.c   |6 +++
 arch/powerpc/kernel/misc_64.S|   67 +-
 arch/powerpc/lib/feature-fixups.c|7 
 arch/powerpc/platforms/85xx/smp.c|   26 
 10 files changed, 173 insertions(+), 6 deletions(-)

Tiejun


[PATCH 5/6] book3e/kexec/kdump: skip ppc32 kexec specific

2012-11-15 Thread Tiejun Chen
The ppc64 kexec mechanism has a different implementation from ppc32,
so skip that ppc32-specific code.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/platforms/85xx/smp.c |   12 
 1 file changed, 12 insertions(+)

diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index c7febd5..d3ec57c 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -257,6 +257,7 @@ struct smp_ops_t smp_85xx_ops = {
 };
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC32
 atomic_t kexec_down_cpus = ATOMIC_INIT(0);
 
 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
@@ -275,6 +276,13 @@ static void mpc85xx_smp_kexec_down(void *arg)
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0,1);
 }
+#else
+void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+   local_irq_disable();
+   mpic_teardown_this_cpu(secondary);
+}
+#endif
 
 static void map_and_flush(unsigned long paddr)
 {
@@ -326,11 +334,14 @@ static void mpc85xx_smp_flush_dcache_kexec(struct kimage 
*image)
 
 static void mpc85xx_smp_machine_kexec(struct kimage *image)
 {
+#ifdef CONFIG_PPC32
int timeout = INT_MAX;
int i, num_cpus = num_present_cpus();
+#endif
 
mpc85xx_smp_flush_dcache_kexec(image);
 
+#ifdef CONFIG_PPC32
if (image->type == KEXEC_TYPE_DEFAULT)
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
 
@@ -348,6 +359,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
if ( i == smp_processor_id() ) continue;
mpic_reset_core(i);
}
+#endif
 
default_machine_kexec(image);
 }
-- 
1.7.9.5



[PATCH 1/6] powerpc/book3e: support CONFIG_RELOCATABLE

2012-11-15 Thread Tiejun Chen
book3e is different from book3s since book3s includes the exception
vectors code in head_64.S, as it relies on absolute addressing, which
is only possible within this compilation unit. So we have to get that
label address via the GOT.

And when booting a relocated kernel, we should reset IVPR properly
again after .relocate.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/exception-64e.h |8 
 arch/powerpc/kernel/exceptions-64e.S |   15 ++-
 arch/powerpc/kernel/head_64.S|   22 ++
 arch/powerpc/lib/feature-fixups.c|7 +++
 4 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/exception-64e.h 
b/arch/powerpc/include/asm/exception-64e.h
index 51fa43e..89e940d 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -214,10 +214,18 @@ exc_##label##_book3e:
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED
 #endif
 
+#ifndef CONFIG_RELOCATABLE
 #define SET_IVOR(vector_number, vector_offset) \
li  r3,vector_offset@l; \
ori r3,r3,interrupt_base_book3e@l;  \
mtspr   SPRN_IVOR##vector_number,r3;
+#else
+#define SET_IVOR(vector_number, vector_offset) \
+   LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+   rlwinm  r3,r3,0,15,0;   \
+   ori r3,r3,vector_offset@l;  \
+   mtspr   SPRN_IVOR##vector_number,r3;
+#endif
 
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 
diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 4e7083e..82be30b 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1104,7 +1104,15 @@ skpinv:  addir6,r6,1 /* 
Increment */
  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
  */
/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+   /* We have to find out address from lr. */
+   bl  1f  /* Find our address */
+1: mflrr6
+   addir6,r6,(2f - 1b)
+   tovirt(r6,r6)
+#else
LOAD_REG_IMMEDIATE(r6,2f)
+#endif
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
mtspr   SPRN_SRR0,r6
@@ -1355,9 +1363,14 @@ _GLOBAL(book3e_secondary_thread_init)
mflrr28
b   3b
 
-_STATIC(init_core_book3e)
+_GLOBAL(init_core_book3e)
/* Establish the interrupt vector base */
+#ifdef CONFIG_RELOCATABLE
+   tovirt(r2,r2)
+   LOAD_REG_ADDR(r3, interrupt_base_book3e)
+#else
LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
+#endif
mtspr   SPRN_IVPR,r3
sync
blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 9e07bd0..aa7df52 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -395,12 +395,22 @@ _STATIC(__after_prom_start)
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldir25,r25,32
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1/* flagged to stay where we are ? */
bne 1f
add r25,r25,r26
 1: mr  r3,r25
bl  .relocate
+#if defined(CONFIG_PPC_BOOK3E)
+   /* We should set ivpr again after .relocate. */
+   bl  .init_core_book3e
+#endif
 #endif
 
 /*
@@ -428,11 +438,23 @@ _STATIC(__after_prom_start)
  * variable __run_at_load, if it is set the kernel is treated as relocatable
  * kernel, otherwise it will be moved to PHYSICAL_START
  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1
bne 3f
 
+#ifdef CONFIG_PPC_BOOK3E
+   LOAD_REG_ADDR(r5, interrupt_end_book3e)
+   LOAD_REG_ADDR(r11, _stext)
+   sub r5,r5,r11
+#else
li  r5,__end_interrupts - _stext/* just copy interrupts */
+#endif
b   5f
 3:
 #endif
diff --git a/arch/powerpc/lib/feature-fixups.c 
b/arch/powerpc/lib/feature-fixups.c
index 7a8a748..13f20ed 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -135,13 +135,20 @@ void do_final_fixups(void)
 #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
int *src, *dest;
unsigned long length;
+#ifdef CONFIG_PPC_BOOK3E
+   extern char interrupt_end_book3e[];
+#endif
 
if (PHYSICAL_START == 0)
return;
 
src = (int *

[PATCH 3/6] book3e/kexec/kdump: create a 1:1 TLB mapping

2012-11-15 Thread Tiejun Chen
book3e has no real mode (the MMU is always on), so we have to create
a 1:1 TLB mapping to make sure we can access the real physical address.
Also correct a few things to support this pseudo real mode on book3e.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/head_64.S |9 ---
 arch/powerpc/kernel/misc_64.S |   55 -
 2 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index aa7df52..d51ffc0 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -425,12 +425,12 @@ _STATIC(__after_prom_start)
tovirt(r3,r3)   /* on booke, we already run at 
PAGE_OFFSET */
 #endif
mr. r4,r26  /* In some cases the loader may  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r4,r4)
+#endif
beq 9f  /* have already put us at zero */
li  r6,0x100/* Start offset, the first 0x100 */
/* bytes were copied earlier.*/
-#ifdef CONFIG_PPC_BOOK3E
-   tovirt(r6,r6)   /* on booke, we already run at 
PAGE_OFFSET */
-#endif
 
 #ifdef CONFIG_RELOCATABLE
 /*
@@ -472,6 +472,9 @@ _STATIC(__after_prom_start)
 p_end: .llong  _end - _stext
 
 4: /* Now copy the rest of the kernel up to _end */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26)
+#endif
addis   r5,r26,(p_end - _stext)@ha
ld  r5,(p_end - _stext)@l(r5)   /* get _end */
 5: bl  .copy_and_flush /* copy the rest */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c2acf8c..ffe6043 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -449,6 +449,49 @@ kexec_flag:
 
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC_BOOK3E
+/* BOOK3E have no a real MMU mode so we have to setup the initial TLB
+ * for a core to map v:0 to p:0 as 1:1. This current implementation
+ * assume that 1G is enough for kexec.
+ */
+#include 
+kexec_create_tlb:
+   /* Invalidate all TLBs to avoid any TLB conflict. */
+   PPC_TLBILX_ALL(0,R0)
+   sync
+   isync
+
+   mfspr   r10,SPRN_TLB1CFG
+   andi.   r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */
+   subir10,r10,1   /* Often its always safe to use last */
+   lis r9,MAS0_TLBSEL(1)@h
+   rlwimi  r9,r10,16,4,15  /* Setup MAS0 = TLBSEL | ESEL(r9) */
+
+/* Setup a temp mapping v:0 to p:0 as 1:1 and return to it.
+ */
+#ifdef CONFIG_SMP
+#define M_IF_SMP   MAS2_M
+#else
+#define M_IF_SMP   0
+#endif
+   mtspr   SPRN_MAS0,r9
+
+   lis r9,(MAS1_VALID|MAS1_IPROT)@h
+   ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+   mtspr   SPRN_MAS1,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_SMP)
+   mtspr   SPRN_MAS2,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
+   mtspr   SPRN_MAS3,r9
+   li  r9,0
+   mtspr   SPRN_MAS7,r9
+
+   tlbwe
+   isync
+   blr
+#endif
 
 /* kexec_smp_wait(void)
  *
@@ -462,6 +505,10 @@ kexec_flag:
  */
 _GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
+#ifdef CONFIG_PPC_BOOK3E
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
+#endif
bl  real_mode
 
li  r4,KEXEC_STATE_REAL_MODE
@@ -478,6 +525,7 @@ _GLOBAL(kexec_smp_wait)
  * don't overwrite r3 here, it is live for kexec_wait above.
  */
 real_mode: /* assume normal blr return */
+#ifndef CONFIG_PPC_BOOK3E
 1: li  r9,MSR_RI
li  r10,MSR_DR|MSR_IR
mflrr11 /* return address to SRR0 */
@@ -489,7 +537,10 @@ real_mode: /* assume normal blr return */
mtspr   SPRN_SRR1,r10
mtspr   SPRN_SRR0,r11
rfid
-
+#else
+   /* the real mode is nothing for book3e. */
+   blr
+#endif
 
 /*
  * kexec_sequence(newstack, start, image, control, clear_all())
@@ -538,6 +589,8 @@ _GLOBAL(kexec_sequence)
mtmsrd  r3,1
 #else
wrteei  0
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
 #endif
 
/* copy dest pages, flush whole dest image */
-- 
1.7.9.5



[v1][PATCH 1/6] powerpc/book3e: load critical/machine/debug exception stack

2012-12-20 Thread Tiejun Chen
We always allocate stacks for the critical/machine check/debug
exceptions. This is different from the normal exceptions, so we should
load these exception stacks properly, as we already do for booke.
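
A standalone sketch of the address computation that
BOOK3E_LOAD_EXC_LEVEL_STACK performs (invented sizes and names; the
real code indexes by SPRN_PIR and stores the result into the PACA):

#include <stdio.h>

/* Sketch only: pick this CPU's stack base out of a per-CPU pointer array
 * (critirq_ctx/dbgirq_ctx/mcheckirq_ctx in the patch) and point at the
 * top of that stack. */
#define EX_THREAD_SIZE  16384UL
#define EX_NR_CPUS      4

static unsigned long ex_critirq_ctx[EX_NR_CPUS];   /* per-CPU stack bases */

static unsigned long exc_stack_top(int cpu)
{
        return ex_critirq_ctx[cpu] + EX_THREAD_SIZE;
}

int main(void)
{
        ex_critirq_ctx[1] = 0x100000UL;     /* pretend allocation for CPU 1 */
        printf("critical exception stack top for cpu 1: 0x%lx\n",
               exc_stack_top(1));
        return 0;
}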

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   40 +++---
 1 file changed, 37 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 4684e33..376e3d1 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -36,6 +36,28 @@
  */
 #defineSPECIAL_EXC_FRAME_SIZE  INT_FRAME_SIZE
 
+/* only on book3e */
+#define DBG_STACK_BASE dbgirq_ctx
+#define MC_STACK_BASE  mcheckirq_ctx
+#define CRIT_STACK_BASEcritirq_ctx
+
+#ifdef CONFIG_SMP
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   mfspr   r14,SPRN_PIR;   \
+   slwir14,r14,3;  \
+   LOAD_REG_ADDR(r10, level##_STACK_BASE); \
+   add r10,r10,r14;\
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_##level##_STACK(r13);
+#else
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   LOAD_REG_ADDR(r10, level##_STACK_BASE); \
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_##level##_STACK(r13);
+#endif
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -68,20 +90,32 @@
 #define SPRN_GDBELL_SRR1   SPRN_GSRR1
 
 #define CRIT_SET_KSTACK
\
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
 
 #define DBG_SET_KSTACK \
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
 
 #define MC_SET_KSTACK  \
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
 
-- 
1.7.9.5



[v1][PATCH 0/6] powerpc/book3e: make kgdb work well

2012-12-20 Thread Tiejun Chen
v1:
* Copy thread info only when we come from !user mode, since we would otherwise
  get a kernel stack coming directly from user space.
* remove save/restore of EX_R14/EX_R15 since DBG_EXCEPTION_PROLOG already covers
  this.
* use CURRENT_THREAD_INFO() to conveniently get the thread info.
* fix some typos
* add a patch to make sure gdb can generate a single step properly to invoke a
  kgdb state.
* add a patch so that if we need to replay an interrupt, we don't restore the
  previous backup thread info, ensuring the interrupt can be replayed later with
  a proper thread info.
* rebase on the latest powerpc git tree

v0:
This patchset is used to support kgdb for book3e.

Tiejun Chen (6):
  powerpc/book3e: load critical/machine/debug exception stack
  powerpc/book3e: store critical/machine/debug exception thread info
  book3e/kgdb: update thread's dbcr0
  book3e/kgdb: Fix a single step case of lazy IRQ
  powerpc/book3e: support kgdb for kernel space
  kgdb/kgdbts: support ppc64

 arch/powerpc/kernel/exceptions-64e.S |   60 +++---
 arch/powerpc/kernel/irq.c|   10 ++
 arch/powerpc/kernel/kgdb.c   |   13 +++-
 drivers/misc/kgdbts.c|2 ++
 4 files changed, 80 insertions(+), 5 deletions(-)

Tiejun


[v1][PATCH 2/6] powerpc/book3e: store critical/machine/debug exception thread info

2012-12-20 Thread Tiejun Chen
We need to copy the current thread info into these exceptions' thread info,
like we already do for PPC32.
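
A minimal C sketch of what the new macro copies (illustration only; "src" is
the interrupted stack's thread_info, "dst" the exception stack's thread_info):

	static void copy_exc_thread_info(struct thread_info *dst,
					 const struct thread_info *src)
	{
		/* the fields behind TI_FLAGS, TI_PREEMPT and TI_TASK */
		dst->flags = src->flags;
		dst->preempt_count = src->preempt_count;
		dst->task = src->task;
	}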

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   15 +++
 1 file changed, 15 insertions(+)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 376e3d1..9a99cba 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -58,6 +58,18 @@
std r10,PACA_##level##_STACK(r13);
 #endif
 
+/* Store something to exception thread info */
+#defineBOOK3E_STORE_EXC_LEVEL_THEAD_INFO(type) 
\
+   ld  r14,PACA_EX##type+EX_R1(r13);   
\
+   CURRENT_THREAD_INFO(r14, r14);  
\
+   CURRENT_THREAD_INFO(r15, r1);   
\
+   ld  r10,TI_FLAGS(r14);  
\
+   std r10,TI_FLAGS(r15);  
\
+   ld  r10,TI_PREEMPT(r14);
\
+   std r10,TI_PREEMPT(r15);
\
+   ld  r10,TI_TASK(r14);   
\
+   std r10,TI_TASK(r15);
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -95,6 +107,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(CRIT);
\
 1:
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
@@ -105,6 +118,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(DBG); 
\
 1:
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
@@ -115,6 +129,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(MC);  
\
 1:
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
-- 
1.7.9.5



[v1][PATCH 3/6] book3e/kgdb: update thread's dbcr0

2012-12-20 Thread Tiejun Chen
gdb always needs to generate a single step properly to invoke a kgdb
state. But with lazy interrupts, book3e can't always trigger a debug
exception on a single step, since the current task is busy handling the
pending exceptions, so we lose the expected dbcr configuration and no
debug exception is generated.

So here we also update the thread's dbcr0 to make sure the current task
resumes with that otherwise-missed dbcr0 configuration.
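
In C terms the idea is simply to mirror the SPR update into the thread struct
so the single-step request survives the replay (sketch of the change below):

	unsigned long dbcr0 = mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM;

	mtspr(SPRN_DBCR0, dbcr0);		/* arm the instruction-complete event */
	current->thread.dbcr0 = dbcr0;		/* keep it for when DBCR0 is reloaded */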

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |   10 ++
 1 file changed, 10 insertions(+)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index c470a40..516b44b 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -426,8 +426,18 @@ int kgdb_arch_handle_exception(int vector, int signo, int 
err_code,
/* set the trace bit if we're stepping */
if (remcom_in_buffer[0] == 's') {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
+#ifdef CONFIG_PPC_BOOK3E
+   /* With lazy interrut we have to update thread dbcr0 
here
+* to make sure we can set debug properly at last to 
invoke
+* kgdb again to work well.
+*/
+   current->thread.dbcr0 =
+   mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM;
+   mtspr(SPRN_DBCR0, current->thread.dbcr0);
+#else
mtspr(SPRN_DBCR0,
  mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+#endif
linux_regs->msr |= MSR_DE;
 #else
linux_regs->msr |= MSR_SE;
-- 
1.7.9.5



[v1][PATCH 6/6] kgdb/kgdbts: support ppc64

2012-12-20 Thread Tiejun Chen
We can't look up the address of the entry point of the function simply
via that function symbol for all architectures.

For PPC64 ABI, actually there is a function descriptors structure.

A function descriptor is a three doubleword data structure that contains
the following values:
* The first doubleword contains the address of the entry point of
the function.
* The second doubleword contains the TOC base address for
the function.
* The third doubleword contains the environment pointer for
languages such as Pascal and PL/1.

So we should call the wrapped dereference_function_descriptor() to get
the address of the entry point of the function.

Note this is also safe for other architectures; referring to
"include/asm-generic/sections.h":

dereference_function_descriptor(p) is simply (p) when no
architecture-specific definition exists.
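
For illustration only, a ppc64 (ELFv1) function descriptor can be pictured as
the struct below; the type and field names are made up for this sketch, the
real helper is dereference_function_descriptor():

	struct func_desc_sketch {
		unsigned long entry;	/* address of the function's first instruction */
		unsigned long toc;	/* TOC base address for the function */
		unsigned long env;	/* environment pointer (Pascal, PL/1, ...) */
	};

	static unsigned long entry_point(void *func_symbol)
	{
		/* what dereference_function_descriptor() effectively does on ppc64 */
		return ((struct func_desc_sketch *)func_symbol)->entry;
	}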

Signed-off-by: Tiejun Chen 
---
 drivers/misc/kgdbts.c |2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 3aa9a96..4799e1f 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -103,6 +103,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define v1printk(a...) do { \
if (verbose) \
@@ -222,6 +223,7 @@ static unsigned long lookup_addr(char *arg)
addr = (unsigned long)do_fork;
else if (!strcmp(arg, "hw_break_val"))
addr = (unsigned long)&hw_break_val;
+   addr = (unsigned long )dereference_function_descriptor((void *)addr);
return addr;
 }
 
-- 
1.7.9.5



[v1][PATCH 5/6] powerpc/book3e: support kgdb for kernel space

2012-12-20 Thread Tiejun Chen
To support KGDB we currently need to skip the userspace-only check here, so a
debug exception taken from kernel mode is handled as a normal debug exception.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 9a99cba..a24baef 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -588,11 +588,14 @@ kernel_dbg_exc:
rfdi
 
/* Normal debug exception */
+1:
+#ifndef CONFIG_KGDB
/* XXX We only handle coming from userspace for now since we can't
 * quite save properly an interrupted kernel state yet
 */
-1: andi.   r14,r11,MSR_PR; /* check for userspace again */
+   andi.   r14,r11,MSR_PR; /* check for userspace again */
beq kernel_dbg_exc; /* if from kernel mode */
+#endif
 
/* Now we mash up things to make it look like we are coming on a
 * normal exception
-- 
1.7.9.5



[v1][PATCH 4/6] book3e/kgdb: Fix a single step case of lazy IRQ

2012-12-20 Thread Tiejun Chen
When we're in kgdb_singlestep(), we have to work around to get the
thread_info by copying it from the kernel stack before calling
kgdb_handle_exception(), then copying it back afterwards.

But PPC64 has a lazy interrupt implementation. So after copying the
thread info from the kernel stack, if we need to replay an interrupt we
shouldn't restore that previous backup thread info, so that the
interrupt can be replayed later with a proper thread info.

This patch uses __check_irq_replay() to guard that restore.
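
Simplified, the resulting flow in kgdb_singlestep() looks like this (sketch
only, with the backup area shown as a local struct):

	struct thread_info backup;

	memcpy(&backup, exception_thread_info, sizeof(backup));
	memcpy(exception_thread_info, thread_info, sizeof(*thread_info));

	kgdb_handle_exception(0, SIGTRAP, 0, regs);

	/* don't undo the copy while an interrupt still has to be replayed */
	if (thread_info != exception_thread_info && !check_irq_replay())
		memcpy(exception_thread_info, &backup, sizeof(backup));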

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/irq.c  |   10 ++
 arch/powerpc/kernel/kgdb.c |3 ++-
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 71413f4..30a9cb0 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -332,7 +332,17 @@ bool prep_irq_for_idle(void)
return true;
 }
 
+notrace unsigned int check_irq_replay(void)
+{
+   return __check_irq_replay();
+}
+#else
+notrace unsigned int check_irq_replay(void)
+{
+   return 0;
+}
 #endif /* CONFIG_PPC64 */
+EXPORT_SYMBOL(check_irq_replay);
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 516b44b..6a48db9 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -151,6 +151,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 1;
 }
 
+extern notrace unsigned int check_irq_replay(void);
 static int kgdb_singlestep(struct pt_regs *regs)
 {
struct thread_info *thread_info, *exception_thread_info;
@@ -181,7 +182,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
 
kgdb_handle_exception(0, SIGTRAP, 0, regs);
 
-   if (thread_info != exception_thread_info)
+   if ((thread_info != exception_thread_info) && (!check_irq_replay()))
/* Restore current_thread_info lastly. */
memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
 
-- 
1.7.9.5



[v2][PATCH 7/7] book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB

2013-06-20 Thread Tiejun Chen
Commit 96f013f, "powerpc/kexec: Add kexec "hold" support for Book3e
processors", requires that GPR4 survive the "hold" process, for IBM Blue
Gene/Q with its very strange firmware. But on FSL Book3E, r4 = 1 indicates
that the initial TLB entry for this core already exists, so we should still
set r4 to 0 to create that initial TLB.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/head_64.S |4 
 1 file changed, 4 insertions(+)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index ffa4b18..63ed1c3 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -129,6 +129,10 @@ __secondary_hold:
/* Grab our physical cpu number */
mr  r24,r3
/* stash r4 for book3e */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+   /* we need to setup initial TLB entry. */
+   li  r4,0
+#endif
mr  r25,r4
 
/* Tell the master cpu we're here */
-- 
1.7.9.5



[v2][PATCH 6/7] book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET

2013-06-20 Thread Tiejun Chen
Book3E always creates 1GB-aligned TLB entries, so we should use
(KERNELBASE - MEMORY_START) as VIRT_PHYS_OFFSET to get __pa()/__va()
right while booting the kdump kernel.
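
For reference, VIRT_PHYS_OFFSET is what the __va()/__pa() helpers in page.h
are built on, roughly (simplified, not the literal kernel macros):

	#define __va(paddr)	((void *)((unsigned long)(paddr) + VIRT_PHYS_OFFSET))
	#define __pa(vaddr)	((unsigned long)(vaddr) - VIRT_PHYS_OFFSET)

so with VIRT_PHYS_OFFSET = KERNELBASE - MEMORY_START, a kdump kernel loaded
and mapped at MEMORY_START still translates addresses consistently.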

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/page.h |2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812..5b00081 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -112,6 +112,8 @@ extern long long virt_phys_offset;
 /* See Description below for VIRT_PHYS_OFFSET */
 #ifdef CONFIG_RELOCATABLE_PPC32
 #define VIRT_PHYS_OFFSET virt_phys_offset
+#elif defined(CONFIG_PPC_BOOK3E_64)
+#define VIRT_PHYS_OFFSET (KERNELBASE - MEMORY_START)
 #else
 #define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
 #endif
-- 
1.7.9.5



[v2][PATCH 3/7] book3e/kexec/kdump: create a 1:1 TLB mapping

2013-06-20 Thread Tiejun Chen
book3e has no real MMU mode, so we have to create a 1:1 TLB mapping to
make sure we can access the real physical address, and correct a few
things to support this pseudo real mode on book3e.
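
Expressed in C, the temporary TLB entry written by the new kexec_create_tlb
helper in the diff below is roughly the following (sketch; "last_entry" is the
highest entry number read from TLB1CFG, and mtspr()/MAS* are the usual Book3E
definitions):

	/* 1GB, IPROT'ed v:0 -> p:0 mapping placed in TLB1's last entry */
	mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(last_entry));
	mtspr(SPRN_MAS1, MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(BOOK3E_PAGESZ_1GB));
	mtspr(SPRN_MAS2, 0);				/* EPN = 0, plus MAS2_M on SMP */
	mtspr(SPRN_MAS3, MAS3_SR | MAS3_SW | MAS3_SX);	/* RPN = 0, RWX permissions */
	mtspr(SPRN_MAS7, 0);
	asm volatile("tlbwe; isync");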

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/head_64.S |9 ---
 arch/powerpc/kernel/misc_64.S |   55 -
 2 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 0942f3a..3e19ba2 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -444,12 +444,12 @@ _STATIC(__after_prom_start)
tovirt(r3,r3)   /* on booke, we already run at 
PAGE_OFFSET */
 #endif
mr. r4,r26  /* In some cases the loader may  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r4,r4)
+#endif
beq 9f  /* have already put us at zero */
li  r6,0x100/* Start offset, the first 0x100 */
/* bytes were copied earlier.*/
-#ifdef CONFIG_PPC_BOOK3E
-   tovirt(r6,r6)   /* on booke, we already run at 
PAGE_OFFSET */
-#endif
 
 #ifdef CONFIG_RELOCATABLE
 /*
@@ -492,6 +492,9 @@ _STATIC(__after_prom_start)
 p_end: .llong  _end - _stext
 
 4: /* Now copy the rest of the kernel up to _end */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26)
+#endif
addis   r5,r26,(p_end - _stext)@ha
ld  r5,(p_end - _stext)@l(r5)   /* get _end */
 5: bl  .copy_and_flush /* copy the rest */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index f1a7ce7..20cbb98 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -460,6 +460,49 @@ kexec_flag:
 
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC_BOOK3E
+/* BOOK3E have no a real MMU mode so we have to setup the initial TLB
+ * for a core to map v:0 to p:0 as 1:1. This current implementation
+ * assume that 1G is enough for kexec.
+ */
+#include 
+kexec_create_tlb:
+   /* Invalidate all TLBs to avoid any TLB conflict. */
+   PPC_TLBILX_ALL(0,R0)
+   sync
+   isync
+
+   mfspr   r10,SPRN_TLB1CFG
+   andi.   r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */
+   subir10,r10,1   /* Often its always safe to use last */
+   lis r9,MAS0_TLBSEL(1)@h
+   rlwimi  r9,r10,16,4,15  /* Setup MAS0 = TLBSEL | ESEL(r9) */
+
+/* Setup a temp mapping v:0 to p:0 as 1:1 and return to it.
+ */
+#ifdef CONFIG_SMP
+#define M_IF_SMP   MAS2_M
+#else
+#define M_IF_SMP   0
+#endif
+   mtspr   SPRN_MAS0,r9
+
+   lis r9,(MAS1_VALID|MAS1_IPROT)@h
+   ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+   mtspr   SPRN_MAS1,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_SMP)
+   mtspr   SPRN_MAS2,r9
+
+   LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
+   mtspr   SPRN_MAS3,r9
+   li  r9,0
+   mtspr   SPRN_MAS7,r9
+
+   tlbwe
+   isync
+   blr
+#endif
 
 /* kexec_smp_wait(void)
  *
@@ -473,6 +516,10 @@ kexec_flag:
  */
 _GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
+#ifdef CONFIG_PPC_BOOK3E
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
+#endif
bl  real_mode
 
li  r4,KEXEC_STATE_REAL_MODE
@@ -489,6 +536,7 @@ _GLOBAL(kexec_smp_wait)
  * don't overwrite r3 here, it is live for kexec_wait above.
  */
 real_mode: /* assume normal blr return */
+#ifndef CONFIG_PPC_BOOK3E
 1: li  r9,MSR_RI
li  r10,MSR_DR|MSR_IR
mflrr11 /* return address to SRR0 */
@@ -500,7 +548,10 @@ real_mode: /* assume normal blr return */
mtspr   SPRN_SRR1,r10
mtspr   SPRN_SRR0,r11
rfid
-
+#else
+   /* the real mode is nothing for book3e. */
+   blr
+#endif
 
 /*
  * kexec_sequence(newstack, start, image, control, clear_all())
@@ -549,6 +600,8 @@ _GLOBAL(kexec_sequence)
mtmsrd  r3,1
 #else
wrteei  0
+   /* Create a 1:1 mapping. */
+   bl  kexec_create_tlb
 #endif
 
/* copy dest pages, flush whole dest image */
-- 
1.7.9.5



[v2][PATCH 4/7] book3e/kexec/kdump: introduce a kexec kernel flag

2013-06-20 Thread Tiejun Chen
We need to introduce a flag to indicate we're already running a kexec'd
kernel, so we can take the proper path. For example, we shouldn't access
the bootloader's spin_table to bring up any secondary cpu in the kexec
kernel, since the kexec kernel already knows how to jump to
generic_secondary_smp_init.
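
Reduced to C, the intended handshake is (sketch; write_spin_table() is a
hypothetical stand-in for the flush/out_be32/out_be64 sequence in the code):

	/* kexec'ing kernel, kexec_sequence(), before releasing the other CPUs */
	__run_at_kexec = 1;

	/* rebooted kernel, smp_85xx_kick_cpu(): skip the bootloader spin table */
	if (!__run_at_kexec)
		write_spin_table(spin_table, hw_cpu);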

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/smp.h|3 +++
 arch/powerpc/kernel/head_64.S |   12 
 arch/powerpc/kernel/misc_64.S |6 ++
 arch/powerpc/platforms/85xx/smp.c |   14 ++
 4 files changed, 35 insertions(+)

diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabe..fbc3d9b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -200,6 +200,9 @@ extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+extern unsigned long __run_at_kexec;
+#endif
 
 extern void __early_start(void);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3e19ba2..ffa4b18 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -89,6 +89,12 @@ __secondary_hold_spinloop:
 __secondary_hold_acknowledge:
.llong  0x0
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   .globl  __run_at_kexec
+__run_at_kexec:
+   .llong  0x0 /* Flag for the secondary kernel from kexec. */
+#endif
+
 #ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
 * at the loaded address instead of the linked address.  This
@@ -417,6 +423,12 @@ _STATIC(__after_prom_start)
 #if defined(CONFIG_PPC_BOOK3E)
tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
 #endif
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   /* If relocated we need to restore this flag on that relocated address. 
*/
+   ld  r7,__run_at_kexec-_stext(r26)
+   std r7,__run_at_kexec-_stext(r26)
+#endif
+
lwz r7,__run_at_load-_stext(r26)
 #if defined(CONFIG_PPC_BOOK3E)
tophys(r26,r26) /* Restore for the remains. */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 20cbb98..c89aead 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -619,6 +619,12 @@ _GLOBAL(kexec_sequence)
bl  .copy_and_flush /* (dest, src, copy limit, start offset) */
 1: /* assume normal blr return */
 
+   /* notify we're going into kexec kernel for SMP. */
+   LOAD_REG_ADDR(r3,__run_at_kexec)
+   li  r4,1
+   std r4,0(r3)
+   sync
+
/* release other cpus to the new kernel secondary start at 0x60 */
mflrr5
li  r6,1
diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index 6a17599..b308373 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -150,6 +150,9 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
int hw_cpu = get_hard_smp_processor_id(nr);
int ioremappable;
int ret = 0;
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   unsigned long *ptr;
+#endif
 
WARN_ON(nr < 0 || nr >= NR_CPUS);
WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
@@ -238,11 +241,22 @@ out:
 #else
smp_generic_kick_cpu(nr);
 
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   ptr  = (unsigned long *)((unsigned long)&__run_at_kexec);
+   /* We shouldn't access spin_table from the bootloader to up any
+* secondary cpu for kexec kernel, and kexec kernel already
+* know how to jump to generic_secondary_smp_init.
+*/
+   if (!*ptr) {
+#endif
flush_spin_table(spin_table);
out_be32(&spin_table->pir, hw_cpu);
out_be64((u64 *)(&spin_table->addr_h),
  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
flush_spin_table(spin_table);
+#if defined(CONFIG_KEXEC) || defined(CONFIG_CRASH_DUMP)
+   }
+#endif
 #endif
 
local_irq_restore(flags);
-- 
1.7.9.5



[v2][PATCH 5/7] book3e/kexec/kdump: implement ppc64 kexec specifics

2013-06-20 Thread Tiejun Chen
The ppc64 kexec mechanism has a different implementation from ppc32.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/platforms/85xx/smp.c |   13 +
 1 file changed, 13 insertions(+)

diff --git a/arch/powerpc/platforms/85xx/smp.c 
b/arch/powerpc/platforms/85xx/smp.c
index b308373..18a5f8a 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -280,6 +280,7 @@ struct smp_ops_t smp_85xx_ops = {
 };
 
 #ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC32
 atomic_t kexec_down_cpus = ATOMIC_INIT(0);
 
 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
@@ -298,6 +299,14 @@ static void mpc85xx_smp_kexec_down(void *arg)
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0,1);
 }
+#else
+void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+   local_irq_disable();
+   hard_irq_disable();
+   mpic_teardown_this_cpu(secondary);
+}
+#endif
 
 static void map_and_flush(unsigned long paddr)
 {
@@ -349,11 +358,14 @@ static void mpc85xx_smp_flush_dcache_kexec(struct kimage 
*image)
 
 static void mpc85xx_smp_machine_kexec(struct kimage *image)
 {
+#ifdef CONFIG_PPC32
int timeout = INT_MAX;
int i, num_cpus = num_present_cpus();
+#endif
 
mpc85xx_smp_flush_dcache_kexec(image);
 
+#ifdef CONFIG_PPC32
if (image->type == KEXEC_TYPE_DEFAULT)
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
 
@@ -371,6 +383,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
if ( i == smp_processor_id() ) continue;
mpic_reset_core(i);
}
+#endif
 
default_machine_kexec(image);
 }
-- 
1.7.9.5



[v2][PATCH 0/7] powerpc/book3e: support kexec and kdump

2013-06-20 Thread Tiejun Chen
This patchset is used to support kexec and kdump on book3e.

Tested on fsl-p5040 DS.

v2:
* rebase on the merge branch as Ben mentioned.

v1:
* improve some patch head
* rebase on next branch with patch 7


Tiejun Chen (7):
  powerpc/book3e: support CONFIG_RELOCATABLE
  book3e/kexec/kdump: enable kexec for kernel
  book3e/kexec/kdump: create a 1:1 TLB mapping
  book3e/kexec/kdump: introduce a kexec kernel flag
  book3e/kexec/kdump: implement ppc64 kexec specifics
  book3e/kexec/kdump: redefine VIRT_PHYS_OFFSET
  book3e/kexec/kdump: recover "r4 = 0" to create the initial TLB

 arch/powerpc/Kconfig |2 +-
 arch/powerpc/include/asm/exception-64e.h |8 
 arch/powerpc/include/asm/page.h  |2 +
 arch/powerpc/include/asm/smp.h   |3 ++
 arch/powerpc/kernel/exceptions-64e.S |   15 ++-
 arch/powerpc/kernel/head_64.S|   47 +++--
 arch/powerpc/kernel/machine_kexec_64.c   |6 +++
 arch/powerpc/kernel/misc_64.S|   67 +-
 arch/powerpc/lib/feature-fixups.c|7 
 arch/powerpc/platforms/85xx/smp.c|   27 
 10 files changed, 178 insertions(+), 6 deletions(-)

Tiejun


[v2][PATCH 2/7] book3e/kexec/kdump: enable kexec for kernel

2013-06-20 Thread Tiejun Chen
We need to activate KEXEC for book3e and bypass or convert the non-book3e
parts of the kexec code.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/Kconfig   |2 +-
 arch/powerpc/kernel/machine_kexec_64.c |6 ++
 arch/powerpc/kernel/misc_64.S  |6 ++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c33e3ad..6ecf3c9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -364,7 +364,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
bool "kexec system call"
-   depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
+   depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
help
  kexec is a system call that implements the ability to shutdown your
  current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/powerpc/kernel/machine_kexec_64.c 
b/arch/powerpc/kernel/machine_kexec_64.c
index 611acdf..ef39271 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -33,6 +33,7 @@
 int default_machine_kexec_prepare(struct kimage *image)
 {
int i;
+#ifndef CONFIG_PPC_BOOK3E
unsigned long begin, end;   /* limits of segment */
unsigned long low, high;/* limits of blocked memory range */
struct device_node *node;
@@ -41,6 +42,7 @@ int default_machine_kexec_prepare(struct kimage *image)
 
if (!ppc_md.hpte_clear_all)
return -ENOENT;
+#endif
 
/*
 * Since we use the kernel fault handlers and paging code to
@@ -51,6 +53,7 @@ int default_machine_kexec_prepare(struct kimage *image)
if (image->segment[i].mem < __pa(_end))
return -ETXTBSY;
 
+#ifndef CONFIG_PPC_BOOK3E
/*
 * For non-LPAR, we absolutely can not overwrite the mmu hash
 * table, since we are still using the bolted entries in it to
@@ -92,6 +95,7 @@ int default_machine_kexec_prepare(struct kimage *image)
return -ETXTBSY;
}
}
+#endif
 
return 0;
 }
@@ -367,6 +371,7 @@ void default_machine_kexec(struct kimage *image)
/* NOTREACHED */
 }
 
+#ifndef CONFIG_PPC_BOOK3E
 /* Values we need to export to the second kernel via the device tree. */
 static unsigned long htab_base;
 
@@ -411,3 +416,4 @@ static int __init export_htab_values(void)
return 0;
 }
 late_initcall(export_htab_values);
+#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 6820e45..f1a7ce7 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -543,9 +543,13 @@ _GLOBAL(kexec_sequence)
lhz r25,PACAHWCPUID(r13)/* get our phys cpu from paca */
 
/* disable interrupts, we are overwriting kernel data next */
+#ifndef CONFIG_PPC_BOOK3E
mfmsr   r3
rlwinm  r3,r3,0,17,15
mtmsrd  r3,1
+#else
+   wrteei  0
+#endif
 
/* copy dest pages, flush whole dest image */
mr  r3,r29
@@ -567,10 +571,12 @@ _GLOBAL(kexec_sequence)
li  r6,1
stw r6,kexec_flag-1b(5)
 
+#ifndef CONFIG_PPC_BOOK3E
/* clear out hardware hash page table and tlb */
ld  r5,0(r27)   /* deref function descriptor */
mtctr   r5
bctrl   /* ppc_md.hpte_clear_all(void); */
+#endif
 
 /*
  *   kexec image calling is:
-- 
1.7.9.5



[v2][PATCH 1/7] powerpc/book3e: support CONFIG_RELOCATABLE

2013-06-20 Thread Tiejun Chen
book3e is different from book3s, since book3s includes the exception
vector code in head_64.S because that code relies on absolute addressing,
which is only possible within that compilation unit. So here we have to
get the label address via the GOT.

And when booting a relocated kernel, we should set IVPR properly again
after .relocate.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/include/asm/exception-64e.h |8 
 arch/powerpc/kernel/exceptions-64e.S |   15 ++-
 arch/powerpc/kernel/head_64.S|   22 ++
 arch/powerpc/lib/feature-fixups.c|7 +++
 4 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/exception-64e.h 
b/arch/powerpc/include/asm/exception-64e.h
index 51fa43e..89e940d 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -214,10 +214,18 @@ exc_##label##_book3e:
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED
 #endif
 
+#ifndef CONFIG_RELOCATABLE
 #define SET_IVOR(vector_number, vector_offset) \
li  r3,vector_offset@l; \
ori r3,r3,interrupt_base_book3e@l;  \
mtspr   SPRN_IVOR##vector_number,r3;
+#else
+#define SET_IVOR(vector_number, vector_offset) \
+   LOAD_REG_ADDR(r3,interrupt_base_book3e);\
+   rlwinm  r3,r3,0,15,0;   \
+   ori r3,r3,vector_offset@l;  \
+   mtspr   SPRN_IVOR##vector_number,r3;
+#endif
 
 #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
 
diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 645170a..4b23119 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1097,7 +1097,15 @@ skpinv:  addir6,r6,1 /* 
Increment */
  * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
  */
/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+   /* We have to find out address from lr. */
+   bl  1f  /* Find our address */
+1: mflrr6
+   addir6,r6,(2f - 1b)
+   tovirt(r6,r6)
+#else
LOAD_REG_IMMEDIATE(r6,2f)
+#endif
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
mtspr   SPRN_SRR0,r6
@@ -1348,9 +1356,14 @@ _GLOBAL(book3e_secondary_thread_init)
mflrr28
b   3b
 
-_STATIC(init_core_book3e)
+_GLOBAL(init_core_book3e)
/* Establish the interrupt vector base */
+#ifdef CONFIG_RELOCATABLE
+   tovirt(r2,r2)
+   LOAD_REG_ADDR(r3, interrupt_base_book3e)
+#else
LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
+#endif
mtspr   SPRN_IVPR,r3
sync
blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b61363d..0942f3a 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -414,12 +414,22 @@ _STATIC(__after_prom_start)
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldir25,r25,32
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1/* flagged to stay where we are ? */
bne 1f
add r25,r25,r26
 1: mr  r3,r25
bl  .relocate
+#if defined(CONFIG_PPC_BOOK3E)
+   /* We should set ivpr again after .relocate. */
+   bl  .init_core_book3e
+#endif
 #endif
 
 /*
@@ -447,12 +457,24 @@ _STATIC(__after_prom_start)
  * variable __run_at_load, if it is set the kernel is treated as relocatable
  * kernel, otherwise it will be moved to PHYSICAL_START
  */
+#if defined(CONFIG_PPC_BOOK3E)
+   tovirt(r26,r26) /* on booke, we already run at 
PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+   tophys(r26,r26) /* Restore for the remains. */
+#endif
cmplwi  cr0,r7,1
bne 3f
 
+#ifdef CONFIG_PPC_BOOK3E
+   LOAD_REG_ADDR(r5, interrupt_end_book3e)
+   LOAD_REG_ADDR(r11, _stext)
+   sub r5,r5,r11
+#else
/* just copy interrupts */
LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
+#endif
b   5f
 3:
 #endif
diff --git a/arch/powerpc/lib/feature-fixups.c 
b/arch/powerpc/lib/feature-fixups.c
index 7a8a748..13f20ed 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -135,13 +135,20 @@ void do_final_fixups(void)
 #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
int *src, *dest;
unsigned long length;
+#ifdef CONFIG_PPC_BOOK3E
+   extern char interrupt_end_book3e[];
+#endif
 
if (PHYSICAL_START == 0)
return;
 
  

[v5][PATCH 2/6] powerpc/book3e: store critical/machine/debug exception thread info

2013-06-20 Thread Tiejun Chen
We need to copy the current thread info into these exceptions' thread info,
like we already do for PPC32.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   15 +++
 1 file changed, 15 insertions(+)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 4d8e57f..07cf657 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -67,6 +67,18 @@
std r10,PACA_##level##_STACK(r13);
 #endif
 
+/* Store something to exception thread info */
+#defineBOOK3E_STORE_EXC_LEVEL_THEAD_INFO(type) 
\
+   ld  r14,PACAKSAVE(r13); 
\
+   CURRENT_THREAD_INFO(r14, r14);  
\
+   CURRENT_THREAD_INFO(r15, r1);   
\
+   ld  r10,TI_FLAGS(r14);  
\
+   std r10,TI_FLAGS(r15);  
\
+   ld  r10,TI_PREEMPT(r14);
\
+   std r10,TI_PREEMPT(r15);
\
+   ld  r10,TI_TASK(r14);   
\
+   std r10,TI_TASK(r15);
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -104,6 +116,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(CRIT);
\
 1:
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
@@ -114,6 +127,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(DBG); 
\
 1:
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
@@ -124,6 +138,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(MC);  
\
 1:
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
-- 
1.7.9.5



[v5][PATCH 4/6] powerpc/book3e: support kgdb for kernel space

2013-06-20 Thread Tiejun Chen
To support KGDB we currently need to skip the userspace-only check here, so a
debug exception taken from kernel mode is handled as a normal debug exception.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 07cf657..a286b51 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -639,11 +639,13 @@ kernel_dbg_exc:
rfdi
 
/* Normal debug exception */
+1: andi.   r14,r11,MSR_PR; /* check for userspace again */
+#ifndef CONFIG_KGDB
/* XXX We only handle coming from userspace for now since we can't
 * quite save properly an interrupted kernel state yet
 */
-1: andi.   r14,r11,MSR_PR; /* check for userspace again */
beq kernel_dbg_exc; /* if from kernel mode */
+#endif
 
/* Now we mash up things to make it look like we are coming on a
 * normal exception
-- 
1.7.9.5



[v5][PATCH 1/6] powerpc/book3e: load critical/machine/debug exception stack

2013-06-20 Thread Tiejun Chen
We always allocate separate stacks for the critical/machine check/debug
exceptions. This is different from the normal exception path, so we should
load these exception stacks properly, as we already do for BookE.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   49 +++---
 1 file changed, 46 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 4b23119..4d8e57f 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -36,6 +36,37 @@
  */
 #defineSPECIAL_EXC_FRAME_SIZE  INT_FRAME_SIZE
 
+/* only on book3e */
+#define DBG_STACK_BASE dbgirq_ctx
+#define MC_STACK_BASE  mcheckirq_ctx
+#define CRIT_STACK_BASEcritirq_ctx
+
+#ifdef CONFIG_RELOCATABLE
+#define LOAD_STACK_BASE(reg, level)\
+   tovirt(r2,r2);  \
+   LOAD_REG_ADDR(reg, level##_STACK_BASE);
+#else
+#define LOAD_STACK_BASE(reg, level)\
+   LOAD_REG_IMMEDIATE(reg, level##_STACK_BASE);
+#endif
+
+#ifdef CONFIG_SMP
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   mfspr   r14,SPRN_PIR;   \
+   slwir14,r14,3;  \
+   LOAD_STACK_BASE(r10, level);\
+   add r10,r10,r14;\
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_##level##_STACK(r13);
+#else
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   LOAD_STACK_BASE(r10, level);\
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_##level##_STACK(r13);
+#endif
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -68,20 +99,32 @@
 #define SPRN_GDBELL_SRR1   SPRN_GSRR1
 
 #define CRIT_SET_KSTACK
\
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
 
 #define DBG_SET_KSTACK \
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
 
 #define MC_SET_KSTACK  \
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
 
-- 
1.7.9.5



[v5][PATCH 6/6] book3e/kgdb: Fix a single step case of lazy IRQ

2013-06-20 Thread Tiejun Chen
When we're in kgdb_singlestep(), we have to work around to get the
thread_info by copying it from the kernel stack before calling
kgdb_handle_exception(), then copying it back afterwards.

But PPC64 has a lazy interrupt implementation. So after copying the
thread info from the kernel stack, if we need to replay an interrupt we
shouldn't restore that previous backup thread info, so that the
interrupt can be replayed later with a proper thread info.

This patch uses __check_irq_replay() to guard that restore.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/irq.c  |   10 ++
 arch/powerpc/kernel/kgdb.c |3 ++-
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ea185e0..3625453 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -339,7 +339,17 @@ bool prep_irq_for_idle(void)
return true;
 }
 
+notrace unsigned int check_irq_replay(void)
+{
+   return __check_irq_replay();
+}
+#else
+notrace unsigned int check_irq_replay(void)
+{
+   return 0;
+}
 #endif /* CONFIG_PPC64 */
+EXPORT_SYMBOL(check_irq_replay);
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index cde7818..5b30408 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -152,6 +152,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
 }
 
 static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info);
+extern notrace unsigned int check_irq_replay(void);
 static int kgdb_singlestep(struct pt_regs *regs)
 {
struct thread_info *thread_info, *exception_thread_info;
@@ -181,7 +182,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
 
kgdb_handle_exception(0, SIGTRAP, 0, regs);
 
-   if (thread_info != exception_thread_info)
+   if ((thread_info != exception_thread_info) && (!check_irq_replay()))
/* Restore current_thread_info lastly. */
memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
 
-- 
1.7.9.5



[v5][PATCH 5/6] powerpc/kgdb: use DEFINE_PER_CPU to allocate kgdb's thread_info

2013-06-20 Thread Tiejun Chen
Use DEFINE_PER_CPU to allocate the thread_info statically instead of with
kmalloc(). This avoids having to add more memory-allocation error handling.
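
A minimal sketch of the pattern (illustration only):

	/* one statically allocated backup area per CPU, no kmalloc()/kfree() */
	static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info);

	static struct thread_info *backup_area(void)
	{
		return &__get_cpu_var(kgdb_thread_info);	/* this CPU's copy */
	}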

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 55409ac..cde7818 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -151,15 +151,15 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 1;
 }
 
+static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info);
 static int kgdb_singlestep(struct pt_regs *regs)
 {
struct thread_info *thread_info, *exception_thread_info;
-   struct thread_info *backup_current_thread_info;
+   struct thread_info *backup_current_thread_info = 
&__get_cpu_var(kgdb_thread_info);
 
if (user_mode(regs))
return 0;
 
-   backup_current_thread_info = kmalloc(sizeof(struct thread_info), 
GFP_KERNEL);
/*
 * On Book E and perhaps other processors, singlestep is handled on
 * the critical exception stack.  This causes current_thread_info()
@@ -185,7 +185,6 @@ static int kgdb_singlestep(struct pt_regs *regs)
/* Restore current_thread_info lastly. */
memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
 
-   kfree(backup_current_thread_info);
return 1;
 }
 
-- 
1.7.9.5



[v5][PATCH 3/6] book3e/kgdb: update thread's dbcr0

2013-06-20 Thread Tiejun Chen
gdb always needs to generate a single step properly to invoke a kgdb
state. But with lazy interrupts, book3e can't always trigger a debug
exception on a single step, since the current task is busy handling the
pending exceptions, so we lose the expected dbcr configuration and no
debug exception is generated.

So here we also update the thread's dbcr0 to make sure the current task
resumes with that otherwise-missed dbcr0 configuration.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |   13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index c1eef24..55409ac 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -410,7 +410,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int 
err_code,
   struct pt_regs *linux_regs)
 {
char *ptr = &remcom_in_buffer[1];
-   unsigned long addr;
+   unsigned long addr, dbcr0;
 
switch (remcom_in_buffer[0]) {
/*
@@ -427,8 +427,15 @@ int kgdb_arch_handle_exception(int vector, int signo, int 
err_code,
/* set the trace bit if we're stepping */
if (remcom_in_buffer[0] == 's') {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-   mtspr(SPRN_DBCR0,
- mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+   dbcr0 = mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM;
+   mtspr(SPRN_DBCR0, dbcr0);
+#ifdef CONFIG_PPC_BOOK3E_64
+   /* With lazy interrut we have to update thread dbcr0 
here
+* to make sure we can set debug properly at last to 
invoke
+* kgdb again to work well.
+*/
+   current->thread.dbcr0 = dbcr0;
+#endif
linux_regs->msr |= MSR_DE;
 #else
linux_regs->msr |= MSR_SE;
-- 
1.7.9.5



[v5][PATCH 0/6] powerpc/book3e: make kgdb work well

2013-06-20 Thread Tiejun Chen
Ben,

As you mentioned just now, I am resending this other pending patch set, v5,
used to support kgdb/gdb on book3e.

v5:

* rebase on merge branch.

Note the original patch, [PATCH 5/7] kgdb/kgdbts: support ppc64, is already
merged by Jason.

v4:

* use DEFINE_PER_CPU to allocate kgdb's thread_info
* add patch 7 to make sure we copy the thread_info only when !__check_irq_replay
* leave "andi.   r14,r11,MSR_PR" out of "#ifndef CONFIG_KGDB"
  since cr0 is still used later.
* retest

v3:

* make it work when CONFIG_RELOCATABLE is enabled
* fix one typo in patch,
  "powerpc/book3e: store critical/machine/debug exception thread info": 
ld  r1,PACAKSAVE(r13);
->  ld  r14,PACAKSAVE(r13);
* remove copying the thread_info since booke and book3e always copy
  the thread_info now when we enter the debug exception, and so drop
  the v2 patch, "book3e/kgdb: Fix a single step case of lazy IRQ"

v2:

* Make sure we cover CONFIG_PPC_BOOK3E_64 safely
* Use LOAD_REG_IMMEDIATE() to properly load the value of the constant
  expression when loading the debug exception stack
* Copy thread info from the kernel stack when coming from user
* Rebase latest powerpc git tree

v1:
* Copy thread info only when we come from !user mode, since we would otherwise
  get a kernel stack coming directly from user space.
* remove save/restore of EX_R14/EX_R15 since DBG_EXCEPTION_PROLOG already covers
  this.
* use CURRENT_THREAD_INFO() to conveniently get the thread info.
* fix some typos
* add a patch to make sure gdb can generate a single step properly to invoke a
  kgdb state.
* add a patch so that if we need to replay an interrupt, we don't restore the
  previous backup thread info, ensuring the interrupt can be replayed later with
  a proper thread info.
* rebase on the latest powerpc git tree

v0:
This patchset is used to support kgdb for book3e.

--
Tiejun Chen (6):
  powerpc/book3e: load critical/machine/debug exception stack
  powerpc/book3e: store critical/machine/debug exception thread info
  book3e/kgdb: update thread's dbcr0
  powerpc/book3e: support kgdb for kernel space
  powerpc/kgdb: use DEFINE_PER_CPU to allocate kgdb's thread_info
  book3e/kgdb: Fix a single step case of lazy IRQ

 arch/powerpc/kernel/exceptions-64e.S |   68 --
 arch/powerpc/kernel/irq.c|   10 +
 arch/powerpc/kernel/kgdb.c   |   21 +++
 3 files changed, 88 insertions(+), 11 deletions(-)

Tiejun


[PATCH 1/1] usb: ehci-fsl: set INCR8 mode only on MPC512x

2013-04-23 Thread Tiejun Chen
Commit 761bbcb7, "usb: ehci-fsl: set INCR8 mode for system bus interface
on MPC512x", was introduced to fix an MPC5121e (M36P) erratum by setting
INCR8 mode for the system bus interface on MPC512x, but we should make sure
this only applies to MPC512x, like the other parts of that commit. Otherwise
it would misbehave on other platforms that do not have this erratum.

Signed-off-by: Tiejun Chen 
---
 drivers/usb/host/ehci-fsl.c |   10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index d81d2fc..f4f2a7b 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -509,7 +509,15 @@ static int ehci_fsl_mpc512x_drv_resume(struct device *dev)
ehci_writel(ehci, ISIPHYCTRL_PXE | ISIPHYCTRL_PHYE,
hcd->regs + FSL_SOC_USB_ISIPHYCTRL);
 
-   ehci_writel(ehci, SBUSCFG_INCR8, hcd->regs + FSL_SOC_USB_SBUSCFG);
+   if (of_device_is_compatible(dev->parent->of_node,
+   "fsl,mpc5121-usb2-dr")) {
+   /*
+* set SBUSCFG:AHBBRST so that control msgs don't
+* fail when doing heavy PATA writes.
+*/
+   ehci_writel(ehci, SBUSCFG_INCR8,
+   hcd->regs + FSL_SOC_USB_SBUSCFG);
+   }
 
/* restore EHCI registers */
ehci_writel(ehci, pdata->pm_command, &ehci->regs->command);
-- 
1.7.9.5



[RFC][PATCH 1/1] USB/EHCI: work for different PHY_CLK_VALID detecting order

2013-04-23 Thread Tiejun Chen
Due to controller-specific issues with PHY_CLK_VALID in ULPI mode, in some
cases, after setting PHY_CLK_SEL, we should set USB_CTRL_USB_EN before
checking PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.

But in other cases USB_CTRL_USB_EN is already set, and PHY_CLK_VALID is no
longer valid once USB_CTRL_USB_EN is set. Since PHY_CLK_VALID is
write-1-to-clear (w1c), we can force-clear USB_CTRL_USB_EN first, after
setting PHY_CLK_SEL, so the PHY_CLK_VALID status is preserved even after we
re-set USB_CTRL_USB_EN.

Signed-off-by: Tiejun Chen 
---
 drivers/usb/host/ehci-fsl.c |   15 ---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index d81d2fc..57f2aa0 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -234,11 +234,20 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
/* controller version 1.6 or above */
setbits32(non_ehci + FSL_SOC_USB_CTRL,
ULPI_PHY_CLK_SEL);
+
/*
-* Due to controller issue of PHY_CLK_VALID in ULPI
-* mode, we set USB_CTRL_USB_EN before checking
-* PHY_CLK_VALID, otherwise PHY_CLK_VALID doesn't work.
+* Due to different controller issue of PHY_CLK_VALID
+* in ULPI mode, in some cases we should set
+* USB_CTRL_USB_EN before checking PHY_CLK_VALID,
+* otherwise PHY_CLK_VALID doesn't work.
+*
+* But in other cases USB_CTRL_USB_EN is already set
+* and PHY_CLK_VALID is not valid once USB_CTRL_USB_EN
+* is set. But since PHY_CLK_VALID is w1c, we can force
+* clear USB_CTRL_USB_EN firstly then PHY_CLK_VALID
+* status can be kept even we re-set USB_CTRL_USB_EN.
 */
+   clrbits32(non_ehci + FSL_SOC_USB_CTRL, USB_CTRL_USB_EN);
clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
UTMI_PHY_EN, USB_CTRL_USB_EN);
}
-- 
1.7.9.5



[PATCH 1/1] powerpc/kgdb: use DEFINE_PER_CPU to allocate kgdb's thread_info

2013-03-11 Thread Tiejun Chen
Use DEFINE_PER_CPU to allocate the thread_info statically instead of with
kmalloc(). This avoids having to add more memory-allocation error handling.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 1a57307..cde7818 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -151,15 +151,15 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 1;
 }
 
+static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info);
 static int kgdb_singlestep(struct pt_regs *regs)
 {
struct thread_info *thread_info, *exception_thread_info;
-   struct thread_info *backup_current_thread_info;
+   struct thread_info *backup_current_thread_info = 
&__get_cpu_var(kgdb_thread_info);
 
if (user_mode(regs))
return 0;
 
-   backup_current_thread_info = (struct thread_info 
*)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
/*
 * On Book E and perhaps other processors, singlestep is handled on
 * the critical exception stack.  This causes current_thread_info()
@@ -185,7 +185,6 @@ static int kgdb_singlestep(struct pt_regs *regs)
/* Restore current_thread_info lastly. */
memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
 
-   kfree(backup_current_thread_info);
return 1;
 }
 
-- 
1.7.9.5



[v4][PATCH 0/7] powerpc/book3e: make kgdb work well

2013-03-11 Thread Tiejun Chen
This patchset is used to support kgdb/gdb on book3e.

Validated on p4080ds and p5040ds by testing single step and breakpoints.

v4:

* use DEFINE_PER_CPU to allocate kgdb's thread_info
* add patch 7 to make sure we copy the thread_info only when !__check_irq_replay
* leave "andi.   r14,r11,MSR_PR" out of "#ifndef CONFIG_KGDB"
  since cr0 is still used later.
* retest

v3:

* make it work when CONFIG_RELOCATABLE is enabled
* fix one typo in patch,
  "powerpc/book3e: store critical/machine/debug exception thread info": 
ld  r1,PACAKSAVE(r13);
->  ld  r14,PACAKSAVE(r13);
* remove copying the thread_info since booke and book3e always copy
  the thread_info now when we enter the debug exception, and so drop
  the v2 patch, "book3e/kgdb: Fix a single step case of lazy IRQ"

v2:

* Make sure we cover CONFIG_PPC_BOOK3E_64 safely
* Use LOAD_REG_IMMEDIATE() to properly load the value of the constant
  expression when loading the debug exception stack
* Copy thread info from the kernel stack when coming from user
* Rebase latest powerpc git tree

v1:
* Copy thread info only when we come from !user mode, since we would otherwise
  get a kernel stack coming directly from user space.
* remove save/restore of EX_R14/EX_R15 since DBG_EXCEPTION_PROLOG already covers
  this.
* use CURRENT_THREAD_INFO() to conveniently get the thread info.
* fix some typos
* add a patch to make sure gdb can generate a single step properly to invoke a
  kgdb state.
* add a patch so that if we need to replay an interrupt, we don't restore the
  previous backup thread info, ensuring the interrupt can be replayed later with
  a proper thread info.
* rebase on the latest powerpc git tree

v0:
This patchset is used to support kgdb for book3e.

--
Tiejun Chen (7):
  powerpc/book3e: load critical/machine/debug exception stack
  powerpc/book3e: store critical/machine/debug exception thread info
  book3e/kgdb: update thread's dbcr0
  powerpc/book3e: support kgdb for kernel space
  kgdb/kgdbts: support ppc64
  powerpc/kgdb: use DEFINE_PER_CPU to allocate kgdb's thread_info
  book3e/kgdb: Fix a single step case of lazy IRQ

 arch/powerpc/kernel/exceptions-64e.S |   68 --
 arch/powerpc/kernel/irq.c|   10 +
 arch/powerpc/kernel/kgdb.c   |   21 +++
 drivers/misc/kgdbts.c|2 +
 4 files changed, 90 insertions(+), 11 deletions(-)

Tiejun


[v4][PATCH 5/7] kgdb/kgdbts: support ppc64

2013-03-11 Thread Tiejun Chen
We can't simply look up the address of a function's entry point via its
symbol on all architectures.

For the PPC64 ABI there is actually a function descriptor structure.

A function descriptor is a three doubleword data structure that contains
the following values:
* The first doubleword contains the address of the entry point of
the function.
* The second doubleword contains the TOC base address for
the function.
* The third doubleword contains the environment pointer for
languages such as Pascal and PL/1.

So we should call the wrapped dereference_function_descriptor() to get
the address of the function's entry point.

Note this is also safe for other architectures; referring to
"include/asm-generic/sections.h":

dereference_function_descriptor(p) is simply (p) when an architecture does
not define its own version.
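
For illustration only (not part of the patch; the struct and helper names
here are hypothetical), the ELFv1 function descriptor can be pictured as a
plain C struct, and "dereferencing" it just means reading its first field:

	struct ppc64_func_desc {
		unsigned long entry;	/* doubleword 0: address of the function's code */
		unsigned long toc;	/* doubleword 1: TOC base address for the function */
		unsigned long env;	/* doubleword 2: environment pointer (Pascal/PL/1) */
	};

	static unsigned long entry_point(void *func_sym)
	{
		/* what dereference_function_descriptor() boils down to on ppc64 */
		return ((struct ppc64_func_desc *)func_sym)->entry;
	}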

Signed-off-by: Tiejun Chen 
---
 drivers/misc/kgdbts.c |2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 3aa9a96..4799e1f 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -103,6 +103,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define v1printk(a...) do { \
if (verbose) \
@@ -222,6 +223,7 @@ static unsigned long lookup_addr(char *arg)
addr = (unsigned long)do_fork;
else if (!strcmp(arg, "hw_break_val"))
addr = (unsigned long)&hw_break_val;
+   addr = (unsigned long )dereference_function_descriptor((void *)addr);
return addr;
 }
 
-- 
1.7.9.5



[v4][PATCH 7/7] book3e/kgdb: Fix a single step case of lazy IRQ

2013-03-11 Thread Tiejun Chen
When we're in kgdb_singlestep(), we have to work around
current_thread_info() by copying the thread_info from the kernel stack
before calling kgdb_handle_exception(), then copying it back afterwards.

But PPC64 has a lazy interrupt implementation, so after copying the
thread_info from the kernel stack, if we need to replay an interrupt we
shouldn't restore the previous backup thread_info; that way we can still
replay the interrupt later with the proper thread_info.

This patch uses __check_irq_replay() to guarantee this.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/irq.c  |   10 ++
 arch/powerpc/kernel/kgdb.c |3 ++-
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 4f97fe3..bb8d27a 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -339,7 +339,17 @@ bool prep_irq_for_idle(void)
return true;
 }
 
+notrace unsigned int check_irq_replay(void)
+{
+   return __check_irq_replay();
+}
+#else
+notrace unsigned int check_irq_replay(void)
+{
+   return 0;
+}
 #endif /* CONFIG_PPC64 */
+EXPORT_SYMBOL(check_irq_replay);
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index cde7818..5b30408 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -152,6 +152,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
 }
 
 static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info);
+extern notrace unsigned int check_irq_replay(void);
 static int kgdb_singlestep(struct pt_regs *regs)
 {
struct thread_info *thread_info, *exception_thread_info;
@@ -181,7 +182,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
 
kgdb_handle_exception(0, SIGTRAP, 0, regs);
 
-   if (thread_info != exception_thread_info)
+   if ((thread_info != exception_thread_info) && (!check_irq_replay()))
/* Restore current_thread_info lastly. */
memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
 
-- 
1.7.9.5



[v4][PATCH 2/7] powerpc/book3e: store critical/machine/debug exception thread info

2013-03-11 Thread Tiejun Chen
We need to copy the current thread_info into these exception stacks'
thread_info, as we already do for PPC32.
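
As a C-level illustration only (the real change is the assembly macro in the
diff below; this helper is hypothetical), the idea is to seed the exception
stack's thread_info with the fields the kernel reads through
current_thread_info():

	/* 'src' is the task's normal thread_info, 'dst' sits at the bottom
	 * of the special exception stack */
	static void seed_exc_thread_info(struct thread_info *dst,
					 const struct thread_info *src)
	{
		dst->flags         = src->flags;		/* TI_FLAGS */
		dst->preempt_count = src->preempt_count;	/* TI_PREEMPT */
		dst->task          = src->task;			/* TI_TASK */
	}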

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   15 +++
 1 file changed, 15 insertions(+)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 7fd6af0..7df9a1f 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -67,6 +67,18 @@
std r10,PACA_##level##_STACK(r13);
 #endif
 
+/* Store something to exception thread info */
+#defineBOOK3E_STORE_EXC_LEVEL_THEAD_INFO(type) 
\
+   ld  r14,PACAKSAVE(r13); 
\
+   CURRENT_THREAD_INFO(r14, r14);  
\
+   CURRENT_THREAD_INFO(r15, r1);   
\
+   ld  r10,TI_FLAGS(r14);  
\
+   std r10,TI_FLAGS(r15);  
\
+   ld  r10,TI_PREEMPT(r14);
\
+   std r10,TI_PREEMPT(r15);
\
+   ld  r10,TI_TASK(r14);   
\
+   std r10,TI_TASK(r15);
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -104,6 +116,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(CRIT);
\
 1:
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
@@ -114,6 +127,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(DBG); 
\
 1:
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
@@ -124,6 +138,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(MC);  
\
 1:
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
-- 
1.7.9.5



[v4][PATCH 3/7] book3e/kgdb: update thread's dbcr0

2013-03-11 Thread Tiejun Chen
gdb always needs to generate a single step properly to enter a kgdb
state. But with lazy interrupts, book3e can't always trigger a debug
exception from a single step, since the current thread may be blocked
handling pending exceptions, and the DBCR0 configuration we expected to
generate the debug exception is then lost.

So here we also update the thread's dbcr0 to make sure the current
thread can go back with that otherwise lost dbcr0 configuration.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |   13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 5ca82cd..1a57307 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -410,7 +410,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int 
err_code,
   struct pt_regs *linux_regs)
 {
char *ptr = &remcom_in_buffer[1];
-   unsigned long addr;
+   unsigned long addr, dbcr0;
 
switch (remcom_in_buffer[0]) {
/*
@@ -427,8 +427,15 @@ int kgdb_arch_handle_exception(int vector, int signo, int 
err_code,
/* set the trace bit if we're stepping */
if (remcom_in_buffer[0] == 's') {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-   mtspr(SPRN_DBCR0,
- mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+   dbcr0 = mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM;
+   mtspr(SPRN_DBCR0, dbcr0);
+#ifdef CONFIG_PPC_BOOK3E_64
+   /* With lazy interrut we have to update thread dbcr0 
here
+* to make sure we can set debug properly at last to 
invoke
+* kgdb again to work well.
+*/
+   current->thread.dbcr0 = dbcr0;
+#endif
linux_regs->msr |= MSR_DE;
 #else
linux_regs->msr |= MSR_SE;
-- 
1.7.9.5



[v4][PATCH 4/7] powerpc/book3e: support kgdb for kernel space

2013-03-11 Thread Tiejun Chen
Currently the normal debug exception path only handles exceptions coming
from userspace; to support KGDB we need to skip that restriction so that
kernel-space debug exceptions (e.g. single stepping) can be handled too.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 7df9a1f..fd5d61b 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -598,11 +598,13 @@ kernel_dbg_exc:
rfdi
 
/* Normal debug exception */
+1: andi.   r14,r11,MSR_PR; /* check for userspace again */
+#ifndef CONFIG_KGDB
/* XXX We only handle coming from userspace for now since we can't
 * quite save properly an interrupted kernel state yet
 */
-1: andi.   r14,r11,MSR_PR; /* check for userspace again */
beq kernel_dbg_exc; /* if from kernel mode */
+#endif
 
/* Now we mash up things to make it look like we are coming on a
 * normal exception
-- 
1.7.9.5



[v4][PATCH 6/7] powerpc/kgdb: use DEFINE_PER_CPU to allocate kgdb's thread_info

2013-03-11 Thread Tiejun Chen
Use DEFINE_PER_CPU to allocate the backup thread_info statically instead of
using kmalloc(). This avoids introducing additional memory allocation
checking code.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 1a57307..cde7818 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -151,15 +151,15 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 1;
 }
 
+static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info);
 static int kgdb_singlestep(struct pt_regs *regs)
 {
struct thread_info *thread_info, *exception_thread_info;
-   struct thread_info *backup_current_thread_info;
+   struct thread_info *backup_current_thread_info = 
&__get_cpu_var(kgdb_thread_info);
 
if (user_mode(regs))
return 0;
 
-   backup_current_thread_info = (struct thread_info 
*)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
/*
 * On Book E and perhaps other processors, singlestep is handled on
 * the critical exception stack.  This causes current_thread_info()
@@ -185,7 +185,6 @@ static int kgdb_singlestep(struct pt_regs *regs)
/* Restore current_thread_info lastly. */
memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
 
-   kfree(backup_current_thread_info);
return 1;
 }
 
-- 
1.7.9.5



[v4][PATCH 1/7] powerpc/book3e: load critical/machine/debug exception stack

2013-03-11 Thread Tiejun Chen
We always allocate separate stacks for the critical/machine check/debug
exceptions; this is different from the normal exceptions. So we should
load these exception stacks properly, as we already do for booke.
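
Roughly, what the BOOK3E_LOAD_EXC_LEVEL_STACK macro below does can be
written in C as follows (illustration only; the function is hypothetical,
and the SMP variant indexes the per-CPU array by SPRN_PIR instead of a cpu
argument):

	/* critirq_ctx[] holds one THREAD_SIZE region per CPU; record the
	 * top of this CPU's region in the PACA so CRIT_SET_KSTACK can
	 * later pick it up via PACA_CRIT_STACK */
	static void load_crit_stack(struct paca_struct *paca, int cpu)
	{
		unsigned long top = (unsigned long)critirq_ctx[cpu] + THREAD_SIZE;

		paca->crit_kstack = (void *)top;
	}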

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   49 +++---
 1 file changed, 46 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 1e7782b..7fd6af0 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -36,6 +36,37 @@
  */
 #defineSPECIAL_EXC_FRAME_SIZE  INT_FRAME_SIZE
 
+/* only on book3e */
+#define DBG_STACK_BASE dbgirq_ctx
+#define MC_STACK_BASE  mcheckirq_ctx
+#define CRIT_STACK_BASEcritirq_ctx
+
+#ifdef CONFIG_RELOCATABLE
+#define LOAD_STACK_BASE(reg, level)\
+   tovirt(r2,r2);  \
+   LOAD_REG_ADDR(reg, level##_STACK_BASE);
+#else
+#define LOAD_STACK_BASE(reg, level)\
+   LOAD_REG_IMMEDIATE(reg, level##_STACK_BASE);
+#endif
+
+#ifdef CONFIG_SMP
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   mfspr   r14,SPRN_PIR;   \
+   slwir14,r14,3;  \
+   LOAD_STACK_BASE(r10, level);\
+   add r10,r10,r14;\
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_##level##_STACK(r13);
+#else
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   LOAD_STACK_BASE(r10, level);\
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_##level##_STACK(r13);
+#endif
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -68,20 +99,32 @@
 #define SPRN_GDBELL_SRR1   SPRN_GSRR1
 
 #define CRIT_SET_KSTACK
\
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
 
 #define DBG_SET_KSTACK \
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
 
 #define MC_SET_KSTACK  \
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
 
-- 
1.7.9.5



[PATCH 1/3] kgdb,ppc: do not set kgdb_single_step on ppc

2012-08-22 Thread Tiejun Chen
The kgdb_single_step flag has the possibility to indefinitely
hang the system on an SMP system.

The x86 arch have the same problem, and that problem was fixed by
commit 8097551d9ab9b9e3630(kgdb,x86: do not set kgdb_single_step
on x86). This patch does the same behaviors as x86's patch.

Signed-off-by: Dongdong Deng 
Signed-off-by: Jason Wessel 
Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |1 -
 1 files changed, 0 insertions(+), 1 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 782bd0a..bbabc5a 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -410,7 +410,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int 
err_code,
 #else
linux_regs->msr |= MSR_SE;
 #endif
-   kgdb_single_step = 1;
atomic_set(&kgdb_cpu_doing_single_step,
   raw_smp_processor_id());
}
-- 
1.5.6



[PATCH 3/3] powerpc/kgdb: restore current_thread_info properly

2012-08-22 Thread Tiejun Chen
For powerpc BookE and e200, singlestep is handled on the critical/dbg
exception stack. This causes current_thread_info() to fail inside kgdb,
so we work around this issue by copying the thread_info from the kernel
stack before calling kgdb_handle_exception, and copying it back
afterwards.

But previously we didn't actually do this correctly: we should back up
current_thread_info and then restore it on exit.

CC: Jason Wessel 
Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |   11 +--
 1 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index e84252b..7dcdbe7 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -25,6 +25,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /*
  * This table contains the mapping between PowerPC hardware trap types, and
@@ -156,6 +157,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
 static int kgdb_singlestep(struct pt_regs *regs)
 {
struct thread_info *thread_info, *exception_thread_info;
+   struct thread_info *backup_current_thread_info = \
+   (struct thread_info *)kmalloc(sizeof(struct thread_info), 
GFP_KERNEL);
 
if (user_mode(regs))
return 0;
@@ -173,13 +176,17 @@ static int kgdb_singlestep(struct pt_regs *regs)
thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
exception_thread_info = current_thread_info();
 
-   if (thread_info != exception_thread_info)
+   if (thread_info != exception_thread_info) {
+   /* Save the original current_thread_info. */
+   memcpy(backup_current_thread_info, exception_thread_info, 
sizeof *thread_info);
memcpy(exception_thread_info, thread_info, sizeof *thread_info);
+   }
 
kgdb_handle_exception(0, SIGTRAP, 0, regs);
 
if (thread_info != exception_thread_info)
-   memcpy(thread_info, exception_thread_info, sizeof *thread_info);
+   /* Restore current_thread_info lastly. */
+   memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
 
return 1;
 }
-- 
1.5.6



[PATCH 2/3] powerpc: Bail out of KGDB when we've been triggered

2012-08-22 Thread Tiejun Chen
We need to skip a breakpoint exception when it occurs after
a breakpoint has already been removed.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |   18 ++
 1 files changed, 18 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index bbabc5a..e84252b 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -101,6 +101,24 @@ static int computeSignal(unsigned int tt)
return SIGHUP;  /* default for things we don't know about */
 }
 
+/**
+ *
+ * kgdb_skipexception - Bail out of KGDB when we've been triggered.
+ * @exception: Exception vector number
+ * @regs: Current &struct pt_regs.
+ *
+ * On some architectures we need to skip a breakpoint exception when
+ * it occurs after a breakpoint has been removed.
+ *
+ */
+int kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+   if (kgdb_isremovedbreak(regs->nip))
+   return 1;
+
+   return 0;
+}
+
 static int kgdb_call_nmi_hook(struct pt_regs *regs)
 {
kgdb_nmicallback(raw_smp_processor_id(), regs);
-- 
1.5.6



[v2][PATCH 2/3] powerpc: Bail out of KGDB when we've been triggered

2012-08-22 Thread Tiejun Chen
We need to skip a breakpoint exception when it occurs after
a breakpoint has already been removed.

Signed-off-by: Tiejun Chen 
---
v2: simplify the kgdb_skipexception() return path.

 arch/powerpc/kernel/kgdb.c |   15 +++
 1 files changed, 15 insertions(+), 0 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index bbabc5a..05adb69 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -101,6 +101,21 @@ static int computeSignal(unsigned int tt)
return SIGHUP;  /* default for things we don't know about */
 }
 
+/**
+ *
+ * kgdb_skipexception - Bail out of KGDB when we've been triggered.
+ * @exception: Exception vector number
+ * @regs: Current &struct pt_regs.
+ *
+ * On some architectures we need to skip a breakpoint exception when
+ * it occurs after a breakpoint has been removed.
+ *
+ */
+int kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+   return kgdb_isremovedbreak(regs->nip);
+}
+
 static int kgdb_call_nmi_hook(struct pt_regs *regs)
 {
kgdb_nmicallback(raw_smp_processor_id(), regs);
-- 
1.5.6



[v2][PATCH 1/3] kgdb,ppc: do not set kgdb_single_step on ppc

2012-08-22 Thread Tiejun Chen
The kgdb_single_step flag has the possibility to indefinitely
hang the system on an SMP system.

The x86 arch have the same problem, and that problem was fixed by
commit 8097551d9ab9b9e3630(kgdb,x86: do not set kgdb_single_step
on x86). This patch does the same behaviors as x86's patch.

Signed-off-by: Dongdong Deng 
Signed-off-by: Jason Wessel 
---
v2: nothing changed.

 arch/powerpc/kernel/kgdb.c |1 -
 1 files changed, 0 insertions(+), 1 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 782bd0a..bbabc5a 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -410,7 +410,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int 
err_code,
 #else
linux_regs->msr |= MSR_SE;
 #endif
-   kgdb_single_step = 1;
atomic_set(&kgdb_cpu_doing_single_step,
   raw_smp_processor_id());
}
-- 
1.5.6



[v2][PATCH 3/3] powerpc/kgdb: restore current_thread_info properly

2012-08-22 Thread Tiejun Chen
For powerpc BookE and e200, singlestep is handled on the critical/dbg
exception stack. This causes current_thread_info() to fail inside kgdb,
so we work around this issue by copying the thread_info from the kernel
stack before calling kgdb_handle_exception, and copying it back
afterwards.

But previously we didn't do this properly: we should back up
current_thread_info and then restore it on exit.

Signed-off-by: Tiejun Chen 
---
v2: fix a typo in patch head description.

 arch/powerpc/kernel/kgdb.c |   11 +--
 1 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 05adb69..c470a40 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -25,6 +25,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /*
  * This table contains the mapping between PowerPC hardware trap types, and
@@ -153,6 +154,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
 static int kgdb_singlestep(struct pt_regs *regs)
 {
struct thread_info *thread_info, *exception_thread_info;
+   struct thread_info *backup_current_thread_info = \
+   (struct thread_info *)kmalloc(sizeof(struct thread_info), 
GFP_KERNEL);
 
if (user_mode(regs))
return 0;
@@ -170,13 +173,17 @@ static int kgdb_singlestep(struct pt_regs *regs)
thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
exception_thread_info = current_thread_info();
 
-   if (thread_info != exception_thread_info)
+   if (thread_info != exception_thread_info) {
+   /* Save the original current_thread_info. */
+   memcpy(backup_current_thread_info, exception_thread_info, 
sizeof *thread_info);
memcpy(exception_thread_info, thread_info, sizeof *thread_info);
+   }
 
kgdb_handle_exception(0, SIGTRAP, 0, regs);
 
if (thread_info != exception_thread_info)
-   memcpy(thread_info, exception_thread_info, sizeof *thread_info);
+   /* Restore current_thread_info lastly. */
+   memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
 
return 1;
 }
-- 
1.5.6



[v2][PATCH 1/6] powerpc/book3e: load critical/machine/debug exception stack

2013-01-20 Thread Tiejun Chen
We always allocate separate stacks for the critical/machine check/debug
exceptions; this is different from the normal exceptions. So we should
load these exception stacks properly, as we already do for booke.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   40 +++---
 1 file changed, 37 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index ae54553..767f856 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -36,6 +36,28 @@
  */
 #defineSPECIAL_EXC_FRAME_SIZE  INT_FRAME_SIZE
 
+/* only on book3e */
+#define DBG_STACK_BASE dbgirq_ctx
+#define MC_STACK_BASE  mcheckirq_ctx
+#define CRIT_STACK_BASEcritirq_ctx
+
+#ifdef CONFIG_SMP
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   mfspr   r14,SPRN_PIR;   \
+   slwir14,r14,3;  \
+   LOAD_REG_IMMEDIATE(r10, level##_STACK_BASE);\
+   add r10,r10,r14;\
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_##level##_STACK(r13);
+#else
+#define BOOK3E_LOAD_EXC_LEVEL_STACK(level) \
+   LOAD_REG_IMMEDIATE(r10, level##_STACK_BASE);\
+   ld  r10,0(r10); \
+   addir10,r10,THREAD_SIZE;\
+   std r10,PACA_##level##_STACK(r13);
+#endif
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -68,20 +90,32 @@
 #define SPRN_GDBELL_SRR1   SPRN_GSRR1
 
 #define CRIT_SET_KSTACK
\
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
 
 #define DBG_SET_KSTACK \
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
 
 #define MC_SET_KSTACK  \
+   andi.   r10,r11,MSR_PR; 
\
+   bne 1f; 
\
+   BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+1:
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
 
-- 
1.7.9.5



[v2][PATCH 2/6] powerpc/book3e: store critical/machine/debug exception thread info

2013-01-20 Thread Tiejun Chen
We need to copy the current thread_info into these exception stacks'
thread_info, as we already do for PPC32.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   15 +++
 1 file changed, 15 insertions(+)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 767f856..423a936 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -58,6 +58,18 @@
std r10,PACA_##level##_STACK(r13);
 #endif
 
+/* Store something to exception thread info */
+#defineBOOK3E_STORE_EXC_LEVEL_THEAD_INFO(type) 
\
+   ld  r1,PACAKSAVE(r13);  
\
+   CURRENT_THREAD_INFO(r14, r14);  
\
+   CURRENT_THREAD_INFO(r15, r1);   
\
+   ld  r10,TI_FLAGS(r14);  
\
+   std r10,TI_FLAGS(r15);  
\
+   ld  r10,TI_PREEMPT(r14);
\
+   std r10,TI_PREEMPT(r15);
\
+   ld  r10,TI_TASK(r14);   
\
+   std r10,TI_TASK(r15);
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -95,6 +107,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(CRIT);  
\
ld  r1,PACA_CRIT_STACK(r13);\
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(CRIT);
\
 1:
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
@@ -105,6 +118,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(DBG);   
\
ld  r1,PACA_DBG_STACK(r13); \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(DBG); 
\
 1:
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
@@ -115,6 +129,7 @@
BOOK3E_LOAD_EXC_LEVEL_STACK(MC);
\
ld  r1,PACA_MC_STACK(r13);  \
subir1,r1,SPECIAL_EXC_FRAME_SIZE;   
\
+   BOOK3E_STORE_EXC_LEVEL_THEAD_INFO(MC);  
\
 1:
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
-- 
1.7.9.5



[v2][PATCH 6/6] kgdb/kgdbts: support ppc64

2013-01-20 Thread Tiejun Chen
We can't simply look up the address of a function's entry point via its
symbol on all architectures.

For the PPC64 ABI there is actually a function descriptor structure.

A function descriptor is a three doubleword data structure that contains
the following values:
* The first doubleword contains the address of the entry point of
the function.
* The second doubleword contains the TOC base address for
the function.
* The third doubleword contains the environment pointer for
languages such as Pascal and PL/1.

So we should call the wrapped dereference_function_descriptor() to get
the address of the function's entry point.

Note this is also safe for other architectures; referring to
"include/asm-generic/sections.h":

dereference_function_descriptor(p) is simply (p) when an architecture does
not define its own version.

Signed-off-by: Tiejun Chen 
---
 drivers/misc/kgdbts.c |2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 3aa9a96..4799e1f 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -103,6 +103,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define v1printk(a...) do { \
if (verbose) \
@@ -222,6 +223,7 @@ static unsigned long lookup_addr(char *arg)
addr = (unsigned long)do_fork;
else if (!strcmp(arg, "hw_break_val"))
addr = (unsigned long)&hw_break_val;
+   addr = (unsigned long )dereference_function_descriptor((void *)addr);
return addr;
 }
 
-- 
1.7.9.5



[v2][PATCH 3/6] book3e/kgdb: update thread's dbcr0

2013-01-20 Thread Tiejun Chen
gdb always needs to generate a single step properly to enter a kgdb
state. But with lazy interrupts, book3e can't always trigger a debug
exception from a single step, since the current thread may be blocked
handling pending exceptions, and the DBCR0 configuration we expected to
generate the debug exception is then lost.

So here we also update the thread's dbcr0 to make sure the current
thread can go back with that otherwise lost dbcr0 configuration.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |   13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 8747447..eb30a40 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -409,7 +409,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int 
err_code,
   struct pt_regs *linux_regs)
 {
char *ptr = &remcom_in_buffer[1];
-   unsigned long addr;
+   unsigned long addr, dbcr0;
 
switch (remcom_in_buffer[0]) {
/*
@@ -426,8 +426,15 @@ int kgdb_arch_handle_exception(int vector, int signo, int 
err_code,
/* set the trace bit if we're stepping */
if (remcom_in_buffer[0] == 's') {
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-   mtspr(SPRN_DBCR0,
- mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+   dbcr0 = mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM;
+   mtspr(SPRN_DBCR0, dbcr0);
+#ifdef CONFIG_PPC_BOOK3E_64
+   /* With lazy interrut we have to update thread dbcr0 
here
+* to make sure we can set debug properly at last to 
invoke
+* kgdb again to work well.
+*/
+   current->thread.dbcr0 = dbcr0;
+#endif
linux_regs->msr |= MSR_DE;
 #else
linux_regs->msr |= MSR_SE;
-- 
1.7.9.5



[v2][PATCH 4/6] book3e/kgdb: Fix a single step case of lazy IRQ

2013-01-20 Thread Tiejun Chen
When we're in kgdb_singlestep(), we have to work around
current_thread_info() by copying the thread_info from the kernel stack
before calling kgdb_handle_exception(), then copying it back afterwards.

But PPC64 has a lazy interrupt implementation, so after copying the
thread_info from the kernel stack, if we need to replay an interrupt we
shouldn't restore the previous backup thread_info; that way we can still
replay the interrupt later with the proper thread_info.

This patch uses __check_irq_replay() to guarantee this.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/irq.c  |   10 ++
 arch/powerpc/kernel/kgdb.c |3 ++-
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 4f97fe3..bb8d27a 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -339,7 +339,17 @@ bool prep_irq_for_idle(void)
return true;
 }
 
+notrace unsigned int check_irq_replay(void)
+{
+   return __check_irq_replay();
+}
+#else
+notrace unsigned int check_irq_replay(void)
+{
+   return 0;
+}
 #endif /* CONFIG_PPC64 */
+EXPORT_SYMBOL(check_irq_replay);
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index eb30a40..2f22807 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -151,6 +151,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 1;
 }
 
+extern notrace unsigned int check_irq_replay(void);
 static int kgdb_singlestep(struct pt_regs *regs)
 {
struct thread_info *thread_info, *exception_thread_info;
@@ -181,7 +182,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
 
kgdb_handle_exception(0, SIGTRAP, 0, regs);
 
-   if (thread_info != exception_thread_info)
+   if ((thread_info != exception_thread_info) && (!check_irq_replay()))
/* Restore current_thread_info lastly. */
memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
 
-- 
1.7.9.5



[v2][PATCH 0/6] powerpc/book3e: make kgdb work well

2013-01-20 Thread Tiejun Chen
This patchset is used to support kgdb/gdb on book3e.

v2:

* Make sure we cover CONFIG_PPC_BOOK3E_64 safely
* Use LOAD_REG_IMMEDIATE() to properly load the value of the constant
expression when loading the debug exception stack
* Copy thread_info from the kernel stack when coming from user mode
* Rebase on the latest powerpc git tree

v1:
* Copy thread_info only when we come from !user mode, since we'll get the
  kernel stack directly when coming from user mode.
* remove save/restore of EX_R14/EX_R15 since DBG_EXCEPTION_PROLOG already
  covers this.
* use CURRENT_THREAD_INFO() to get the thread_info conveniently.
* fix some typos
* add a patch to make sure gdb can generate a single step properly to invoke
  a kgdb state.
* add a patch so that if we need to replay an interrupt, we don't restore the
  previous backup thread_info; this makes sure we can replay the interrupt
  later with the proper thread_info.
* rebase on the latest powerpc git tree

v0:
This patchset is used to support kgdb for book3e.


Tiejun Chen (6):
  powerpc/book3e: load critical/machine/debug exception stack
  powerpc/book3e: store critical/machine/debug exception thread info
  book3e/kgdb: update thread's dbcr0
  book3e/kgdb: Fix a single step case of lazy IRQ
  powerpc/book3e: support kgdb for kernel space
  kgdb/kgdbts: support ppc64

 arch/powerpc/kernel/exceptions-64e.S |   60 +++---
 arch/powerpc/kernel/irq.c|   10 ++
 arch/powerpc/kernel/kgdb.c   |   16 ++---
 drivers/misc/kgdbts.c|2 ++
 4 files changed, 80 insertions(+), 8 deletions(-)

Tiejun


[v2][PATCH 5/6] powerpc/book3e: support kgdb for kernel space

2013-01-20 Thread Tiejun Chen
Currently the normal debug exception path only handles exceptions coming
from userspace; to support KGDB we need to skip that restriction so that
kernel-space debug exceptions (e.g. single stepping) can be handled too.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |5 -
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 423a936..6204681 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -589,11 +589,14 @@ kernel_dbg_exc:
rfdi
 
/* Normal debug exception */
+1:
+#ifndef CONFIG_KGDB
/* XXX We only handle coming from userspace for now since we can't
 * quite save properly an interrupted kernel state yet
 */
-1: andi.   r14,r11,MSR_PR; /* check for userspace again */
+   andi.   r14,r11,MSR_PR; /* check for userspace again */
beq kernel_dbg_exc; /* if from kernel mode */
+#endif
 
/* Now we mash up things to make it look like we are coming on a
 * normal exception
-- 
1.7.9.5



[RFC][v0][PATCH 1/1] ppc64: ignore interrupts while offline

2013-01-15 Thread Tiejun Chen
With lazy interrupts, some implementations of hotplug will get
interrupts even while the CPU is offline; just ignore these.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/irq.c |6 ++
 1 file changed, 6 insertions(+)

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 4f97fe3..dbca574 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -144,6 +144,12 @@ notrace unsigned int __check_irq_replay(void)
 */
unsigned char happened = local_paca->irq_happened;
 
+   /* Some implementations of hotplug will get some interrupts while
+* offline, just ignore these.
+*/
+   if (cpu_is_offline(smp_processor_id()))
+   return 0;
+
/* Clear bit 0 which we wouldn't clear otherwise */
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
 
-- 
1.7.9.5



[v1][PATCH 1/1] ppc64: set next_tb to max to prevent replaying the timer interrupt

2013-01-15 Thread Tiejun Chen
With lazy interrupts, __check_irq_replay() always checks
decrementers_next_tb to decide whether the timer interrupt needs to be
replayed. So in the hotplug case we also need to set decrementers_next_tb
to the maximum value to make sure __check_irq_replay() doesn't replay the
timer interrupt on return, as we expect; otherwise we'll trap here forever.
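
As a rough sketch (illustration only, simplified from the decrementer expiry
test consulted during replay), the comparison is effectively the following,
so a maximal decrementers_next_tb can never look expired:

	/* simplified: the replay path treats the decrementer as pending
	 * whenever the timebase has passed decrementers_next_tb */
	static bool decrementer_expired(u64 now, u64 next_tb)
	{
		return now >= next_tb;	/* never true once next_tb == ~(u64)0 */
	}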

Signed-off-by: Tiejun Chen 
---
v1:

* In the hotplug case, set decrementers_next_tb to the maximum value to
prevent replaying the timer interrupt on any offline CPU.

 arch/powerpc/kernel/time.c |9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6f6b1cc..127361e 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -494,10 +494,15 @@ void timer_interrupt(struct pt_regs * regs)
set_dec(DECREMENTER_MAX);
 
/* Some implementations of hotplug will get timer interrupts while
-* offline, just ignore these
+* offline, just ignore these and we also need to set
+* decrementers_next_tb as MAX to make sure __check_irq_replay
+* don't replay timer interrupt when return, otherwise we'll trap
+* here infinitely :(
 */
-   if (!cpu_online(smp_processor_id()))
+   if (!cpu_online(smp_processor_id())) {
+   *next_tb = ~(u64)0;
return;
+   }
 
/* Conditionally hard-enable interrupts now that the DEC has been
 * bumped to its maximum value
-- 
1.7.9.5



[v0][PATCH 1/1] powerpc/book3e: disable interrupt after preempt_schedule_irq

2013-01-06 Thread Tiejun Chen
In the preempt case, arch_local_irq_restore() called from
preempt_schedule_irq() may enable hard interrupts, but we really
should disable interrupts when we return from the interrupt,
so that we don't get interrupted after loading SRR0/1.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/entry_64.S |   13 +
 1 file changed, 13 insertions(+)

diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index e9a906c..4e1de34 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -662,6 +662,19 @@ resume_kernel:
ld  r4,TI_FLAGS(r9)
andi.   r0,r4,_TIF_NEED_RESCHED
bne 1b
+
+   /*
+* arch_local_irq_restore() from preempt_schedule_irq above may
+* enable hard interrupt but we really should disable interrupts
+* when we return from the interrupt, and so that we don't get
+* interrupted after loading SRR0/1.
+*/
+#ifdef CONFIG_PPC_BOOK3E
+   wrteei  0
+#else
+   ld  r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+   mtmsrd  r10,1 /* Update machine state */
+#endif /* CONFIG_PPC_BOOK3E */
 #endif /* CONFIG_PREEMPT */
 
.globl  fast_exc_return_irq
-- 
1.7.9.5



RE: [ANNOUNCE] 4.14.63-rt40

2018-08-23 Thread Tiejun Chen
Steven,

commit 7f11a591bbdb111792298144c3476506aa7f1ca8 (HEAD -> v4.14.63-rt40-rebase, 
tag: v4.14.63-rt40-rebase, origin/v4.14-rt-rebase)
Author: Steven Rostedt (VMware) 
Date:   Wed May 16 09:33:00 2018 -0400

Linux 4.14.63-rt40 REBASE

diff --git a/localversion-rt b/localversion-rt
index 90290c642ed5..a3b2408c1da6 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt29
+-rt37
^
Isn't it supposed to be rt40?

Thanks
Tiejun

> -----Original Message-----
> From: linux-rt-users-ow...@vger.kernel.org On Behalf Of Steven Rostedt
> Sent: Thursday, August 23, 2018 2:15 AM
> To: LKML; linux-rt-users
> Cc: Thomas Gleixner; Carsten Emde; John Kacur; Sebastian Andrzej Siewior;
> Julia Cartwright; Daniel Wagner; Tom Zanussi
> Subject: [ANNOUNCE] 4.14.63-rt40
> 
> 
> Dear RT Folks,
> 
> I'm pleased to announce the 4.14.63-rt40 stable release.
> 
> 
> This release is just an update to the new stable 4.14.63 version and no RT
> specific changes have been made.
> 
> NOTE: There is a known issue with this release. The fix is here:
> 
> 
> http://lkml.kernel.org/r/1534660115.6187.4.camel@gmx.de
> 
> I did not apply it to this release because it needs to be added to v4.16-rt 
> first
> before it gets backported. Feel free to apply it yourself if you have any
> concerns.
> 
> You can get this release via the git tree at:
> 
>   git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git
> 
>   branch: v4.14-rt
>   Head SHA1: da6bee3cef4af60566dc56f3d48fce0b18165107
> 
> 
> Or to build 4.14.63-rt40 directly, the following patches should be applied:
> 
> 
> http://www.kernel.org/pub/linux/kernel/v4.x/linux-4.14.tar.xz
> 
> 
> http://www.kernel.org/pub/linux/kernel/v4.x/patch-4.14.63.xz
> 
> 
> http://www.kernel.org/pub/linux/kernel/projects/rt/4.14/patch-4.14.63-rt40.patch.xz
> 
> 
> 
> 
> Enjoy,
> 
> -- Steve



[RFC][PATCH] gpu:drm:i915:intel_detect_pch: back to check devfn instead of check class type

2014-06-19 Thread Tiejun Chen
Originally the reason to probe the ISA bridge instead of Dev31:Fun0
was to make graphics device passthrough easy for the VMM, which then
only needs to expose an ISA bridge to let the driver know the real
hardware underneath. This was a requirement from the virtualization
team. Especially in virtualized environments such as Xen, there can be
an irrelevant ISA bridge in the system with the legacy qemu version
specific to Xen, qemu-xen-traditional. So to work reliably, we scan
through all the ISA bridge devices and check for the first match,
instead of only checking the first one.

But actually qemu-xen-traditional always enumerates the ISA bridge at
Dev31:Fun0, i.e. 00:1f.0, as follows:

hw/pt-graphics.c:

intel_pch_init()
|
+ pci_isa_bridge_init(bus, PCI_DEVFN(0x1f, 0), ...);

so this means the ISA bridge is still represented at Dev31:Fun0, just
like on native hardware. Furthermore, we're currently pushing VGA
passthrough support into upstream qemu, and after some discussion we
would not set the bridge class type there and would just expose this
devfn.

So we just go back to checking the devfn to keep things simple.

Signed-off-by: Tiejun Chen 
---
 drivers/gpu/drm/i915/i915_drv.c | 19 +++
 1 file changed, 3 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 651e65e..cb2526e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -417,18 +417,8 @@ void intel_detect_pch(struct drm_device *dev)
return;
}
 
-   /*
-* The reason to probe ISA bridge instead of Dev31:Fun0 is to
-* make graphics device passthrough work easy for VMM, that only
-* need to expose ISA bridge to let driver know the real hardware
-* underneath. This is a requirement from virtualization team.
-*
-* In some virtualized environments (e.g. XEN), there is irrelevant
-* ISA bridge in the system. To work reliably, we should scan trhough
-* all the ISA bridge devices and check for the first match, instead
-* of only checking the first one.
-*/
-   while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+   pch = pci_get_bus_and_slot(0, PCI_DEVFN(0x1f, 0));
+   if (pch) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
unsigned short id = pch->device & 
INTEL_PCH_DEVICE_ID_MASK;
dev_priv->pch_id = id;
@@ -462,10 +452,7 @@ void intel_detect_pch(struct drm_device *dev)
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
-   } else
-   continue;
-
-   break;
+   }
}
}
if (!pch)
-- 
1.9.1



[v6][PATCH 0/5] powerpc/book3e: make kgdb work well

2013-10-23 Thread Tiejun Chen
Scott,

Tested on fsl-p5040 DS.

v6:

* rebase
* change the C code to initialize the exception stack addresses in the PACA 
instead.
* Clear PACA_IRQ_HARD_DIS to force a direct exit from the debug exception
  without replaying the interrupt.
* so drop "book3e/kgdb: update thread's dbcr0".

v5:

* rebase on merge branch.

Note the original patch, [PATCH 5/7] kgdb/kgdbts: support ppc64, is already
merged by Jason.

v4:

* use DEFINE_PER_CPU to allocate kgdb's thread_info
* add patch 7 to make sure we copy thread_info back only when !__check_irq_replay
* leave "andi.   r14,r11,MSR_PR" out of "#ifndef CONFIG_KGDB"
  since cr0 is still used later.
* retest

v3:

* make it work when CONFIG_RELOCATABLE is enabled
* fix one typo in patch,
  "powerpc/book3e: store critical/machine/debug exception thread info": 
ld  r1,PACAKSAVE(r13);
->  ld  r14,PACAKSAVE(r13);
* remove copying the thread_info since booke and book3e always copy
  the thread_info now when we enter the debug exception, and so drop
  the v2 patch, "book3e/kgdb: Fix a single step case of lazy IRQ"

v2:

* Make sure we cover CONFIG_PPC_BOOK3E_64 safely
* Use LOAD_REG_IMMEDIATE() to properly load the value of the constant
expression when loading the debug exception stack
* Copy thread_info from the kernel stack when coming from user mode
* Rebase on the latest powerpc git tree

v1:

* Copy thread_info only when we come from !user mode, since we'll get the
  kernel stack directly when coming from user mode.
* remove save/restore of EX_R14/EX_R15 since DBG_EXCEPTION_PROLOG already
  covers this.
* use CURRENT_THREAD_INFO() to get the thread_info conveniently.
* fix some typos
* add a patch to make sure gdb can generate a single step properly to invoke
  a kgdb state.
* add a patch so that if we need to replay an interrupt, we don't restore the
  previous backup thread_info; this makes sure we can replay the interrupt
  later with the proper thread_info.
* rebase on the latest powerpc git tree

v0:

This patchset is used to support kgdb for book3e.

----
Tiejun Chen (5):
  powerpc/book3e: initialize crit/mc/dbg kernel stack pointers
  powerpc/book3e: store crit/mc/dbg exception thread info
  powerpc/book3e: support kgdb for kernel space
  powerpc/kgdb: use DEFINE_PER_CPU to allocate kgdb's thread_info
  powerpc/book3e/kgdb: Fix a single step case of lazy IRQ

 arch/powerpc/kernel/exceptions-64e.S |   26 ++
 arch/powerpc/kernel/kgdb.c   |   13 ++---
 arch/powerpc/kernel/setup_64.c   |   18 --
 3 files changed, 44 insertions(+), 13 deletions(-)

Tiejun


[v6][PATCH 5/5] powerpc/book3e/kgdb: Fix a single step case of lazy IRQ

2013-10-23 Thread Tiejun Chen
With the lazy EE magic, a lazy interrupt may have occurred while
entering kgdb, but we really don't want to replay that interrupt
for kgdb, so we clear PACA_IRQ_HARD_DIS to force a direct exit
from this debug exception.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |8 
 1 file changed, 8 insertions(+)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 447c14b..9872f58 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -185,6 +185,14 @@ static int kgdb_singlestep(struct pt_regs *regs)
/* Restore current_thread_info lastly. */
memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
 
+#ifdef CONFIG_PPC64
+   /*
+* Clear the PACA_IRQ_HARD_DIS from the pending mask
+* since we are about to exit this directly from debug
+* exception without any replay interrupt in lazy EE case.
+*/
+   local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+#endif
return 1;
 }
 
-- 
1.7.9.5



[v6][PATCH 2/5] powerpc/book3e: store crit/mc/dbg exception thread info

2013-10-23 Thread Tiejun Chen
We need to copy the current thread_info into these exception stacks'
thread_info, as we already do for PPC32.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/exceptions-64e.S |   22 +++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S 
b/arch/powerpc/kernel/exceptions-64e.S
index 68d74b4..a55cf62 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -36,6 +36,19 @@
  */
 #defineSPECIAL_EXC_FRAME_SIZE  INT_FRAME_SIZE
 
+/* Now we only store something to exception thread info */
+#defineEXC_LEVEL_EXCEPTION_PROLOG(type)
\
+   ld  r14,PACAKSAVE(r13); \
+   CURRENT_THREAD_INFO(r14, r14);  \
+   CURRENT_THREAD_INFO(r15, r1);   \
+   ld  r10,TI_FLAGS(r14);  \
+   std r10,TI_FLAGS(r15);  \
+   ld  r10,TI_PREEMPT(r14);\
+   std r10,TI_PREEMPT(r15);\
+   ld  r10,TI_TASK(r14);   \
+   std r10,TI_TASK(r15);
+
+
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)\
mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
@@ -69,19 +82,22 @@
 
 #define CRIT_SET_KSTACK
\
ld  r1,PACA_CRIT_STACK(r13);\
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   \
+   EXC_LEVEL_EXCEPTION_PROLOG(CRIT);
 #define SPRN_CRIT_SRR0 SPRN_CSRR0
 #define SPRN_CRIT_SRR1 SPRN_CSRR1
 
 #define DBG_SET_KSTACK \
ld  r1,PACA_DBG_STACK(r13); \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   \
+   EXC_LEVEL_EXCEPTION_PROLOG(DBG);
 #define SPRN_DBG_SRR0  SPRN_DSRR0
 #define SPRN_DBG_SRR1  SPRN_DSRR1
 
 #define MC_SET_KSTACK  \
ld  r1,PACA_MC_STACK(r13);  \
-   subir1,r1,SPECIAL_EXC_FRAME_SIZE;
+   subir1,r1,SPECIAL_EXC_FRAME_SIZE;   \
+   EXC_LEVEL_EXCEPTION_PROLOG(MC);
 #define SPRN_MC_SRR0   SPRN_MCSRR0
 #define SPRN_MC_SRR1   SPRN_MCSRR1
 
-- 
1.7.9.5



[v6][PATCH 4/5] powerpc/kgdb: use DEFINE_PER_CPU to allocate kgdb's thread_info

2013-10-23 Thread Tiejun Chen
Use DEFINE_PER_CPU to allocate the backup thread_info statically instead of
using kmalloc(). This avoids introducing additional memory allocation
checking code.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/kgdb.c |5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index c1eef24..447c14b 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -151,15 +151,15 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 1;
 }
 
+static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info);
 static int kgdb_singlestep(struct pt_regs *regs)
 {
struct thread_info *thread_info, *exception_thread_info;
-   struct thread_info *backup_current_thread_info;
+   struct thread_info *backup_current_thread_info = 
&__get_cpu_var(kgdb_thread_info);
 
if (user_mode(regs))
return 0;
 
-   backup_current_thread_info = kmalloc(sizeof(struct thread_info), 
GFP_KERNEL);
/*
 * On Book E and perhaps other processors, singlestep is handled on
 * the critical exception stack.  This causes current_thread_info()
@@ -185,7 +185,6 @@ static int kgdb_singlestep(struct pt_regs *regs)
/* Restore current_thread_info lastly. */
memcpy(exception_thread_info, backup_current_thread_info, 
sizeof *thread_info);
 
-   kfree(backup_current_thread_info);
return 1;
 }
 
-- 
1.7.9.5



[v6][PATCH 1/5] powerpc/book3e: initialize crit/mc/dbg kernel stack pointers

2013-10-23 Thread Tiejun Chen
We already allocate the critical/machine check/debug exception stacks,
but we should also initialize the associated kernel stack pointers in
the PACA for use by these special exceptions.

Signed-off-by: Tiejun Chen 
---
 arch/powerpc/kernel/setup_64.c |   18 --
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 278ca93..5c96d92 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -526,14 +526,20 @@ static void __init exc_lvl_early_init(void)
extern unsigned int exc_debug_debug_book3e;
 
unsigned int i;
+   unsigned long sp;
 
for_each_possible_cpu(i) {
-   critirq_ctx[i] = (struct thread_info *)
-   __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
-   dbgirq_ctx[i] = (struct thread_info *)
-   __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
-   mcheckirq_ctx[i] = (struct thread_info *)
-   __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
+   sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+   critirq_ctx[i] = (struct thread_info *)__va(sp);
+   paca[i].crit_kstack = __va(sp + THREAD_SIZE);
+
+   sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+   dbgirq_ctx[i] = (struct thread_info *)__va(sp);
+   paca[i].dbg_kstack = __va(sp + THREAD_SIZE);
+
+   sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+   mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
+   paca[i].mc_kstack = __va(sp + THREAD_SIZE);
}
 
if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
-- 
1.7.9.5



  1   2   >