On 07/04/2013 07:54:13 AM, Kevin Hao wrote:
@@ -1222,6 +1266,9 @@ _GLOBAL(switch_to_as1)
/*
* Restore to the address space 0 and also invalidate the tlb entry created
* by switch_to_as1.
+ * r3 - the tlb entry which should be invalidated
+ * r4 - __pa(PAGE_OFFSET in AS0) - pa(PAGE_OFFSET in AS1)
+ * r5 - device tree virtual address
*/
_GLOBAL(restore_to_as0)
mflr r0
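
For reference, the C prototype implied by the new r3/r4/r5 arguments would
presumably end up as something like the sketch below; the actual declaration
isn't in the quoted hunks, so the parameter names are just my guesses based
on the comment above:

	/* sketch only -- names guessed from the r3/r4/r5 comment */
	extern void restore_to_as0(int esel, int offset, void *dt_ptr);
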
@@ -1230,7 +1277,15 @@ _GLOBAL(restore_to_as0)
0: mflr r9
addi r9,r9,1f - 0b
- mfmsr r7
+ /*
+ * We may map the PAGE_OFFSET in AS0 to a different physical address,
+ * so we need calculate the right jump and device tree address based
+ * on the offset passed by r4.
+ */
Whitespace
+ subf r9,r4,r9
+ subf r5,r4,r5
+
+2: mfmsr r7
li r8,(MSR_IS | MSR_DS)
andc r7,r7,r8
@@ -1249,9 +1304,19 @@ _GLOBAL(restore_to_as0)
mtspr SPRN_MAS1,r9
tlbwe
isync
+
+ cmpwi r4,0
+ bne 3f
mtlr r0
blr
+ /*
+ * The PAGE_OFFSET will map to a different physical address,
+ * jump to _start to do another relocation again.
+ */
+3: mr r3,r5
+ bl _start
+
/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 8f60ef8..dd283fd 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -224,7 +224,7 @@ void __init adjust_total_lowmem(void)
i = switch_to_as1();
__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
- restore_to_as0(i);
+ restore_to_as0(i, 0, 0);
The device tree virtual address is zero?
pr_info("Memory CAM mapping: ");
for (i = 0; i < tlbcam_index - 1; i++)
@@ -245,30 +245,56 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
}
#ifdef CONFIG_RELOCATABLE
-notrace void __init relocate_init(phys_addr_t start)
+int __initdata is_second_reloc;
+notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
{
unsigned long base = KERNELBASE;
- /*
- * Relocatable kernel support based on processing of dynamic
- * relocation entries.
- * Compute the virt_phys_offset :
- * virt_phys_offset = stext.run - kernstart_addr
- *
- * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
- * When we relocate, we have :
- *
- * (kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
- *
- * hence:
- * virt_phys_offset = (KERNELBASE & ~0xfffffff) -
- * (kernstart_addr & ~0xfffffff)
- *
- */
kernstart_addr = start;
- start &= ~0xfffffff;
- base &= ~0xfffffff;
- virt_phys_offset = base - start;
+ if (!is_second_reloc) {
Since it's at the end of a function and one side is much shorter than the
other, please do:

	if (is_second_reloc) {
		virt_phys_offset = PAGE_OFFSET - memstart_addr;
		return;
	}

	/* the rest of the code goes here without having to indent everything */

Otherwise, please use positive logic for if/else constructs.
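
Put concretely, with the early return the whole function would read roughly
like the sketch below (my reconstruction from the quoted hunks, reusing the
patch's names; untested, and the first-pass body is elided where marked):

	notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
	{
		unsigned long base = KERNELBASE;
		phys_addr_t size;

		kernstart_addr = start;

		/* second pass: memstart_addr is already known */
		if (is_second_reloc) {
			virt_phys_offset = PAGE_OFFSET - memstart_addr;
			return;
		}

		/* first pass: provisional offset based on the current mapping */
		start &= ~0xfffffff;
		base &= ~0xfffffff;
		virt_phys_offset = base - start;
		early_get_first_memblock_info(__va(dt_ptr), &size);

		/* ... the start != memstart_addr check from the patch goes
		 * here, no longer needing the extra indent level ... */
	}
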
+ phys_addr_t size;
+
+ /*
+ * Relocatable kernel support based on processing of dynamic
+ * relocation entries. Before we get the real memstart_addr,
+ * We will compute the virt_phys_offset like this:
+ * virt_phys_offset = stext.run - kernstart_addr
+ *
+ * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
+ * When we relocate, we have :
+ *
+ * (kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
+ *
+ * hence:
+ * virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
+ *
+ */
+ start &= ~0xfffffff;
+ base &= ~0xfffffff;
+ virt_phys_offset = base - start;
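
As a concrete example of this arithmetic (made-up numbers): with
KERNELBASE = 0xc0000000 and a kernel loaded so that kernstart_addr is
0x44000000, the masking above gives

	start = 0x44000000 & ~0xfffffff = 0x40000000
	base  = 0xc0000000 & ~0xfffffff = 0xc0000000
	virt_phys_offset = 0xc0000000 - 0x40000000 = 0x80000000

so stext.run = kernstart_addr + virt_phys_offset = 0xc4000000, i.e. the
256M-aligned KERNELBASE plus the low 0xfffffff bits of the load address,
matching the comment above.
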
+ early_get_first_memblock_info(__va(dt_ptr), &size);
+ /*
+ * We now get the memstart_addr, then we should check if this
+ * address is the same as what the PAGE_OFFSET map to now. If
+ * not we have to change the map of PAGE_OFFSET to memstart_addr
+ * and do a second relocation.
+ */
+ if (start != memstart_addr) {
+ unsigned long ram;
+ int n, offset = memstart_addr - start;
+
+ is_second_reloc = 1;
+ ram = size;
+ n = switch_to_as1();
+ map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
Do we really need this much RAM mapped at this point? Why can't we continue
with the same size TLB entry that we've been using, until the second
relocation?
+ restore_to_as0(n, offset, __va(dt_ptr));
+ /* We should never reach here */
+ panic("Relocation error");
Where is execution supposed to resume? It looks like you're expecting it
to resume from _start, but why? And where is this effect of
restore_to_as0() documented?
-Scott