patch-2.4.10 linux/arch/ppc/kernel/head_8xx.S
- Lines: 453
- Date: Wed Aug 29 20:49:36 2001
- Orig file: v2.4.9/linux/arch/ppc/kernel/head_8xx.S
- Orig date: Tue Jul 3 17:08:18 2001
diff -u --recursive --new-file v2.4.9/linux/arch/ppc/kernel/head_8xx.S linux/arch/ppc/kernel/head_8xx.S
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.head_8xx.S 1.14 06/28/01 15:50:16 paulus
+ * BK Id: SCCS/s.head_8xx.S 1.21 08/28/01 16:27:27 trini
*/
/*
* arch/ppc/kernel/except_8xx.S
@@ -31,6 +31,7 @@
#include <asm/mmu.h>
#include <asm/cache.h>
#include <asm/pgtable.h>
+#include <asm/cputable.h>
.text
.globl _stext
@@ -87,70 +88,10 @@
mr r27,r7
li r24,0 /* cpu # */
- tlbia /* Invalidate all TLB entries */
- li r8, 0
- mtspr MI_CTR, r8 /* Set instruction control to zero */
- lis r8, MD_RESETVAL@h
-#ifndef CONFIG_8xx_COPYBACK
- oris r8, r8, MD_WTDEF@h
-#endif
- mtspr MD_CTR, r8 /* Set data TLB control */
-
- /* Now map the lower 8 Meg into the TLBs. For this quick hack,
- * we can load the instruction and data TLB registers with the
- * same values.
- */
- lis r8, KERNELBASE@h /* Create vaddr for TLB */
- ori r8, r8, MI_EVALID /* Mark it valid */
- mtspr MI_EPN, r8
- mtspr MD_EPN, r8
- li r8, MI_PS8MEG /* Set 8M byte page */
- ori r8, r8, MI_SVALID /* Make it valid */
- mtspr MI_TWC, r8
- mtspr MD_TWC, r8
- li r8, MI_BOOTINIT /* Create RPN for address 0 */
- mtspr MI_RPN, r8 /* Store TLB entry */
- mtspr MD_RPN, r8
- lis r8, MI_Kp@h /* Set the protection mode */
- mtspr MI_AP, r8
- mtspr MD_AP, r8
-
- /* Map another 8 MByte at the IMMR to get the processor
- * internal registers (among other things).
+ /* We have to turn on the MMU right away so we get cache modes
+ * set correctly.
*/
- mfspr r9, 638 /* Get current IMMR */
- andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
-
- mr r8, r9 /* Create vaddr for TLB */
- ori r8, r8, MD_EVALID /* Mark it valid */
- mtspr MD_EPN, r8
- li r8, MD_PS8MEG /* Set 8M byte page */
- ori r8, r8, MD_SVALID /* Make it valid */
- mtspr MD_TWC, r8
- mr r8, r9 /* Create paddr for TLB */
- ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
- mtspr MD_RPN, r8
-
- /* Since the cache is enabled according to the information we
- * just loaded into the TLB, invalidate and enable the caches here.
- * We should probably check/set other modes....later.
- */
- lis r8, IDC_INVALL@h
- mtspr IC_CST, r8
- mtspr DC_CST, r8
- lis r8, IDC_ENABLE@h
- mtspr IC_CST, r8
-#ifdef CONFIG_8xx_COPYBACK
- mtspr DC_CST, r8
-#else
- /* For a debug option, I left this here to easily enable
- * the write through cache mode
- */
- lis r8, DC_SFWT@h
- mtspr DC_CST, r8
- lis r8, IDC_ENABLE@h
- mtspr DC_CST, r8
-#endif
+ bl initial_mmu
/* We now have the lower 8 Meg mapped into TLB entries, and the caches
* ready to work.
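
[As background, an illustrative C model -- not part of the patch -- of the boot mapping that the new initial_mmu routine installs: a single pinned 8 MB page translating KERNELBASE 1:1 to physical address 0, loaded identically into the instruction and data TLBs through the EPN/TWC/RPN register triple. The flag values below are placeholders, not the real MI_* bit encodings.

    /* Sketch only: how the three SPR writes compose one boot TLB entry. */
    #include <stdint.h>

    #define KERNELBASE  0xc0000000u
    #define MI_EVALID   0x1u  /* placeholder flag values, not the real bits */
    #define MI_SVALID   0x1u
    #define MI_PS8MEG   0x2u
    #define MI_BOOTINIT 0x3u

    struct boot_tlb { uint32_t epn, twc, rpn; };

    static struct boot_tlb make_boot_mapping(void)
    {
        struct boot_tlb t;
        t.epn = KERNELBASE | MI_EVALID; /* virtual page number, marked valid */
        t.twc = MI_PS8MEG | MI_SVALID;  /* 8 MB page size, entry valid */
        t.rpn = 0x0u | MI_BOOTINIT;     /* physical page 0 plus cache attributes */
        return t;
    }
]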
@@ -513,6 +454,7 @@
#else
li r21, 0x00f0
rlwimi r20, r21, 0, 24, 28
+
#endif
#ifdef CONFIG_8xx_CPU6
li r3, 0x3d80
@@ -721,131 +663,34 @@
giveup_fpu:
blr
-/*
- * This code is jumped to from the startup code to copy
- * the kernel image to physical address 0.
- */
-relocate_kernel:
- lis r9,0x426f /* if booted from BootX, don't */
- addi r9,r9,0x6f58 /* translate source addr */
- cmpw r31,r9 /* (we have to on chrp) */
- beq 7f
- rlwinm r4,r4,0,8,31 /* translate source address */
- add r4,r4,r3 /* to region mapped with BATs */
-7: addis r9,r26,klimit@ha /* fetch klimit */
- lwz r25,klimit@l(r9)
- addis r25,r25,-KERNELBASE@h
- li r6,0 /* Destination offset */
- li r5,0x4000 /* # bytes of memory to copy */
- bl copy_and_flush /* copy the first 0x4000 bytes */
- addi r0,r3,4f@l /* jump to the address of 4f */
- mtctr r0 /* in copy and do the rest. */
- bctr /* jump to the copy */
-4: mr r5,r25
- bl copy_and_flush /* copy the rest */
- b turn_on_mmu
-
-/*
- * Copy routine used to copy the kernel to start at physical address 0
- * and flush and invalidate the caches as needed.
- * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
- * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
- */
-copy_and_flush:
- addi r5,r5,-4
- addi r6,r6,-4
-4: li r0,CACHE_LINE_SIZE/4
- mtctr r0
-3: addi r6,r6,4 /* copy a cache line */
- lwzx r0,r6,r4
- stwx r0,r6,r3
- bdnz 3b
- dcbst r6,r3 /* write it to memory */
- sync
- icbi r6,r3 /* flush the icache line */
- cmplw 0,r6,r5
- blt 4b
- isync
- addi r5,r5,4
- addi r6,r6,4
+/* Maybe someday.......
+*/
+_GLOBAL(__setup_cpu_8xx)
blr
-#ifdef CONFIG_SMP
- .globl __secondary_start_psurge
-__secondary_start_psurge:
- li r24,1 /* cpu # */
- b __secondary_start
-
- .globl __secondary_hold
-__secondary_hold:
- /* tell the master we're here */
- lis r5,0x4@h
- ori r5,r5,0x4@l
- stw r3,0(r5)
- dcbf 0,r5
-100:
- lis r5,0
- dcbi 0,r5
- lwz r4,0(r5)
- /* wait until we're told to start */
- cmp 0,r4,r3
- bne 100b
- /* our cpu # was at addr 0 - go */
- lis r5,__secondary_start@h
- ori r5,r5,__secondary_start@l
- tophys(r5,r5)
- mtlr r5
- mr r24,r3 /* cpu # */
- blr
-#endif /* CONFIG_SMP */
-
/*
* This is where the main kernel code starts.
*/
start_here:
-#ifdef CONFIG_SMP
- /* if we're the second cpu stack and r2 are different
- * and we want to not clear the bss -- Cort */
- lis r5,first_cpu_booted@h
- ori r5,r5,first_cpu_booted@l
- lwz r5,0(r5)
- cmpi 0,r5,0
- beq 99f
-
- /* get current */
- lis r2,current_set@h
- ori r2,r2,current_set@l
- slwi r24,r24,2 /* cpu # to current_set[cpu#] */
- add r2,r2,r24
- lwz r2,0(r2)
- b 10f
-99:
-#endif /* CONFIG_SMP */
+
/* ptr to current */
lis r2,init_task_union@h
ori r2,r2,init_task_union@l
- /* Clear out the BSS */
- lis r11,_end@ha
- addi r11,r11,_end@l
- lis r8,__bss_start@ha
- addi r8,r8,__bss_start@l
- subf r11,r8,r11
- addi r11,r11,3
- rlwinm. r11,r11,30,2,31
- beq 2f
- addi r8,r8,-4
- mtctr r11
- li r0,0
-3: stwu r0,4(r8)
- bdnz 3b
-2:
-#ifdef CONFIG_SMP
-10:
-#endif /* CONFIG_SMP */
+
+ /* ptr to phys current thread */
+ tophys(r4,r2)
+ addi r4,r4,THREAD /* init task's THREAD */
+ mtspr SPRG3,r4
+ li r3,0
+ mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
+
/* stack */
addi r1,r2,TASK_UNION_SIZE
li r0,0
stwu r0,-STACK_FRAME_OVERHEAD(r1)
+
+ bl early_init /* We have to do this with MMU on */
+
/*
* Decide what sort of machine this is and initialize the MMU.
*/
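
[For reference, the removed copy_and_flush routine (register contract: r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset) amounts to the following C sketch. It assumes a PowerPC compiler for the inline cache-management instructions, and the 16-byte line size is an assumption of the sketch, since the real CACHE_LINE_SIZE comes from a kernel header.

    #include <string.h>

    #define CACHE_LINE_SIZE 16  /* assumed 8xx L1 line size for this sketch */

    /* Copy the kernel image a cache line at a time, pushing each line out
     * of the data cache and invalidating the matching instruction cache
     * line so the relocated code can be executed afterwards. */
    static void copy_and_flush(char *dst, const char *src, unsigned long limit)
    {
        unsigned long off;

        for (off = 0; off < limit; off += CACHE_LINE_SIZE) {
            memcpy(dst + off, src + off, CACHE_LINE_SIZE);
            __asm__ volatile("dcbst 0,%0" : : "r"(dst + off) : "memory");
            __asm__ volatile("sync");   /* let the write reach memory */
            __asm__ volatile("icbi 0,%0" : : "r"(dst + off) : "memory");
        }
        __asm__ volatile("isync");  /* discard stale prefetched instructions */
    }
]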
@@ -854,12 +699,11 @@
mr r5,r29
mr r6,r28
mr r7,r27
- bl identify_machine
+ bl machine_init
bl MMU_init
/*
* Go back to running unmapped so we can load up new values
- * for SDR1 (hash table pointer) and the segment registers
* and change to using our exception vectors.
* On the 8xx, all we have to do is invalidate the TLB to clear
* the old 8M byte TLB mappings and load the page table base register.
@@ -869,8 +713,8 @@
* easier......until someone changes init's static structures.
*/
lis r6, swapper_pg_dir@h
- tophys(r6,r6)
ori r6, r6, swapper_pg_dir@l
+ tophys(r6,r6)
#ifdef CONFIG_8xx_CPU6
lis r4, cpu6_errata_word@h
ori r4, r4, cpu6_errata_word@l
@@ -891,65 +735,117 @@
SYNC /* Force all PTE updates to finish */
tlbia /* Clear all TLB entries */
sync /* wait for tlbia/tlbie to finish */
-#ifdef CONFIG_SMP
- tlbsync /* ... on all CPUs */
- sync
-#endif
-/* Set up for using our exception vectors */
- /* ptr to phys current thread */
- tophys(r4,r2)
- addi r4,r4,THREAD /* init task's THREAD */
- mtspr SPRG3,r4
- li r3,0
- mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
+ TLBSYNC /* ... on all CPUs */
+
+ /* set up the PTE pointers for the Abatron bdiGDB.
+ */
+ tovirt(r6,r6)
+ lis r5, abatron_pteptrs@h
+ ori r5, r5, abatron_pteptrs@l
+ stw r5, 0xf0(r0) /* Must match your Abatron config file */
+ tophys(r5,r5)
+ stw r6, 0(r5)
+
/* Now turn on the MMU for real! */
li r4,MSR_KERNEL
lis r3,start_kernel@h
ori r3,r3,start_kernel@l
-#ifdef CONFIG_SMP
- /* the second time through here we go to
- * start_secondary(). -- Cort
- */
- lis r5,first_cpu_booted@h
- ori r5,r5,first_cpu_booted@l
- tophys(r5,r5)
- lwz r5,0(r5)
- cmpi 0,r5,0
- beq 10f
- lis r3,start_secondary@h
- ori r3,r3,start_secondary@l
-10:
-#endif /* CONFIG_SMP */
mtspr SRR0,r3
mtspr SRR1,r4
rfi /* enable MMU and jump to start_kernel */
+/* Set up the initial MMU state so we can do the first level of
+ * kernel initialization. This maps the first 8 MBytes of memory 1:1
+ * virtual to physical. Also, set the cache mode since that is defined
+ * by TLB entries, and perform any additional mapping (like the IMMR).
+ */
+initial_mmu:
+ tlbia /* Invalidate all TLB entries */
+ li r8, 0
+ mtspr MI_CTR, r8 /* Set instruction control to zero */
+ lis r8, MD_RESETVAL@h
+#ifndef CONFIG_8xx_COPYBACK
+ oris r8, r8, MD_WTDEF@h
+#endif
+ mtspr MD_CTR, r8 /* Set data TLB control */
+
+ /* Now map the lower 8 Meg into the TLBs. For this quick hack,
+ * we can load the instruction and data TLB registers with the
+ * same values.
+ */
+ lis r8, KERNELBASE@h /* Create vaddr for TLB */
+ ori r8, r8, MI_EVALID /* Mark it valid */
+ mtspr MI_EPN, r8
+ mtspr MD_EPN, r8
+ li r8, MI_PS8MEG /* Set 8M byte page */
+ ori r8, r8, MI_SVALID /* Make it valid */
+ mtspr MI_TWC, r8
+ mtspr MD_TWC, r8
+ li r8, MI_BOOTINIT /* Create RPN for address 0 */
+ mtspr MI_RPN, r8 /* Store TLB entry */
+ mtspr MD_RPN, r8
+ lis r8, MI_Kp@h /* Set the protection mode */
+ mtspr MI_AP, r8
+ mtspr MD_AP, r8
+
+ /* Map another 8 MByte at the IMMR to get the processor
+ * internal registers (among other things).
+ */
+ mfspr r9, 638 /* Get current IMMR */
+ andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
+
+ mr r8, r9 /* Create vaddr for TLB */
+ ori r8, r8, MD_EVALID /* Mark it valid */
+ mtspr MD_EPN, r8
+ li r8, MD_PS8MEG /* Set 8M byte page */
+ ori r8, r8, MD_SVALID /* Make it valid */
+ mtspr MD_TWC, r8
+ mr r8, r9 /* Create paddr for TLB */
+ ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
+ mtspr MD_RPN, r8
+
+ /* Since the cache is enabled according to the information we
+ * just loaded into the TLB, invalidate and enable the caches here.
+ * We should probably check/set other modes....later.
+ */
+ lis r8, IDC_INVALL@h
+ mtspr IC_CST, r8
+ mtspr DC_CST, r8
+ lis r8, IDC_ENABLE@h
+ mtspr IC_CST, r8
+#ifdef CONFIG_8xx_COPYBACK
+ mtspr DC_CST, r8
+#else
+ /* For a debug option, I left this here to easily enable
+ * the write through cache mode
+ */
+ lis r8, DC_SFWT@h
+ mtspr DC_CST, r8
+ lis r8, IDC_ENABLE@h
+ mtspr DC_CST, r8
+#endif
+ blr
+
+
/*
* Set up to use a given MMU context.
+ * r3 is context number, r4 is PGD pointer.
*
- * The MPC8xx has something that currently happens "automagically."
- * Unshared user space address translations are subject to ASID (context)
- * match. During each task switch, the ASID is incremented. We can
- * guarantee (I hope :-) that no entries currently match this ASID
- * because every task will cause at least a TLB entry to be loaded for
- * the first instruction and data access, plus the kernel running will
- * have displaced several more TLBs. The MMU contains 32 entries for
- * each TLB, and there are 16 contexts, so we just need to make sure
- * two pages get replaced for every context switch, which currently
- * happens. There are other TLB management techniques that I will
- * eventually implement, but this is the easiest for now. -- Dan
- *
- * On the MPC8xx, we place the physical address of the new task
- * page directory loaded into the MMU base register, and set the
- * ASID compare register with the new "context".
+ * We place the physical address of the new task page directory loaded
+ * into the MMU base register, and set the ASID compare register with
+ * the new "context."
*/
_GLOBAL(set_context)
- /* fetch the pgd from the context_mm array */
- lis r5, context_mm@ha
- slwi r6, r3, 2
- add r5, r5, r6
- lwz r5, context_mm@l(r5) /* get the mm */
- lwz r4, MM_PGD(r5) /* get the pgd from the mm */
+
+#ifdef CONFIG_BDI_SWITCH
+ /* Context switch the PTE pointer for the Abatron BDI2000.
+ * The PGDIR is passed as second argument.
+ */
+ lis r5, KERNELBASE@h
+ lwz r5, 0xf0(r5)
+ stw r4, 0x4(r5)
+#endif
+
#ifdef CONFIG_8xx_CPU6
lis r6, cpu6_errata_word@h
ori r6, r6, cpu6_errata_word@l
@@ -967,7 +863,6 @@
tophys (r4, r4)
mtspr M_TWB, r4 /* and pgd */
#endif
- tlbia
SYNC
blr
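
[In C terms, the reworked set_context, together with the Abatron bookkeeping above, does roughly the following; this is an illustrative sketch, not kernel code. mtspr() and the SPR numbers are stand-ins for the kernel's accessors (the numeric values are assumptions), and the fixed word at 0xf0 is where start_here stored the address of abatron_pteptrs.

    #define KERNELBASE   0xc0000000u
    #define SPRN_M_CASID 793  /* ASID compare register; SPR number assumed */
    #define SPRN_M_TWB   796  /* tablewalk base register; SPR number assumed */
    #define mtspr(spr, v) __asm__ volatile("mtspr %0,%1" : : "i"(spr), "r"(v))

    /* ctx: new context number (ASID); pgd: new task's page directory (virtual) */
    static void set_context(unsigned long ctx, unsigned long pgd)
    {
    #ifdef CONFIG_BDI_SWITCH
        /* Keep the debugger's table current: slot 1 holds the user pgdir,
         * mirroring the stw to 0x4(r5) in the assembly above. */
        unsigned long *pteptrs = *(unsigned long **)(KERNELBASE + 0xf0);
        pteptrs[1] = pgd;
    #endif
        mtspr(SPRN_M_CASID, ctx);            /* new address space ID */
        mtspr(SPRN_M_TWB, pgd - KERNELBASE); /* tophys(): physical pgdir */
    }
]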
@@ -1012,6 +907,12 @@
.globl cmd_line
cmd_line:
.space 512
+
+/* Room for two PTE table pointers, usually for the kernel and the current
+ * user process, each pointing to its root page table (pgdir).
+ */
+abatron_pteptrs:
+ .space 8
#ifdef CONFIG_8xx_CPU6
.globl cpu6_errata_word
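
[For reference -- illustrative C, not part of the patch -- the space reserved above amounts to this layout; start_here fills the first slot with swapper_pg_dir and set_context refreshes the second on every context switch, while the fixed word at 0xf0 points the Abatron BDI2000 probe at the table.

    /* Layout sketch of abatron_pteptrs (.space 8 above): two 32-bit slots. */
    struct abatron_pteptrs {
        unsigned long kernel_pgdir; /* set once by start_here (swapper_pg_dir) */
        unsigned long user_pgdir;   /* updated by set_context() on each switch */
    };
]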