patch-2.4.20 linux-2.4.20/arch/ppc/kernel/head.S
- Lines: 542
- Date: Thu Nov 28 15:53:11 2002
- Orig file: linux-2.4.19/arch/ppc/kernel/head.S
- Orig date: Fri Aug 2 17:39:43 2002
diff -urN linux-2.4.19/arch/ppc/kernel/head.S linux-2.4.20/arch/ppc/kernel/head.S
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.head.S 1.43 06/25/02 17:24:29 benh
+ * BK Id: %F% %I% %G% %U% %#%
*/
/*
* PowerPC version
@@ -22,17 +22,19 @@
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
- *
+ *
*/
#include <linux/config.h>
-#include "ppc_asm.h"
+#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
+#include <asm/ppc_asm.h>
+#include "ppc_defs.h"
#ifdef CONFIG_APUS
#include <asm/amigappc.h>
@@ -48,9 +50,9 @@
ld RB,(n*32)+24(reg); \
mtspr DBAT##n##U,RA; \
mtspr DBAT##n##L,RB; \
-
+
#else /* CONFIG_PPC64BRIDGE */
-
+
/* the 601 only has IBATs; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
/* see the comment for clear_bats() -- Cort */ \
@@ -66,7 +68,7 @@
lwz RB,(n*16)+12(reg); \
mtspr DBAT##n##U,RA; \
mtspr DBAT##n##L,RB; \
-1:
+1:
#endif /* CONFIG_PPC64BRIDGE */
.text
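The LOAD_BAT macro fills one instruction/data BAT (Block Address Translation) register pair from an in-memory table: 32-byte entries read with ld on PPC64BRIDGE, 16-byte entries read with lwz otherwise; per the comment, cr0.eq is set on the 601 so the DBAT half can be skipped (the 601 has no DBATs). A minimal sketch of the 16-byte entry layout implied by the (n*16)+offset addressing, illustrative and not part of the patch:

        /* one 32-bit BAT table entry as LOAD_BAT consumes it */
bat_entry_sketch:
        .long   0       /* +0:  IBATnU (BEPI, BL, Vs/Vp) */
        .long   0       /* +4:  IBATnL (BRPN, WIMG, PP) */
        .long   0       /* +8:  DBATnU */
        .long   0       /* +12: DBATnL */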
@@ -113,7 +115,7 @@
* PREP
* This is jumped to on prep systems right after the kernel is relocated
* to its proper place in memory by the boot loader. The expected layout
- * of the regs is:
+ * of the regs is:
* r3: ptr to residual data
* r4: initrd_start or if no initrd then 0
* r5: initrd_end - unused if r4 is 0
@@ -124,7 +126,7 @@
* start_here() to do the real work.
* -- Cort
*/
-
+
.globl __start
__start:
/*
@@ -184,6 +186,17 @@
mtsr 12,r5
#endif /* CONFIG_POWER4 */
+ /*
+ * Call setup_cpu for CPU 0
+ */
+ bl reloc_offset
+ li r24,0 /* cpu# */
+ bl call_setup_cpu /* Call setup_cpu for this CPU */
+#ifdef CONFIG_6xx
+ bl reloc_offset
+ bl init_idle_6xx
+#endif /* CONFIG_6xx */
+
#ifndef CONFIG_APUS
/*
* We need to run with _start at physical address 0.
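Both new calls run before the MMU is on and, on many boards, before the kernel has been copied down to its link address, so each is preceded by bl reloc_offset to obtain the run-time-minus-link-time displacement in r3 for call_setup_cpu and init_idle_6xx. A minimal sketch of that idiom, assuming the usual shape (the real reloc_offset lives elsewhere in arch/ppc/kernel and is not part of this patch):

        /* reloc_offset sketch: r3 = where we are - where we were
         * linked; zero once the kernel runs at its link address.
         */
        mflr    r0              /* preserve caller's LR */
        bl      1f              /* LR <- run-time address of 1: */
1:      mflr    r3
        lis     r4,1b@h         /* link-time address of 1: */
        ori     r4,r4,1b@l
        subf    r3,r4,r3
        mtlr    r0
        blr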
@@ -404,16 +417,12 @@
EXCEPTION_PROLOG;
addi r3,r1,STACK_FRAME_OVERHEAD
li r20,MSR_KERNEL
-#ifndef CONFIG_APUS
li r4,0
bl transfer_to_handler
.globl do_IRQ_intercept
do_IRQ_intercept:
.long do_IRQ;
.long ret_from_intercept
-#else
- bl apus_interrupt_entry
-#endif /* CONFIG_APUS */
/* Alignment exception */
. = 0x600
@@ -503,7 +512,7 @@
Trap_0f:
EXCEPTION_PROLOG
b trap_0f_cont
-
+
/*
* Handle TLB miss for instruction on 603/603e.
* Note: we get an alternate set of r0 - r3 to use automatically.
@@ -652,7 +661,7 @@
mtcrf 0x80,r3 /* Restore CR0 */
mtmsr r0
b DataAccess
-
+
/*
* Handle TLB miss for DATA Store on 603/603e
*/
@@ -775,6 +784,7 @@
SAVE_8GPRS(24, r21)
andi. r23,r23,MSR_PR
mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
+ addi r2,r23,-THREAD /* set r2 to current */
beq 2f
addi r24,r1,STACK_FRAME_OVERHEAD
stw r24,PT_REGS(r23)
@@ -784,7 +794,8 @@
stw r22,THREAD_VRSAVE(r23)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
-2: addi r2,r23,-THREAD /* set r2 to current */
+ .globl transfer_to_handler_cont
+transfer_to_handler_cont:
tovirt(r2,r2)
mflr r23
andi. r24,r23,0x3f00 /* get vector offset */
@@ -805,6 +816,23 @@
mtlr r23
SYNC
RFI /* jump to handler, enable MMU */
+2:
+ /* Out of line case when returning to kernel,
+ * check return from power_save_6xx
+ */
+#ifdef CONFIG_6xx
+
+ mfspr r24,SPRN_HID0
+ mtcr r24
+BEGIN_FTR_SECTION
+ bt- 8,power_save_6xx_restore /* Check DOZE */
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
+BEGIN_FTR_SECTION
+ bt- 9,power_save_6xx_restore /* Check NAP */
+END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
+ b transfer_to_handler_cont
+
+#endif /* CONFIG_6xx */
/*
* On kernel stack overflow, load up an initial stack pointer
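The new out-of-line 2: path above handles an exception that wakes the core from doze or nap: mtcr mirrors HID0 into the condition register bit-for-bit, so bt- 8 tests HID0[DOZE] and bt- 9 tests HID0[NAP], diverting through power_save_6xx_restore before rejoining transfer_to_handler_cont. With the CPU_FTR_* feature sections stripped away, the test reduces to this illustrative sketch:

        mfspr   r24,SPRN_HID0
        mtcr    r24                             /* CR bit i <- HID0 bit i */
        bt-     8,power_save_6xx_restore        /* still dozing? */
        bt-     9,power_save_6xx_restore        /* still napping? */
        b       transfer_to_handler_cont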
@@ -891,7 +919,7 @@
lwz r21,GPR21(r21)
SYNC
RFI
-
+
/*
* FP unavailable trap from kernel - print a message, but let
* the task use FP in the kernel until it returns to user mode.
@@ -1036,7 +1064,7 @@
#endif /* CONFIG_SMP */
blr
#endif /* CONFIG_ALTIVEC */
-
+
/*
* giveup_fpu(tsk)
* Disable FP for the task given as the argument,
@@ -1188,62 +1216,6 @@
sync /* additional sync needed on g4 */
isync /* No speculative loading until now */
blr
-
-apus_interrupt_entry:
- /* This is horrible, but there's no way around it. Enable the
- * data cache so the IRQ hardware register can be accessed
- * without cache intervention. Then disable interrupts and get
- * the current emulated m68k IPL value.
- */
-
- mfmsr r20
- xori r20,r20,MSR_DR
- SYNC
- mtmsr r20
- isync
-
- lis r4,APUS_IPL_EMU@h
-
- li r20,(IPLEMU_SETRESET|IPLEMU_DISABLEINT)
- stb r20,APUS_IPL_EMU@l(r4)
- eieio
-
- lbz r3,APUS_IPL_EMU@l(r4)
-
- li r2,IPLEMU_IPLMASK
- rlwinm. r20,r3,32-3,29,31
- bne 2f
- mr r20,r2 /* lvl7! Need to reset state machine. */
- b 3f
-2: cmp 0,r20,r2
- beq 1f
-3: eieio
- stb r2,APUS_IPL_EMU@l(r4)
- ori r20,r20,IPLEMU_SETRESET
- eieio
- stb r20,APUS_IPL_EMU@l(r4)
-1: eieio
- li r20,IPLEMU_DISABLEINT
- stb r20,APUS_IPL_EMU@l(r4)
-
- /* At this point we could do some magic to avoid the overhead
- * of calling the C interrupt handler in case of a spurious
- * interrupt. Could not get a simple hack to work though.
- */
-
- mfmsr r20
- xori r20,r20,MSR_DR
- SYNC
- mtmsr r20
- isync
-
- stw r3,(_CCR+4)(r21);
-
- addi r3,r1,STACK_FRAME_OVERHEAD;
- li r20,MSR_KERNEL;
- bl transfer_to_handler;
- .long do_IRQ;
- .long ret_from_except
/***********************************************************************
* Please note that on APUS the exception handlers are located at the
@@ -1266,7 +1238,6 @@
bl prom_init
b __secondary_start
#endif /* CONFIG_GEMINI */
-
.globl __secondary_start_psurge
__secondary_start_psurge:
li r24,1 /* cpu # */
@@ -1302,7 +1273,11 @@
mr r4,r24
bl identify_cpu
bl call_setup_cpu /* Call setup_cpu for this CPU */
-
+#ifdef CONFIG_6xx
+ lis r3,-KERNELBASE@h
+ bl init_idle_6xx
+#endif /* CONFIG_6xx */
+
/* get current */
lis r2,current_set@h
ori r2,r2,current_set@l
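Here the relocation offset for init_idle_6xx is a constant rather than a reloc_offset call: a secondary CPU at this point still runs one-to-one at physical addresses, so physical minus virtual is exactly -KERNELBASE, and since KERNELBASE is 64K-aligned the low half is zero and lis alone builds the value. Illustrative, assuming the conventional KERNELBASE of 0xc0000000:

        /* -0xc0000000 mod 2^32 = 0x40000000; @h takes the high
         * 16 bits and lis zeroes the rest.
         */
        lis     r3,-KERNELBASE@h        /* r3 = 0x40000000 */
        bl      init_idle_6xx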
@@ -1357,6 +1332,20 @@
bl setup_750_7400_hid0
mtlr r4
blr
+_GLOBAL(__setup_cpu_750cx)
+ mflr r4
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+ bl setup_750cx
+ mtlr r4
+ blr
+_GLOBAL(__setup_cpu_750fx)
+ mflr r4
+ bl setup_common_caches
+ bl setup_750_7400_hid0
+ bl setup_750fx
+ mtlr r4
+ blr
_GLOBAL(__setup_cpu_7400)
mflr r4
bl setup_common_caches
@@ -1374,19 +1363,13 @@
_GLOBAL(__setup_cpu_7450)
mflr r4
bl setup_common_caches
- bl setup_7450_hid0
- mtlr r4
- blr
-_GLOBAL(__setup_cpu_7450_23)
- mflr r4
- bl setup_common_caches
- bl setup_7450_23_hid0
+ bl setup_745x_specifics
mtlr r4
blr
_GLOBAL(__setup_cpu_7455)
mflr r4
bl setup_common_caches
- bl setup_7455_hid0
+ bl setup_745x_specifics
mtlr r4
blr
_GLOBAL(__setup_cpu_power3)
@@ -1400,7 +1383,11 @@
setup_common_caches:
mfspr r11,HID0
andi. r0,r11,HID0_DCE
+#ifdef CONFIG_DCACHE_DISABLE
+ ori r11,r11,HID0_ICE
+#else
ori r11,r11,HID0_ICE|HID0_DCE
+#endif
ori r8,r11,HID0_ICFI
bne 1f /* don't invalidate the D-cache */
ori r8,r8,HID0_DCI /* unless it wasn't enabled */
@@ -1447,7 +1434,19 @@
isync
blr
-/* 7450
+/* 750cx specific
+ * Looks like we have to disable NAP feature for some PLL settings...
+ * (waiting for confirmation)
+ */
+setup_750cx:
+ blr
+
+/* 750fx specific
+ */
+setup_750fx:
+ blr
+
+/* MPC 745x
* Enable Store Gathering (SGE), Branch Folding (FOLD)
* Branch History Table (BHTE), Branch Target ICache (BTIC)
* Dynamic Power Management (DPM), Speculative (SPD)
@@ -1455,8 +1454,9 @@
* Timebase has to be running or we wouldn't have made it here,
* just ensure we don't disable it.
* Clear Instruction cache throttling (ICTC)
+ * Enable L2 HW prefetch
*/
-setup_7450_hid0:
+setup_745x_specifics:
/* We check for the presence of an L3 cache setup by
* the firmware. If any, we disable NAP capability as
* it's known to be bogus on rev 2.1 and earlier
@@ -1464,21 +1464,22 @@
mfspr r11,SPRN_L3CR
andis. r11,r11,L3CR_L3E@h
beq 1f
- li r7,CPU_FTR_CAN_NAP
lwz r6,CPU_SPEC_FEATURES(r5)
+ andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
+ beq 1f
+ li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r5)
1:
-setup_7450_23_hid0:
mfspr r11,HID0
/* All of the bits we have to set.....
- */
+ */
ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC | HID0_LRSTK
oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
/* All of the bits we have to clear....
- */
+ */
li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
andc r11,r11,r3 /* clear SPD: enable speculative */
li r3,0
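The reworked test at the top of this hunk narrows the NAP workaround: previously any firmware-enabled L3 cleared CPU_FTR_CAN_NAP unconditionally; now the feature word is touched only when the cpu_specs entry carries the new CPU_FTR_L3_DISABLE_NAP flag (per the comment, rev 2.1 and earlier parts). As a C-equivalent sketch, in comment form:

        /* illustrative restatement of the sequence above:
         *
         *      if ((l3cr & L3CR_L3E) &&
         *          (spec->features & CPU_FTR_L3_DISABLE_NAP))
         *              spec->features &= ~CPU_FTR_CAN_NAP;
         */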
@@ -1488,34 +1489,13 @@
mtspr HID0,r11
sync
isync
- blr
-
-/* 7450
- * Enable Store Gathering (SGE), Branch Folding (FOLD)
- * Branch History Table (BHTE), Branch Target ICache (BTIC)
- * Dynamic Power Management (DPM), Speculative (SPD)
- * Ensure our data cache instructions really operate.
- * Timebase has to be running or we wouldn't have made it here,
- * just ensure we don't disable it.
- * Clear Instruction cache throttling (ICTC)
- */
-setup_7455_hid0:
- mfspr r11,HID0
-
- /* All of the bits we have to set.....
- */
- ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC | HID0_LRSTK
- oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
- /* All of the bits we have to clear....
- */
- li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
- andc r11,r11,r3 /* clear SPD: enable speculative */
- li r3,0
-
- mtspr ICTC,r3 /* Instruction Cache Throttling off */
- isync
- mtspr HID0,r11
+ /* Enable L2 HW prefetch
+ */
+ mfspr r3,SPRN_MSSCR0
+ ori r3,r3,3
+ sync
+ mtspr SPRN_MSSCR0,r3
sync
isync
blr
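The merged routine also gains the L2 hardware prefetch enable promised in its header comment. A hedged reading (assumption: on the 745x family the two low-order MSSCR0 bits form the L2 prefetch-engine field, so ori ...,3 turns on all engines; the sync/mtspr/sync/isync bracket orders the mode change):

        mfspr   r3,SPRN_MSSCR0
        ori     r3,r3,3                 /* prefetch-engine field <- 0b11 */
        sync                            /* drain prior storage accesses */
        mtspr   SPRN_MSSCR0,r3
        sync
        isync                           /* make the new mode visible */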
@@ -1563,11 +1543,6 @@
* This is where the main kernel code starts.
*/
start_here:
- /* Call setup_cpu for CPU 0 */
- li r3,0 /* data offset */
- li r24,0 /* cpu# */
- bl call_setup_cpu
-
/* ptr to current */
lis r2,init_task_union@h
ori r2,r2,init_task_union@l
@@ -1626,12 +1601,27 @@
/* Load up the kernel context */
2:
sync /* Force all PTE updates to finish */
- ISYNC_601
+ isync
tlbia /* Clear all TLB entries */
sync /* wait for tlbia/tlbie to finish */
TLBSYNC /* ... on all CPUs */
bl load_up_mmu
+
+#ifdef CONFIG_BDI_SWITCH
+ /* Add helper information for the Abatron bdiGDB debugger.
+ * We do this here because we know the mmu is disabled, and
+ * will be enabled for real in just a few instructions.
+ */
+ lis r5, abatron_pteptrs@h
+ ori r5, r5, abatron_pteptrs@l
+ stw r5, 0xf0(r0) /* This must match your Abatron config */
+ lis r6, swapper_pg_dir@h
+ ori r6, r6, swapper_pg_dir@l
+ tophys(r5, r5)
+ stw r6, 0(r5)
+#endif
+
/* Now turn on the MMU for real! */
li r4,MSR_KERNEL
FIX_SRR1(r4,r5)
@@ -1651,12 +1641,22 @@
addis r3,r3,0x6000 /* Set Ks, Ku bits */
li r0,NUM_USER_SEGMENTS
mtctr r0
+
+#ifdef CONFIG_BDI_SWITCH
+ /* Context switch the PTE pointer for the Abatron BDI2000.
+ * The PGDIR is passed as second argument.
+ */
+ lis r5, KERNELBASE@h
+ lwz r5, 0xf0(r5)
+ stw r4, 0x4(r5)
+#endif
+
li r4,0
BEGIN_FTR_SECTION
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-3:
+3: isync
#ifdef CONFIG_PPC64BRIDGE
slbie r4
#endif /* CONFIG_PPC64BRIDGE */
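Taken together, the two CONFIG_BDI_SWITCH hunks keep the Abatron BDI2000's view of the page tables current: at boot the kernel plants the address of abatron_pteptrs at physical 0xf0 (whatever location the probe is configured to read) and fills in the kernel PGD; on every context switch the second word is rewritten with the incoming task's PGD from r4. The implied layout of the 8-byte area reserved at the end of this file, as an illustrative sketch:

        /* abatron_pteptrs layout, inferred from the two stw's above
         * and the .space 8 at file end:
         */
abatron_pteptrs_sketch:
        .long   0       /* +0: kernel PGD -- swapper_pg_dir, set at boot */
        .long   0       /* +4: user PGD -- current task's, set per switch */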
@@ -1684,16 +1684,16 @@
rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
cmpwi r9, 1
beq 1f
-
+
mtspr DBAT0U,r20
- mtspr DBAT0L,r20
+ mtspr DBAT0L,r20
mtspr DBAT1U,r20
mtspr DBAT1L,r20
mtspr DBAT2U,r20
- mtspr DBAT2L,r20
+ mtspr DBAT2L,r20
mtspr DBAT3U,r20
mtspr DBAT3L,r20
-1:
+1:
mtspr IBAT0U,r20
mtspr IBAT0L,r20
mtspr IBAT1U,r20
@@ -1762,7 +1762,7 @@
#else
ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
#endif /* CONFIG_APUS */
-
+
#ifdef CONFIG_PPC64BRIDGE
/* clear out the high 32 bits in the BAT */
clrldi r11,r11,32
@@ -1851,12 +1851,12 @@
.globl swapper_pg_dir
swapper_pg_dir:
- .space 4096
+ .space 4096
/*
* This space gets a copy of optional info passed to us by the bootstrap
* Used to pass parameters into the kernel like root=/dev/sda1, etc.
- */
+ */
.globl cmd_line
cmd_line:
.space 512
@@ -1869,3 +1869,11 @@
.long 0, 0, 0, 0, 0, 0, 0, 0
.long 0, 0, 0, 0, 0, 0, 0, 0
.long 0, 0, 0, 0, 0, 0, 0, 0
+
+#ifdef CONFIG_BDI_SWITCH
+/* Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+abatron_pteptrs:
+ .space 8
+#endif