patch-1.3.44 linux/arch/sparc/kernel/etrap.S
- Lines: 327
- Date: Sat Nov 25 02:58:00 1995
- Orig file: v1.3.43/linux/arch/sparc/kernel/etrap.S
- Orig date: Thu Jan 1 02:00:00 1970
diff -u --recursive --new-file v1.3.43/linux/arch/sparc/kernel/etrap.S linux/arch/sparc/kernel/etrap.S
@@ -0,0 +1,326 @@
+/* $Id: etrap.S,v 1.10 1995/11/25 00:57:58 davem Exp $
+ * etrap.S: Sparc trap window preparation for entry into the
+ * Linux kernel.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+#include <asm/head.h>
+#include <asm/page.h>
+#include <asm/psr.h>
+#include <asm/ptrace.h>
+#include <asm/winmacro.h>
+
+/* Registers to not touch at all. */
+#define t_psr l0 /* Set by caller */
+#define t_pc l1 /* Set by caller */
+#define t_npc l2 /* Set by caller */
+#define t_wim l3 /* Set by caller */
+#define t_twinmask l4 /* Set at beginning of this entry routine. */
+#define t_kstack l5 /* Set right before pt_regs frame is built */
+#define t_retpc l6 /* If you change this, change winmacro.h header file */
+#define t_systable l7 /* Never touch this, could be the syscall table ptr. */
+#define curptr g4 /* Set after pt_regs frame is built */
+
+ .text
+ .align 4
+
+ /* SEVEN WINDOW PATCH INSTRUCTIONS */
+ .globl tsetup_7win_patch1, tsetup_7win_patch2
+ .globl tsetup_7win_patch3, tsetup_7win_patch4
+ .globl tsetup_7win_patch5, tsetup_7win_patch6
+tsetup_7win_patch1: sll %t_wim, 0x6, %t_wim
+tsetup_7win_patch2: and %g2, 0x7f, %g2
+tsetup_7win_patch3: and %g2, 0x7f, %g2
+tsetup_7win_patch4: and %g1, 0x7f, %g1
+tsetup_7win_patch5: sll %t_wim, 0x6, %t_wim
+tsetup_7win_patch6: and %g2, 0x7f, %g2
+ /* END OF PATCH INSTRUCTIONS */
+
+ /* At trap time, interrupts and all generic traps do the
+ * following:
+ *
+ * rd %psr, %l0
+ * b some_handler
+ * rd %wim, %l3
+ * nop
+ *
+ * Then, if 'some_handler' needs a trap frame (ie. it has
+ * to call c-code and the trap cannot be handled in-window),
+ * it does the SAVE_ALL macro in entry.S which does
+ *
+ * sethi %hi(trap_setup), %l4
+ * jmpl %l4 + %lo(trap_setup), %l6
+ * mov 1, %l4
+ */
+
+ /* 2 3 4 window number
+ * -----
+ * O T S mnemonic
+ *
+ * O == Current window before trap
+ * T == Window entered when trap occurred
+ * S == Window we will need to save if (1<<T) == %wim
+ *
+ * Before execution gets here, it must be guaranteed that
+ * %l0 contains trap time %psr, %l1 and %l2 contain the
+ * trap pc and npc, and %l3 contains the trap time %wim.
+ */
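A minimal C sketch of the trap-window mask computed at trap_setup below.
It relies on two facts: the low five bits of the SPARC %psr are the
current window pointer (CWP), and sll uses only the low five bits of its
shift count, so "mov 1; sll %t_twinmask, %t_psr, %t_twinmask" yields
1 << CWP. The PSR_CWP_MASK name is an illustrative assumption:

#define PSR_CWP_MASK 0x1f /* assumed mask for the CWP field of %psr */

static unsigned int trap_window_mask(unsigned int psr)
{
        /* one bit per register window, bit CWP set */
        return 1U << (psr & PSR_CWP_MASK);
}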
+
+ .globl trap_setup, tsetup_patch1, tsetup_patch2
+ .globl tsetup_patch3, tsetup_patch4
+ .globl tsetup_patch5, tsetup_patch6
+trap_setup:
+ /* Calculate mask of trap window. See if from user
+ * or kernel and branch conditionally.
+ */
+ mov 1, %t_twinmask
+ sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
+ andcc %t_psr, PSR_PS, %g0 ! fromsupv_p = (psr & PSR_PS)
+ be trap_setup_from_user ! nope, from user mode
+ nop
+
+ /* From kernel, allocate more kernel stack and
+ * build a pt_regs trap frame.
+ */
+ sub %fp, (STACKFRAME_SZ + TRACEREG_SZ), %t_kstack
+ STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, t_wim, g2)
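A C sketch of where the pt_regs trap frame lands. For a trap from kernel
mode (here) it sits below the current frame pointer; for a trap from
user mode (trap_setup_from_user below) it sits at the top of the task's
kernel stack page. The function is a hypothetical illustration; the real
STACKFRAME_SZ and TRACEREG_SZ values come from the kernel headers:

static unsigned long trap_frame_base(unsigned long base,
                                     unsigned long stackframe_sz,
                                     unsigned long traceregs_sz)
{
        /* base is %fp for kernel traps, or kernel_stack_page +
         * PAGE_SIZE for user traps; the pt_regs area sits just below
         * it, with a basic stack frame reserved beneath that. */
        return base - (stackframe_sz + traceregs_sz);
}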
+
+ /* See if we are in the trap window. */
+ andcc %t_twinmask, %t_wim, %g0
+ be 1f
+ nop
+
+ b trap_setup_kernel_spill ! in trap window, clean up
+ nop
+
+ /* Trap from kernel with a window available.
+ * Just do it...
+ */
+1:
+ mov %t_kstack, %sp ! jump onto new stack
+ jmpl %t_retpc + 0x8, %g0 ! return to caller
+ nop
+
+trap_setup_kernel_spill:
+ LOAD_CURRENT(curptr)
+ ld [%curptr + THREAD_UMASK], %g1
+ orcc %g0, %g1, %g0
+ bne trap_setup_user_spill ! there are some user windows, yuck
+ nop
+
+ /* Spill from kernel, but only kernel windows, adjust
+ * %wim and go.
+ */
+ srl %t_wim, 0x1, %g2 ! begin computation of new %wim
+tsetup_patch1: sll %t_wim, 0x7, %t_wim ! patched on 7 window Sparcs
+ or %t_wim, %g2, %g2
+tsetup_patch2: and %g2, 0xff, %g2 ! patched on 7 window Sparcs
+
+ save %g0, %g0, %g0
+
+ /* Set new %wim value */
+ wr %g2, 0x0, %wim
+ WRITE_PAUSE
+
+ /* Save the kernel window onto the corresponding stack. */
+ STORE_WINDOW(sp)
+
+ restore %g0, %g0, %g0
+
+ mov %t_kstack, %sp ! and onto new kernel stack
+ jmpl %t_retpc + 0x8, %g0 ! return to caller
+ nop
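The tsetup_patch1/tsetup_patch2 pair above implements a rotate-right by
one of %wim within the implemented window count; the patch instructions
at the top of the file swap the constants between 8-window (sll 0x7,
mask 0xff) and 7-window (sll 0x6, mask 0x7f) CPUs, and the same sequence
is reused at tsetup_patch5/tsetup_patch6 below. A C sketch of the
rotation:

static unsigned int rotate_wim_right(unsigned int wim, int nwindows)
{
        unsigned int winmask = (1U << nwindows) - 1;

        /* srl by 1, sll by nwindows-1, or together, mask down to the
         * implemented windows: a rotate right by one position. */
        return ((wim >> 1) | (wim << (nwindows - 1))) & winmask;
}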
+
+trap_setup_from_user:
+ /* We can't use %curptr yet. */
+ LOAD_CURRENT(t_kstack)
+ ld [%t_kstack + TASK_KSTACK_PG], %t_kstack
+
+ /* Build pt_regs frame. */
+ add %t_kstack, (PAGE_SIZE - STACKFRAME_SZ - TRACEREG_SZ), %t_kstack
+ STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, t_wim, g2)
+
+ /* Clear current->tss.w_saved */
+ LOAD_CURRENT(curptr)
+ st %g0, [%curptr + THREAD_W_SAVED]
+
+ /* See if we are in the trap window. */
+ andcc %t_twinmask, %t_wim, %g0
+ bne trap_setup_user_spill ! yep we are
+ orn %g0, %t_twinmask, %g1 ! negate trap win mask into %g1
+
+ /* Trap from user, but not into the invalid window.
+ * Calculate the new umask. The way this works is that
+ * any window from the %wim at trap time up to the
+ * window right before the one we are in now is a
+ * user window. A diagram:
+ *
+ * 7 6 5 4 3 2 1 0 window number
+ * ---------------
+ *   I     L T     mnemonic
+ *
+ * Window 'I' is the invalid window in our example,
+ * window 'L' is the window the user was in when
+ * the trap occurred, and window T is the trap window
+ * we are in now. Therefore, windows 5, 4 and 3
+ * are user windows. The following sequence
+ * computes the user winmask to represent this.
+ */
+ subcc %t_wim, %t_twinmask, %g2
+ bneg,a 1f
+ sub %g2, 0x1, %g2
+1:
+ andn %g2, %t_twinmask, %g2
+tsetup_patch3: and %g2, 0xff, %g2 ! patched on 7win Sparcs
+ st %g2, [%curptr + THREAD_UMASK] ! store new umask
+
+ mov %t_kstack, %sp ! and onto kernel stack
+ jmpl %t_retpc + 0x8, %g0 ! return to caller
+ nop
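A C sketch of the umask computation above. Subtracting the trap-window
bit from %wim yields exactly the bits between the two windows; when the
subtraction wraps below zero, borrowing one more corrects the result,
and the andn drops the trap window itself. The final mask is the patched
0xff/0x7f window mask:

static unsigned int user_window_mask(unsigned int wim,
                                     unsigned int twinmask,
                                     unsigned int winmask)
{
        unsigned int umask = wim - twinmask;

        if ((int)umask < 0)     /* wrapped past window zero */
                umask -= 1;
        return (umask & ~twinmask) & winmask;
}

With the diagram's values (wim = 1 << 6, twinmask = 1 << 2, winmask =
0xff) this returns 0x38: windows 5, 4 and 3, as described.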
+
+trap_setup_user_spill:
+ /* A spill occurred from either kernel or user mode
+ * and there exist some user windows to deal with.
+ * A mask of the currently valid user windows
+ * is in %g1 upon entry to here.
+ */
+
+tsetup_patch4: and %g1, 0xff, %g1 ! patched on 7win Sparcs, mask
+ srl %t_wim, 0x1, %g2 ! compute new %wim
+tsetup_patch5: sll %t_wim, 0x7, %t_wim ! patched on 7win Sparcs
+ or %t_wim, %g2, %g2 ! %g2 is new %wim
+tsetup_patch6: and %g2, 0xff, %g2 ! patched on 7win Sparcs
+ andn %g1, %g2, %g1 ! clear this bit in %g1
+ st %g1, [%curptr + THREAD_UMASK]
+
+ save %g0, %g0, %g0
+
+ wr %g2, 0x0, %wim
+ WRITE_PAUSE
+
+ /* Call MMU-architecture dependent stack checking
+ * routine.
+ */
+ .globl C_LABEL(tsetup_mmu_patchme)
+C_LABEL(tsetup_mmu_patchme): b C_LABEL(tsetup_sun4c_stackchk)
+ andcc %sp, 0x7, %g0
+
+trap_setup_user_stack_is_bolixed:
+ /* From user/kernel into invalid window w/bad user
+ * stack. Save bad user stack, and return to caller.
+ */
+ SAVE_BOLIXED_USER_STACK(curptr, g3)
+ restore %g0, %g0, %g0
+ mov %t_kstack, %sp
+ jmpl %t_retpc + 0x8, %g0
+ nop
+
+trap_setup_good_ustack:
+ STORE_WINDOW(sp)
+
+trap_setup_finish_up:
+ restore %g0, %g0, %g0
+ mov %t_kstack, %sp
+ jmpl %t_retpc + 0x8, %g0
+ nop
+
+ /* Architecture specific stack checking routines. When either
+ * of these routines is called, the globals are free to use
+ * as they have been safely stashed on the new kernel stack
+ * pointer. Thus the definition below for simplicity.
+ */
+#define glob_tmp g1
+
+ .globl C_LABEL(tsetup_sun4c_stackchk)
+ .globl C_LABEL(tsetup_srmmu_stackchk)
+C_LABEL(tsetup_sun4c_stackchk):
+ /* Done by caller: andcc %sp, 0x7, %g0 */
+ be 1f
+ sra %sp, 29, %glob_tmp
+
+ b trap_setup_user_stack_is_bolixed
+ nop
+
+1:
+ add %glob_tmp, 0x1, %glob_tmp
+ andncc %glob_tmp, 0x1, %g0
+ be 1f
+ and %sp, 0xfff, %glob_tmp ! delay slot
+
+ b trap_setup_user_stack_is_bolixed
+ nop
+
+ /* See if our dump area will be on more than one
+ * page.
+ */
+1:
+ add %glob_tmp, 0x38, %glob_tmp
+ andncc %glob_tmp, 0xff8, %g0
+ be tsetup_sun4c_onepage ! only one page to check
+ lda [%sp] ASI_PTE, %glob_tmp ! have to check first page anyway
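A C sketch of the page-boundary test just performed: the register dump
area runs from %sp to %sp + 0x38, so with an 8-byte aligned %sp the andn
against 0xff8 leaves a nonzero value exactly when the area crosses into
a second page:

static int dump_area_crosses_page(unsigned long sp)
{
        /* mirror of: and %sp, 0xfff; add 0x38; andncc with 0xff8 */
        return (((sp & 0xfff) + 0x38) & ~0xff8UL) != 0;
}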
+
+tsetup_sun4c_twopages:
+ /* Is first page ok permission-wise? */
+ srl %glob_tmp, 29, %glob_tmp
+ cmp %glob_tmp, 0x6
+ be 1f
+ add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */
+
+ b trap_setup_user_stack_is_bolixed
+ nop
+
+1:
+ sra %glob_tmp, 29, %glob_tmp
+ add %glob_tmp, 0x1, %glob_tmp
+ andncc %glob_tmp, 0x1, %g0
+ be 1f
+ add %sp, 0x38, %glob_tmp
+
+ b trap_setup_user_stack_is_bolixed
+ nop
+
+1:
+ lda [%glob_tmp] ASI_PTE, %glob_tmp
+
+tsetup_sun4c_onepage:
+ srl %glob_tmp, 29, %glob_tmp
+ cmp %glob_tmp, 0x6 ! can user write to it?
+ be trap_setup_good_ustack ! success
+ nop
+
+ b trap_setup_user_stack_is_bolixed
+ nop
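A C sketch of the sun4c checks as a whole, assuming (as the code above
does) that the top three bits of a sun4c PTE encode its protection and
that the value 6 means user-writable. The PTE arguments stand in for the
"lda [...] ASI_PTE" probes, which C cannot express directly, and the
hole re-check on the second page's address is elided for brevity:

static int sun4c_stack_ok(unsigned int sp, unsigned int pte1,
                          unsigned int pte2, int two_pages)
{
        int seg = (int)sp >> 29;        /* arithmetic shift, as sra does */

        if (sp & 0x7)
                return 0;               /* not 8-byte aligned */
        if (seg != 0 && seg != -1)
                return 0;               /* %sp points into the vma hole */
        if ((pte1 >> 29) != 0x6)
                return 0;               /* first page not user-writable */
        return !two_pages || (pte2 >> 29) == 0x6;
}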
+
+C_LABEL(tsetup_srmmu_stackchk):
+ /* Check results of caller's andcc %sp, 0x7, %g0 */
+ bne trap_setup_user_stack_is_bolixed
+ sethi %hi(KERNBASE), %glob_tmp
+ cmp %glob_tmp, %sp
+ bleu trap_setup_user_stack_is_bolixed
+ nop
+
+ /* Clear the fault status and turn on the no_fault bit. */
+ mov AC_M_SFSR, %glob_tmp ! delay from above...
+ lda [%glob_tmp] ASI_M_MMUREGS, %g0 ! eat SFSR
+ lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control
+ or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
+ sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it
+
+ /* Dump the registers and cross fingers. */
+ STORE_WINDOW(sp)
+
+ /* Clear the no_fault bit and check the status. */
+ andn %glob_tmp, 0x2, %glob_tmp
+ sta %glob_tmp, [%g0] ASI_M_MMUREGS
+ mov AC_M_SFAR, %glob_tmp
+ lda [%glob_tmp] ASI_M_MMUREGS, %g0
+ mov AC_M_SFSR, %glob_tmp
+ lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp
+ andcc %glob_tmp, 0x2, %g0 ! did we fault?
+ be trap_setup_finish_up ! cool beans, success
+ nop
+
+ b trap_setup_user_stack_is_bolixed ! we faulted, ugh
+ nop
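A C sketch of the SRMMU no-fault probe pattern used above. The mmu_read
and mmu_write helpers are hypothetical stand-ins for the lda/sta
accesses to ASI_M_MMUREGS, and the register offsets are quoted from the
kernel's contregs.h from memory; bit 0x2 is the no_fault bit in the MMU
control register and the fault-valid bit in the SFSR:

#define AC_M_SFSR 0x300         /* synchronous fault status */
#define AC_M_SFAR 0x400         /* synchronous fault address */

extern unsigned int mmu_read(unsigned int reg);  /* lda ASI_M_MMUREGS */
extern void mmu_write(unsigned int reg, unsigned int val);  /* sta */
extern void store_window_to_stack(void);         /* STORE_WINDOW(sp) */

static int srmmu_store_window_nofault(void)
{
        unsigned int ctrl, sfsr;

        mmu_read(AC_M_SFSR);            /* eat any stale fault status */
        ctrl = mmu_read(0);             /* read MMU control register */
        mmu_write(0, ctrl | 0x2);       /* turn on no_fault */
        store_window_to_stack();        /* dump registers, cross fingers */
        mmu_write(0, ctrl & ~0x2);      /* turn no_fault back off */
        mmu_read(AC_M_SFAR);            /* eat the fault address */
        sfsr = mmu_read(AC_M_SFSR);
        return !(sfsr & 0x2);           /* nonzero if no fault occurred */
}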