patch-2.4.7 linux/arch/ppc/kernel/process.c
- Lines: 181
- Date: Thu Jul 19 09:39:44 2001
- Orig file: v2.4.6/linux/arch/ppc/kernel/process.c
- Orig date: Tue Jul 3 17:08:18 2001
diff -u --recursive --new-file v2.4.6/linux/arch/ppc/kernel/process.c linux/arch/ppc/kernel/process.c
@@ -1,5 +1,5 @@
/*
- * BK Id: SCCS/s.process.c 1.19 06/15/01 13:56:56 paulus
+ * BK Id: SCCS/s.process.c 1.23 07/19/01 23:02:48 paulus
*/
/*
* linux/arch/ppc/kernel/process.c
@@ -62,8 +62,8 @@
char *sysmap = NULL;
unsigned long sysmap_size = 0;
-#undef SHOW_TASK_SWITCHES 1
-#undef CHECK_STACK 1
+#undef SHOW_TASK_SWITCHES
+#undef CHECK_STACK
#if defined(CHECK_STACK)
unsigned long
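The hunk above fixes a preprocessor nit: unlike #define, #undef takes only a macro name, so the trailing "1" is an extra token that pedantic compilers flag. A minimal stand-alone illustration (hypothetical file, not part of the patch):

/* undef_demo.c - ISO C allows nothing after the macro name in #undef. */
#define SHOW_TASK_SWITCHES 1
#undef SHOW_TASK_SWITCHES 1	/* gcc -Wall -pedantic warns: extra tokens at end of #undef directive */
#undef SHOW_TASK_SWITCHES	/* well-formed */

int main(void) { return 0; }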
@@ -163,7 +163,6 @@
#else
giveup_altivec(last_task_used_altivec);
#endif /* __SMP__ */
- printk("MSR_VEC in enable_altivec_kernel\n");
}
#endif /* CONFIG_ALTIVEC */
@@ -203,12 +202,6 @@
check_stack(new);
#endif
-#ifdef SHOW_TASK_SWITCHES
- printk("%s/%d -> %s/%d NIP %08lx cpu %d root %x/%x\n",
- prev->comm,prev->pid,
- new->comm,new->pid,new->thread.regs->nip,new->processor,
- new->fs->root,prev->fs->root);
-#endif
#ifdef CONFIG_SMP
/* avoid complexity of lazy save/restore of fpu
* by just saving it every time we switch out if
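The comment kept in context above states the SMP policy: rather than track which CPU's registers hold a task's live floating-point state, the kernel saves the FPU unconditionally on every switch-out, and only uniprocessor kernels defer the save until another task faults on the FPU. A toy model of the two policies (names and structures are ours, not kernel code):

/* fpu_policy_demo.c - lazy (UP) vs. eager (SMP) FP state saving. */
#include <stdio.h>

struct task { double fp_state; int uses_fpu; };

static double fpu_regs;		/* the one physical FP register file */
static struct task *fpu_owner;	/* lazy scheme: task whose state is live */

/* Eager (SMP) policy: save at every switch-out, so live state is never
 * stranded in one CPU's registers while the task reruns elsewhere. */
static void switch_out_eager(struct task *prev)
{
	if (prev->uses_fpu)
		prev->fp_state = fpu_regs;	/* analogue of giveup_fpu(prev) */
}

/* Lazy (UP) policy: do nothing at switch time; the first FP use by a
 * different task traps, and only then is the old state written back. */
static void fpu_unavailable_trap(struct task *next)
{
	if (fpu_owner && fpu_owner != next)
		fpu_owner->fp_state = fpu_regs;
	fpu_regs = next->fp_state;
	fpu_owner = next;
}

int main(void)
{
	struct task a = { 1.0, 1 }, b = { 2.0, 1 };
	fpu_regs = a.fp_state; fpu_owner = &a;
	switch_out_eager(&a);		/* SMP path: a.fp_state is safe now */
	fpu_unavailable_trap(&b);	/* UP path: save happens on demand */
	printf("a=%g live=%g\n", a.fp_state, fpu_regs);
	return 0;
}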
@@ -241,7 +234,7 @@
/* Avoid the trap. On smp this never happens since
* we don't set last_task_used_altivec -- Cort
*/
- if ( last_task_used_altivec == new )
+ if (new->thread.regs && last_task_used_altivec == new)
new->thread.regs->msr |= MSR_VEC;
new_thread = &new->thread;
old_thread = &current->thread;
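The added new->thread.regs test matters because kernel threads never run in user mode and have no user register frame: their thread.regs is NULL, so the old comparison-then-dereference could oops. A stand-alone sketch of the guarded pattern (struct layout is a stand-in; MSR_VEC's value follows the 2.4 headers):

/* altivec_guard_demo.c - why the regs pointer is tested first. */
#include <stddef.h>
#include <stdio.h>

#define MSR_VEC (1UL << 25)	/* AltiVec-available MSR bit */

struct pt_regs { unsigned long msr; };
struct thread_struct { struct pt_regs *regs; };
struct task_struct { struct thread_struct thread; };

static struct task_struct *last_task_used_altivec;

static void restore_vec_flag(struct task_struct *new)
{
	/* Kernel threads have thread.regs == NULL; the guard must come
	 * before the dereference, exactly as in the patched line. */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
}

int main(void)
{
	struct task_struct kthread = { { NULL } };	/* kernel thread: no user regs */
	last_task_used_altivec = &kthread;
	restore_vec_flag(&kthread);			/* safe: && short-circuits */
	printf("ok\n");
	return 0;
}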
@@ -329,39 +322,42 @@
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long unused,
- struct task_struct * p, struct pt_regs * regs)
+ struct task_struct *p, struct pt_regs *regs)
{
unsigned long msr;
- struct pt_regs * childregs, *kregs;
+ struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
-
+ unsigned long sp = (unsigned long)p + sizeof(union task_union);
+ unsigned long childframe;
+
/* Copy registers */
- childregs = ((struct pt_regs *)
- ((unsigned long)p + sizeof(union task_union)
- - STACK_FRAME_OVERHEAD)) - 2;
+ sp -= sizeof(struct pt_regs);
+ childregs = (struct pt_regs *) sp;
*childregs = *regs;
- if ((childregs->msr & MSR_PR) == 0)
- childregs->gpr[2] = (unsigned long) p; /* `current' in new task */
+ if ((childregs->msr & MSR_PR) == 0) {
+ /* for kernel thread, set `current' and stackptr in new task */
+ childregs->gpr[1] = sp + sizeof(struct pt_regs);
+ childregs->gpr[2] = (unsigned long) p;
+ }
childregs->gpr[3] = 0; /* Result from fork() */
p->thread.regs = childregs;
- p->thread.ksp = (unsigned long) childregs - STACK_FRAME_OVERHEAD;
- p->thread.ksp -= sizeof(struct pt_regs ) + STACK_FRAME_OVERHEAD;
- kregs = (struct pt_regs *)(p->thread.ksp + STACK_FRAME_OVERHEAD);
+ sp -= STACK_FRAME_OVERHEAD;
+ childframe = sp;
+
+ /*
+ * The way this works is that at some point in the future
+ * some task will call _switch to switch to the new task.
+ * That will pop off the stack frame created below and start
+ * the new task running at ret_from_fork. The new task will
+ * do some housekeeping and then return from the fork or clone
+ * system call, using the stack frame created above.
+ */
+ sp -= sizeof(struct pt_regs);
+ kregs = (struct pt_regs *) sp;
+ sp -= STACK_FRAME_OVERHEAD;
+ p->thread.ksp = sp;
kregs->nip = (unsigned long)ret_from_fork;
- asm volatile("mfmsr %0" : "=r" (msr):);
- kregs->msr = msr;
- kregs->gpr[1] = (unsigned long)childregs - STACK_FRAME_OVERHEAD;
- kregs->gpr[2] = (unsigned long)p;
-
- if (usp >= (unsigned long) regs) {
- /* Stack is in kernel space - must adjust */
- childregs->gpr[1] = (unsigned long)(childregs + 1);
- } else {
- /* Provided stack is in user space */
- childregs->gpr[1] = usp;
- }
- p->thread.last_syscall = -1;
-
+
/*
* copy fpu info - assume lazy fpu switch now always
* -- Cort
@@ -379,12 +375,13 @@
*/
if (regs->msr & MSR_VEC)
giveup_altivec(current);
-
memcpy(&p->thread.vr, &current->thread.vr, sizeof(p->thread.vr));
p->thread.vscr = current->thread.vscr;
childregs->msr &= ~MSR_VEC;
#endif /* CONFIG_ALTIVEC */
+ p->thread.last_syscall = -1;
+
return 0;
}
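The rewritten copy_thread() carves the child's kernel stack from the top of its task_union downward: the pt_regs the child uses to return from fork/clone, an ABI stack frame under it, the pt_regs that _switch pops to start the child at ret_from_fork, and a final frame whose address becomes p->thread.ksp. The arithmetic replayed with illustrative sizes (the real values come from the PPC headers):

/* stack_layout_demo.c - replaying the copy_thread() pointer arithmetic. */
#include <stdio.h>

#define TASK_UNION_SIZE		8192	/* two pages, as on 2.4 PPC */
#define PT_REGS_SIZE		 352	/* sizeof(struct pt_regs): illustrative */
#define STACK_FRAME_OVERHEAD	  16	/* minimum ABI frame: illustrative */

int main(void)
{
	unsigned long p = 0xc0100000UL;			/* pretend task_struct address */
	unsigned long sp = p + TASK_UNION_SIZE;		/* top of the kernel stack */

	sp -= PT_REGS_SIZE;
	unsigned long childregs = sp;	/* user regs; the fork/clone return path uses these */
	sp -= STACK_FRAME_OVERHEAD;
	unsigned long childframe = sp;	/* frame sitting under childregs */
	sp -= PT_REGS_SIZE;
	unsigned long kregs = sp;	/* frame _switch pops; kregs->nip = ret_from_fork */
	sp -= STACK_FRAME_OVERHEAD;	/* final pad: this is p->thread.ksp */

	printf("childregs=%#lx childframe=%#lx kregs=%#lx ksp=%#lx\n",
	       childregs, childframe, kregs, sp);
	return 0;
}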
@@ -394,6 +391,8 @@
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
{
set_fs(USER_DS);
+ memset(regs->gpr, 0, sizeof(regs->gpr));
+ memset(&regs->ctr, 0, 5 * sizeof(regs->ctr));
regs->nip = nip;
regs->gpr[1] = sp;
regs->msr = MSR_USER;
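The two memset calls added to start_thread() scrub every user-visible register before a freshly exec'd image runs, so it cannot observe stale kernel or parent values. The 5 * sizeof(regs->ctr) works because ctr, link, xer, ccr and mq sit as five consecutive unsigned longs in the 2.4 PPC pt_regs. A stand-in layout showing the idiom (field list assumed from the 2.4 headers):

/* scrub_demo.c - clearing five consecutive pt_regs fields in one memset. */
#include <stdio.h>
#include <string.h>

struct pt_regs {			/* stand-in mirroring the 2.4 PPC layout */
	unsigned long gpr[32];
	unsigned long nip, msr, orig_gpr3;
	unsigned long ctr, link, xer, ccr, mq;	/* the five scrubbed fields */
	unsigned long trap, dar, dsisr, result;
};

static void scrub_for_exec(struct pt_regs *regs)
{
	memset(regs->gpr, 0, sizeof(regs->gpr));
	memset(&regs->ctr, 0, 5 * sizeof(regs->ctr));	/* ctr..mq in one shot */
}

int main(void)
{
	struct pt_regs r;
	memset(&r, 0xff, sizeof(r));	/* simulate leftover register values */
	scrub_for_exec(&r);
	/* ctr..mq are now zero; nip is untouched (start_thread sets it). */
	printf("ctr=%lu mq=%lu nip=%#lx\n", r.ctr, r.mq, r.nip);
	return 0;
}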
@@ -407,38 +406,13 @@
int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
- unsigned long clone_flags = p1;
- int res;
- lock_kernel();
- res = do_fork(clone_flags, regs->gpr[1], regs, 0);
-#ifdef CONFIG_SMP
- /* When we clone the idle task we keep the same pid but
- * the return value of 0 for both causes problems.
- * -- Cort
- */
- if ((current->pid == 0) && (current == &init_task))
- res = 1;
-#endif /* CONFIG_SMP */
- unlock_kernel();
- return res;
+ return do_fork(p1, regs->gpr[1], regs, 0);
}
int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
struct pt_regs *regs)
{
-
- int res;
-
- res = do_fork(SIGCHLD, regs->gpr[1], regs, 0);
-#ifdef CONFIG_SMP
- /* When we clone the idle task we keep the same pid but
- * the return value of 0 for both causes problems.
- * -- Cort
- */
- if ((current->pid == 0) && (current == &init_task))
- res = 1;
-#endif /* CONFIG_SMP */
- return res;
+ return do_fork(SIGCHLD, regs->gpr[1], regs, 0);
}
int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,