patch-2.4.4 linux/arch/ia64/kernel/smp.c
- Lines: 592
- Date: Thu Apr 5 12:51:47 2001
- Orig file: v2.4.3/linux/arch/ia64/kernel/smp.c
- Orig date: Thu Jan 4 12:50:17 2001
diff -u --recursive --new-file v2.4.3/linux/arch/ia64/kernel/smp.c linux/arch/ia64/kernel/smp.c
@@ -2,8 +2,8 @@
* SMP Support
*
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
- *
+ * Copyright (C) 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
* Lots of stuff stolen from arch/alpha/kernel/smp.c
*
* 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy calibration on each CPU.
@@ -37,8 +37,8 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
@@ -50,9 +50,8 @@
extern void machine_halt(void);
extern void start_ap(void);
-extern int cpu_now_booting; /* Used by head.S to find idle task */
-extern volatile unsigned long cpu_online_map; /* Bitmap of available cpu's */
-extern struct cpuinfo_ia64 cpu_data[NR_CPUS]; /* Duh... */
+extern int cpu_now_booting; /* used by head.S to find idle task */
+extern volatile unsigned long cpu_online_map; /* bitmap of available cpu's */
struct smp_boot_data smp_boot_data __initdata;
@@ -60,18 +59,19 @@
char __initdata no_int_routing;
+/* don't make this a CPU-local variable: it's used for IPIs, mostly... */
+int __cpu_physical_id[NR_CPUS]; /* logical ID -> physical CPU ID map */
+
unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
-volatile int __cpu_physical_id[NR_CPUS] = { -1, }; /* Logical ID -> SAPIC ID */
-int smp_num_cpus = 1;
-volatile int smp_threads_ready; /* Set when the idlers are all forked */
-cycles_t cacheflush_time;
-unsigned long ap_wakeup_vector = -1; /* External Int to use to wakeup AP's */
+int smp_num_cpus = 1;
+volatile int smp_threads_ready; /* set when the idlers are all forked */
+unsigned long ap_wakeup_vector; /* external Int to use to wakeup AP's */
static volatile unsigned long cpu_callin_map;
static volatile int smp_commenced;
-static int max_cpus = -1; /* Command line */
-static unsigned long ipi_op[NR_CPUS];
+static int max_cpus = -1; /* command line */
+
struct smp_call_struct {
void (*func) (void *info);
void *info;
@@ -99,7 +99,8 @@
* SMP mode to <NUM>.
*/
-static int __init nosmp(char *str)
+static int __init
+nosmp (char *str)
{
max_cpus = 0;
return 1;
@@ -107,7 +108,8 @@
__setup("nosmp", nosmp);
-static int __init maxcpus(char *str)
+static int __init
+maxcpus (char *str)
{
get_option(&str, &max_cpus);
return 1;
@@ -116,7 +118,7 @@
__setup("maxcpus=", maxcpus);
static int __init
-nointroute(char *str)
+nointroute (char *str)
{
no_int_routing = 1;
return 1;
@@ -125,21 +127,20 @@
__setup("nointroute", nointroute);
/*
- * Yoink this CPU from the runnable list...
+ * Yoink this CPU from the runnable list...
*/
void
-halt_processor(void)
+halt_processor (void)
{
- clear_bit(smp_processor_id(), &cpu_online_map);
+ clear_bit(smp_processor_id(), &cpu_online_map);
max_xtp();
__cli();
- for (;;)
+ for (;;)
;
-
}
static inline int
-pointer_lock(void *lock, void *data, int retry)
+pointer_lock (void *lock, void *data, int retry)
{
volatile long *ptr = lock;
again:
@@ -156,14 +157,13 @@
}
void
-handle_IPI(int irq, void *dev_id, struct pt_regs *regs)
+handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
{
- int this_cpu = smp_processor_id();
- unsigned long *pending_ipis = &ipi_op[this_cpu];
+ unsigned long *pending_ipis = &local_cpu_data->ipi_operation;
unsigned long ops;
/* Count this now; we may make a call that never returns. */
- cpu_data[this_cpu].ipi_count++;
+ local_cpu_data->ipi_count++;
mb(); /* Order interrupt and bit testing. */
while ((ops = xchg(pending_ipis, 0)) != 0) {
@@ -173,16 +173,16 @@
which = ffz(~ops);
ops &= ~(1 << which);
-
+
switch (which) {
case IPI_RESCHEDULE:
- /*
- * Reschedule callback. Everything to be done is done by the
- * interrupt return path.
+ /*
+ * Reschedule callback. Everything to be done is done by the
+ * interrupt return path.
*/
break;
-
- case IPI_CALL_FUNC:
+
+ case IPI_CALL_FUNC:
{
struct smp_call_struct *data;
void (*func)(void *info);
@@ -203,7 +203,7 @@
/* Notify the sending CPU that the task is done. */
mb();
- if (wait)
+ if (wait)
atomic_dec(&data->unfinished_count);
}
break;
@@ -214,7 +214,7 @@
#ifndef CONFIG_ITANIUM_PTCG
case IPI_FLUSH_TLB:
- {
+ {
extern unsigned long flush_start, flush_end, flush_nbits, flush_rid;
extern atomic_t flush_cpu_count;
unsigned long saved_rid = ia64_get_rr(flush_start);
@@ -223,6 +223,8 @@
unsigned long nbits = flush_nbits;
/*
+ * Current CPU may be running with different RID so we need to
+ * reload the RID of flushed address.
* Current CPU may be running with different
* RID so we need to reload the RID of flushed
* address. Purging the translation also
@@ -235,7 +237,7 @@
ia64_set_rr(flush_start, flush_rid);
ia64_srlz_d();
}
-
+
do {
/*
* Purge local TLB entries.
@@ -258,7 +260,8 @@
#endif /* !CONFIG_ITANIUM_PTCG */
default:
- printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
+ printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
+ smp_processor_id(), which);
break;
} /* Switch */
} while (ops);
@@ -268,21 +271,21 @@
}
static inline void
-send_IPI_single (int dest_cpu, int op)
+send_IPI_single (int dest_cpu, int op)
{
-
- if (dest_cpu == -1)
- return;
-
- set_bit(op, &ipi_op[dest_cpu]);
- platform_send_ipi(dest_cpu, IPI_IRQ, IA64_IPI_DM_INT, 0);
+
+ if (dest_cpu == -1)
+ return;
+
+ set_bit(op, &cpu_data[dest_cpu].ipi_operation);
+ platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}
static inline void
-send_IPI_allbutself(int op)
+send_IPI_allbutself (int op)
{
int i;
-
+
for (i = 0; i < smp_num_cpus; i++) {
if (i != smp_processor_id())
send_IPI_single(i, op);
@@ -290,7 +293,7 @@
}
static inline void
-send_IPI_all(int op)
+send_IPI_all (int op)
{
int i;
@@ -299,30 +302,42 @@
}
static inline void
-send_IPI_self(int op)
+send_IPI_self (int op)
{
send_IPI_single(smp_processor_id(), op);
}
void
-smp_send_reschedule(int cpu)
+smp_send_reschedule (int cpu)
{
send_IPI_single(cpu, IPI_RESCHEDULE);
}
void
-smp_send_stop(void)
+smp_send_stop (void)
{
send_IPI_allbutself(IPI_CPU_STOP);
}
#ifndef CONFIG_ITANIUM_PTCG
+
void
-smp_send_flush_tlb(void)
+smp_send_flush_tlb (void)
{
send_IPI_allbutself(IPI_FLUSH_TLB);
}
-#endif /* !CONFIG_ITANIUM_PTCG */
+
+void
+smp_resend_flush_tlb(void)
+{
+ /*
+ * Really need a null IPI but since this rarely should happen & since this code
+ * will go away, lets not add one.
+ */
+ send_IPI_allbutself(IPI_RESCHEDULE);
+}
+
+#endif /* !CONFIG_ITANIUM_PTCG */
/*
* Run a function on another CPU
@@ -347,7 +362,7 @@
printk(__FUNCTION__" trying to call self\n");
return -EBUSY;
}
-
+
data.func = func;
data.info = info;
data.wait = wait;
@@ -392,7 +407,6 @@
* Does not return until remote CPUs are nearly ready to execute <func>
* or are or have executed.
*/
-
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
@@ -402,7 +416,7 @@
if (cpus == 0)
return 0;
-
+
data.func = func;
data.info = info;
data.wait = wait;
@@ -425,7 +439,7 @@
int i;
for (i = 0; i < smp_num_cpus; i++) {
if (i != smp_processor_id())
- platform_send_ipi(i, IPI_IRQ, IA64_IPI_DM_INT, 0);
+ platform_send_ipi(i, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}
goto retry;
#else
@@ -446,7 +460,7 @@
* want to ensure all TLB's flushed before proceeding.
*/
void
-smp_flush_tlb_all(void)
+smp_flush_tlb_all (void)
{
smp_call_function((void (*)(void *))__flush_tlb_all, NULL, 1, 1);
__flush_tlb_all();
@@ -456,21 +470,19 @@
* Ideally sets up per-cpu profiling hooks. Doesn't do much now...
*/
static inline void __init
-smp_setup_percpu_timer(int cpuid)
+smp_setup_percpu_timer(void)
{
- cpu_data[cpuid].prof_counter = 1;
- cpu_data[cpuid].prof_multiplier = 1;
+ local_cpu_data->prof_counter = 1;
+ local_cpu_data->prof_multiplier = 1;
}
-void
-smp_do_timer(struct pt_regs *regs)
+void
+smp_do_timer (struct pt_regs *regs)
{
- int cpu = smp_processor_id();
- int user = user_mode(regs);
- struct cpuinfo_ia64 *data = &cpu_data[cpu];
+ int user = user_mode(regs);
- if (--data->prof_counter <= 0) {
- data->prof_counter = data->prof_multiplier;
+ if (--local_cpu_data->prof_counter <= 0) {
+ local_cpu_data->prof_counter = local_cpu_data->prof_multiplier;
update_process_times(user);
}
}
@@ -480,7 +492,7 @@
* AP's start using C here.
*/
void __init
-smp_callin (void)
+smp_callin (void)
{
extern void ia64_rid_init(void);
extern void ia64_init_itm(void);
@@ -493,12 +505,12 @@
if (test_and_set_bit(cpu, &cpu_online_map)) {
printk("CPU#%d already initialized!\n", cpu);
machine_halt();
- }
+ }
efi_map_pal_code();
cpu_init();
- smp_setup_percpu_timer(cpu);
+ smp_setup_percpu_timer();
/* setup the CPU local timer tick */
ia64_init_itm();
@@ -506,15 +518,10 @@
#ifdef CONFIG_PERFMON
perfmon_init_percpu();
#endif
-
- /* Disable all local interrupts */
- ia64_set_lrr0(0, 1);
- ia64_set_lrr1(0, 1);
-
local_irq_enable(); /* Interrupts have been off until now */
calibrate_delay();
- my_cpu_data.loops_per_jiffy = loops_per_jiffy;
+ local_cpu_data->loops_per_jiffy = loops_per_jiffy;
/* allow the master to continue */
set_bit(cpu, &cpu_callin_map);
@@ -531,8 +538,8 @@
* path in which case the new idle task could get scheduled before we
* had a chance to remove it from the run-queue...
*/
-static int __init
-fork_by_hand(void)
+static int __init
+fork_by_hand (void)
{
/*
* Don't care about the usp and regs settings since we'll never
@@ -545,22 +552,22 @@
* Bring one cpu online. Return 0 if this fails for any reason.
*/
static int __init
-smp_boot_one_cpu(int cpu)
+smp_boot_one_cpu (int cpu)
{
struct task_struct *idle;
int cpu_phys_id = cpu_physical_id(cpu);
long timeout;
- /*
+ /*
* Create an idle task for this CPU. Note that the address we
* give to kernel_thread is irrelevant -- it's going to start
* where OS_BOOT_RENDEVZ vector in SAL says to start. But
* this gets all the other task-y sort of data structures set
- * up like we wish. We need to pull the just created idle task
- * off the run queue and stuff it into the init_tasks[] array.
+ * up like we wish. We need to pull the just created idle task
+ * off the run queue and stuff it into the init_tasks[] array.
* Sheesh . . .
*/
- if (fork_by_hand() < 0)
+ if (fork_by_hand() < 0)
panic("failed fork for CPU 0x%x", cpu_phys_id);
/*
* We remove it from the pidhash and the runqueue
@@ -571,7 +578,7 @@
panic("No idle process for CPU 0x%x", cpu_phys_id);
init_tasks[cpu] = idle;
del_from_runqueue(idle);
- unhash_process(idle);
+ unhash_process(idle);
/* Schedule the first task manually. */
idle->processor = cpu;
@@ -590,50 +597,41 @@
udelay(100);
}
- printk(KERN_ERR "SMP: Processor 0x%x is stuck.\n", cpu_phys_id);
+ printk(KERN_ERR "SMP: CPU 0x%x is stuck\n", cpu_phys_id);
return 0;
}
/*
- * Called by smp_init bring all the secondaries online and hold them.
- * XXX: this is ACPI specific; it uses "magic" variables exported from acpi.c
- * to 'discover' the AP's. Blech.
+ * Called by smp_init bring all the secondaries online and hold them.
*/
void __init
-smp_boot_cpus(void)
+smp_boot_cpus (void)
{
int i, cpu_count = 1;
unsigned long bogosum;
- /* Take care of some initial bookkeeping. */
- memset(&__cpu_physical_id, -1, sizeof(__cpu_physical_id));
- memset(&ipi_op, 0, sizeof(ipi_op));
-
- /* Setup BP mappings */
- __cpu_physical_id[0] = hard_smp_processor_id();
-
/* on the BP, the kernel already called calibrate_delay_loop() in init/main.c */
- my_cpu_data.loops_per_jiffy = loops_per_jiffy;
+ local_cpu_data->loops_per_jiffy = loops_per_jiffy;
#if 0
smp_tune_scheduling();
#endif
- smp_setup_percpu_timer(0);
+ smp_setup_percpu_timer();
if (test_and_set_bit(0, &cpu_online_map)) {
printk("CPU#%d already initialized!\n", smp_processor_id());
machine_halt();
- }
+ }
init_idle();
/* Nothing to do when told not to. */
if (max_cpus == 0) {
- printk(KERN_INFO "SMP mode deactivated.\n");
+ printk(KERN_INFO "SMP mode deactivated.\n");
return;
}
- if (max_cpus != -1)
+ if (max_cpus != -1)
printk("Limiting CPUs to %d\n", max_cpus);
if (smp_boot_data.cpu_count > 1) {
@@ -650,7 +648,7 @@
continue; /* failed */
cpu_count++; /* Count good CPUs only... */
- /*
+ /*
* Bail if we've started as many CPUS as we've been told to.
*/
if (cpu_count == max_cpus)
@@ -663,10 +661,10 @@
}
bogosum = 0;
- for (i = 0; i < NR_CPUS; i++) {
+ for (i = 0; i < NR_CPUS; i++) {
if (cpu_online_map & (1L << i))
bogosum += cpu_data[i].loops_per_jiffy;
- }
+ }
printk(KERN_INFO "SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
cpu_count, bogosum*HZ/500000, (bogosum*HZ/5000) % 100);
@@ -674,31 +672,31 @@
smp_num_cpus = cpu_count;
}
-/*
+/*
* Called when the BP is just about to fire off init.
*/
-void __init
-smp_commence(void)
+void __init
+smp_commence (void)
{
smp_commenced = 1;
}
int __init
-setup_profiling_timer(unsigned int multiplier)
+setup_profiling_timer (unsigned int multiplier)
{
- return -EINVAL;
+ return -EINVAL;
}
/*
* Assume that CPU's have been discovered by some platform-dependant
* interface. For SoftSDV/Lion, that would be ACPI.
*
- * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
+ * Setup of the IPI irq handler is done in irq.c:init_IRQ().
*
* This also registers the AP OS_MC_REDVEZ address with SAL.
*/
void __init
-init_smp_config(void)
+init_smp_config (void)
{
struct fptr {
unsigned long fp;
@@ -708,14 +706,13 @@
/* Tell SAL where to drop the AP's. */
ap_startup = (struct fptr *) start_ap;
- sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
- __pa(ap_startup->fp), __pa(ap_startup->gp), 0,
- 0, 0, 0);
+ sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ, __pa(ap_startup->fp),
+ __pa(ap_startup->gp), 0, 0, 0, 0);
if (sal_ret < 0) {
printk("SMP: Can't set SAL AP Boot Rendezvous: %s\n", ia64_sal_strerror(sal_ret));
printk(" Forcing UP mode\n");
max_cpus = 0;
- smp_num_cpus = 1;
+ smp_num_cpus = 1;
}
}
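
The recurring change in this patch is the switch from indexing global per-CPU arrays (cpu_data[smp_processor_id()], ipi_op[cpu]) to a single local_cpu_data accessor and a per-CPU ipi_operation field, as seen in the handle_IPI(), smp_do_timer(), smp_callin() and smp_boot_cpus() hunks above. The sketch below illustrates that access pattern only; the struct layout, NR_CPUS value and local_cpu_data definition here are simplified stand-ins, not the actual ia64 headers.

/*
 * Illustrative sketch only: simplified stand-ins for the per-CPU data
 * access pattern this patch moves to.  The real definitions live in the
 * ia64 headers and differ in detail.
 */
#define NR_CPUS 16

struct cpuinfo_ia64 {
	unsigned long ipi_count;	/* IPIs handled on this CPU */
	unsigned long ipi_operation;	/* pending IPI operation bits */
	unsigned long loops_per_jiffy;
	int prof_counter;
	int prof_multiplier;
};

static struct cpuinfo_ia64 cpu_data[NR_CPUS];
static int current_cpu;			/* stand-in for smp_processor_id() */

#define smp_processor_id()	(current_cpu)

/*
 * Old style (pre-patch): every access recomputes the CPU index, e.g.
 *	cpu_data[smp_processor_id()].ipi_count++;
 * New style (this patch): one "local CPU data" accessor.
 */
#define local_cpu_data		(&cpu_data[smp_processor_id()])

static void handle_ipi_sketch(void)
{
	unsigned long ops;

	/* count the IPI first; the handler may never return */
	local_cpu_data->ipi_count++;

	/* grab and clear the pending-operation bits (xchg() in the real code) */
	ops = local_cpu_data->ipi_operation;
	local_cpu_data->ipi_operation = 0;

	(void) ops;	/* a real handler dispatches on each bit set in ops */
}

The same transformation lets the patch drop the separate ipi_op[] array entirely, since the pending-IPI bits now live in struct cpuinfo_ia64 itself.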