patch-2.4.17 linux/arch/sparc64/kernel/smp.c
Next file: linux/arch/sparc64/kernel/sparc64_ksyms.c
Previous file: linux/arch/sparc64/kernel/semaphore.c
Back to the patch index
Back to the overall index
- Lines: 158
- Date: Fri Dec 21 16:40:32 2001
- Orig file: linux-2.4.16/arch/sparc64/kernel/smp.c
- Orig date: Wed Nov 21 18:31:09 2001
diff -Naur -X /home/marcelo/lib/dontdiff linux-2.4.16/arch/sparc64/kernel/smp.c linux/arch/sparc64/kernel/smp.c
@@ -17,6 +17,7 @@
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
+#include <linux/cache.h>
#include <asm/head.h>
#include <asm/ptrace.h>
@@ -39,17 +40,17 @@
extern void calibrate_delay(void);
extern unsigned prom_cpu_nodes[];
-struct cpuinfo_sparc cpu_data[NR_CPUS] __attribute__ ((aligned (64)));
+cpuinfo_sparc cpu_data[NR_CPUS];
-volatile int __cpu_number_map[NR_CPUS] __attribute__ ((aligned (64)));
-volatile int __cpu_logical_map[NR_CPUS] __attribute__ ((aligned (64)));
+volatile int __cpu_number_map[NR_CPUS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
+volatile int __cpu_logical_map[NR_CPUS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
/* Please don't make this stuff initdata!!! --DaveM */
static unsigned char boot_cpu_id = 0;
static int smp_activated = 0;
/* Kernel spinlock */
-spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
+spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
volatile int smp_processors_ready = 0;
unsigned long cpu_present_map = 0;
@@ -137,6 +138,15 @@
{
int cpuid = hard_smp_processor_id();
unsigned long pstate;
+ extern int bigkernel;
+ extern unsigned long kern_locked_tte_data;
+
+ if (bigkernel) {
+ prom_dtlb_load(sparc64_highest_locked_tlbent()-1,
+ kern_locked_tte_data + 0x400000, KERNBASE + 0x400000);
+ prom_itlb_load(sparc64_highest_locked_tlbent()-1,
+ kern_locked_tte_data + 0x400000, KERNBASE + 0x400000);
+ }
inherit_locked_prom_mappings(0);
@@ -223,7 +233,6 @@
{
trap_init();
init_IRQ();
- smp_callin();
return cpu_idle();
}
@@ -276,7 +285,7 @@
init_tasks[cpucount] = p;
p->processor = i;
- p->cpus_runnable = 1 << i; /* we schedule the first task manually */
+ p->cpus_runnable = 1UL << i; /* we schedule the first task manually */
del_from_runqueue(p);
unhash_process(p);
@@ -482,7 +491,7 @@
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
: : "r" (pstate));
- if ((stuck & ~(0x5555555555555555UL)) == 0) {
+ if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
/* Busy bits will not clear, continue instead
* of freezing up on this cpu.
*/
@@ -542,6 +551,9 @@
int wait;
};
+static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
+static struct call_data_struct *call_data;
+
extern unsigned long xcall_call_function;
int smp_call_function(void (*func)(void *info), void *info,
@@ -549,6 +561,7 @@
{
struct call_data_struct data;
int cpus = smp_num_cpus - 1;
+ long timeout;
if (!cpus)
return 0;
@@ -558,19 +571,36 @@
atomic_set(&data.finished, 0);
data.wait = wait;
- smp_cross_call(&xcall_call_function,
- 0, (u64) &data, 0);
+ spin_lock_bh(&call_lock);
+
+ call_data = &data;
+
+ smp_cross_call(&xcall_call_function, 0, 0, 0);
+
/*
* Wait for other cpus to complete function or at
* least snap the call data.
*/
- while (atomic_read(&data.finished) != cpus)
+ timeout = 1000000;
+ while (atomic_read(&data.finished) != cpus) {
+ if (--timeout <= 0)
+ goto out_timeout;
barrier();
+ udelay(1);
+ }
+
+ spin_unlock_bh(&call_lock);
+
+ return 0;
+out_timeout:
+ spin_unlock_bh(&call_lock);
+ printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
+ smp_num_cpus - 1, atomic_read(&data.finished));
return 0;
}
-void smp_call_function_client(struct call_data_struct *call_data)
+void smp_call_function_client(void)
{
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
@@ -597,7 +627,7 @@
extern unsigned long xcall_flush_dcache_page_cheetah;
extern unsigned long xcall_flush_dcache_page_spitfire;
-#ifdef DCFLUSH_DEBUG
+#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
@@ -620,7 +650,7 @@
if (smp_processors_ready) {
unsigned long mask = 1UL << cpu;
-#ifdef DCFLUSH_DEBUG
+#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
#endif
if (cpu == smp_processor_id()) {
@@ -642,7 +672,7 @@
__pa(page->virtual),
0, mask);
}
-#ifdef DCFLUSH_DEBUG
+#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall);
#endif
}
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)