patch-2.4.19 linux-2.4.19/arch/mips/kernel/irq.c
- Lines: 434
- Date: Fri Aug 2 17:39:43 2002
- Orig file: linux-2.4.18/arch/mips/kernel/irq.c
- Orig date: Sun Sep 9 10:43:01 2001
diff -urN linux-2.4.18/arch/mips/kernel/irq.c linux-2.4.19/arch/mips/kernel/irq.c
@@ -8,17 +8,22 @@
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
*/
+#include <linux/config.h>
#include <linux/kernel.h>
-#include <linux/irq.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <asm/atomic.h>
#include <asm/system.h>
+#include <asm/uaccess.h>
/*
* Controller mappings for all interrupt sources:
@@ -63,7 +68,7 @@
end_none
};
-volatile unsigned long irq_err_count, spurious_count;
+atomic_t irq_err_count;
/*
* Generic, controller-independent functions:
@@ -71,13 +76,13 @@
int get_irq_list(char *buf)
{
+ int i, j;
struct irqaction * action;
char *p = buf;
- int i;
p += sprintf(p, " ");
- for (i=0; i < 1 /*smp_num_cpus*/; i++)
- p += sprintf(p, "CPU%d ", i);
+ for (j=0; j<smp_num_cpus; j++)
+ p += sprintf(p, "CPU%d ",j);
*p++ = '\n';
for (i = 0 ; i < NR_IRQS ; i++) {
@@ -85,7 +90,13 @@
if (!action)
continue;
p += sprintf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
p += sprintf(p, "%10u ", kstat_irqs(i));
+#else
+ for (j = 0; j < smp_num_cpus; j++)
+ p += sprintf(p, "%10u ",
+ kstat.irqs[cpu_logical_map(j)][i]);
+#endif
p += sprintf(p, " %14s", irq_desc[i].handler->typename);
p += sprintf(p, " %s", action->name);
@@ -93,10 +104,182 @@
p += sprintf(p, ", %s", action->name);
*p++ = '\n';
}
- p += sprintf(p, "ERR: %10lu\n", irq_err_count);
+ p += sprintf(p, "\n");
+ p += sprintf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
return p - buf;
}
+#ifdef CONFIG_SMP
+int global_irq_holder = NO_PROC_ID;
+spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * Most of this code is taken from the mips64 tree (ip27-irq.c). It's virtually
+ * identical to the i386 implementation in arch/i386/irq.c, with translations for
+ * the interrupt enable bit.
+ */
+
+#define MAXCOUNT 100000000
+#define SYNC_OTHER_CORES(x) udelay(x+1)
+
+static inline void wait_on_irq(int cpu)
+{
+ int count = MAXCOUNT;
+
+ for (;;) {
+
+ /*
+ * Wait until all interrupts are gone. Wait
+ * for bottom half handlers unless we're
+ * already executing in one..
+ */
+ if (!irqs_running())
+ if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
+ break;
+
+ /* Duh, we have to loop. Release the lock to avoid deadlocks */
+ spin_unlock(&global_irq_lock);
+
+ for (;;) {
+ if (!--count) {
+ printk("Count spun out. Huh?\n");
+ count = ~0;
+ }
+ __sti();
+ SYNC_OTHER_CORES(cpu);
+ __cli();
+ if (irqs_running())
+ continue;
+ if (spin_is_locked(&global_irq_lock))
+ continue;
+ if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
+ continue;
+ if (spin_trylock(&global_irq_lock))
+ break;
+ }
+ }
+}
+
+/*
+ * This is called when we want to synchronize with
+ * interrupts. We may for example tell a device to
+ * stop sending interrupts: but to make sure there
+ * are no interrupts that are executing on another
+ * CPU we need to call this function.
+ */
+void synchronize_irq(void)
+{
+ if (irqs_running()) {
+ /* Stupid approach */
+ cli();
+ sti();
+ }
+}
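As context for the routine above: a minimal, hypothetical sketch of how a 2.4-era driver would use synchronize_irq() during teardown. The my_dev structure, its irq_enable register, and the header choices are assumptions for illustration, not part of this patch.

#include <linux/sched.h>	/* free_irq() in 2.4 */
#include <linux/interrupt.h>

/* Hypothetical device, invented for the sketch. */
struct my_dev {
	unsigned int irq;
	volatile unsigned int *irq_enable;	/* memory-mapped enable register */
};

static void my_dev_shutdown(struct my_dev *dev)
{
	*dev->irq_enable = 0;	/* tell the hardware to stop interrupting... */

	/*
	 * ...then wait until no handler instance is still executing on
	 * another CPU, so that freeing the IRQ (and dev) is safe.
	 */
	synchronize_irq();

	free_irq(dev->irq, dev);
}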
+
+static inline void get_irqlock(int cpu)
+{
+ if (!spin_trylock(&global_irq_lock)) {
+ /* do we already hold the lock? */
+ if ((unsigned char) cpu == global_irq_holder)
+ return;
+ /* Uhhuh.. Somebody else got it. Wait.. */
+ spin_lock(&global_irq_lock);
+ }
+ /*
+ * We also need to make sure that nobody else is running
+ * in an interrupt context.
+ */
+ wait_on_irq(cpu);
+
+ /*
+ * Ok, finally..
+ */
+ global_irq_holder = cpu;
+}
+
+/*
+ * A global "cli()" while in an interrupt context turns into just a local
+ * cli(). Interrupts should use spinlocks for the (very unlikely) case that
+ * they ever want to protect against each other.
+ *
+ * If we already have local interrupts disabled, this will not turn a local
+ * disable into a global one (problems with spinlocks: this makes
+ * save_flags+cli+sti usable inside a spinlock).
+ */
+
+void __global_cli(void)
+{
+ unsigned int flags;
+
+ __save_flags(flags);
+ if (flags & ST0_IE) {
+ int cpu = smp_processor_id();
+ __cli();
+ if (!local_irq_count(cpu))
+ get_irqlock(cpu);
+ }
+}
+
+void __global_sti(void)
+{
+ int cpu = smp_processor_id();
+
+ if (!local_irq_count(cpu))
+ release_irqlock(cpu);
+ __sti();
+}
+
+/*
+ * SMP flags value to restore to:
+ * 0 - global cli
+ * 1 - global sti
+ * 2 - local cli
+ * 3 - local sti
+ */
+unsigned long __global_save_flags(void)
+{
+ int retval;
+ int local_enabled;
+ unsigned long flags;
+ int cpu = smp_processor_id();
+
+ __save_flags(flags);
+ local_enabled = (flags & ST0_IE);
+ /* default to local */
+ retval = 2 + local_enabled;
+
+ /* check for global flags if we're not in an interrupt */
+ if (!local_irq_count(cpu)) {
+ if (local_enabled)
+ retval = 1;
+ if (global_irq_holder == cpu)
+ retval = 0;
+ }
+
+ return retval;
+}
+
+void __global_restore_flags(unsigned long flags)
+{
+ switch (flags) {
+ case 0:
+ __global_cli();
+ break;
+ case 1:
+ __global_sti();
+ break;
+ case 2:
+ __cli();
+ break;
+ case 3:
+ __sti();
+ break;
+ default:
+ printk("global_restore_flags: %08lx\n", flags);
+ }
+}
+#endif /* CONFIG_SMP */
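To make the 0-3 encoding concrete, here is a hedged sketch of the classic save_flags/cli/restore_flags pattern that these routines back on SMP; the counter being protected is invented for the sketch.

#include <asm/system.h>	/* save_flags(), cli(), restore_flags() */

static int shared_counter;	/* invented data shared with interrupt handlers */

static void bump_shared_counter(void)
{
	unsigned long flags;

	save_flags(flags);	/* records one of the 0..3 states listed above */
	cli();			/* outside irq context: takes global_irq_lock */
	shared_counter++;	/* no handler can run concurrently on any CPU */
	restore_flags(flags);	/* __global_restore_flags() undoes exactly that */
}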
+
/*
* This should really return information about whether
* we should do bottom half handling etc. Right now we
@@ -690,7 +873,7 @@
}
spin_unlock_irqrestore(&desc->lock,flags);
- /* register_irq_proc(irq); */
+ register_irq_proc(irq);
return 0;
}
@@ -705,3 +888,176 @@
irq_desc[i].handler = &no_irq_type;
}
}
+
+EXPORT_SYMBOL(disable_irq_nosync);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(probe_irq_mask);
+
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir [NR_IRQS];
+
+#define HEX_DIGITS 8
+
+static int parse_hex_value (const char *buffer,
+ unsigned long count, unsigned long *ret)
+{
+ unsigned char hexnum [HEX_DIGITS];
+ unsigned long value;
+ int i;
+
+ if (!count)
+ return -EINVAL;
+ if (count > HEX_DIGITS)
+ count = HEX_DIGITS;
+ if (copy_from_user(hexnum, buffer, count))
+ return -EFAULT;
+
+ /*
+ * Parse the first 8 characters as a hex string, any non-hex char
+ * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
+ */
+ value = 0;
+
+ for (i = 0; i < count; i++) {
+ unsigned int c = hexnum[i];
+
+ switch (c) {
+ case '0' ... '9': c -= '0'; break;
+ case 'a' ... 'f': c -= 'a'-10; break;
+ case 'A' ... 'F': c -= 'A'-10; break;
+ default:
+ goto out;
+ }
+ value = (value << 4) | c;
+ }
+out:
+ *ret = value;
+ return 0;
+}
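A worked example of the parser's behavior: writing "00E1", "00e1" or just "e1" all yield 0xe1; a string such as "e1x7" stops at the non-hex 'x' and likewise yields 0xe1; and at most the first HEX_DIGITS (8) characters are ever examined.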
+
+#ifdef CONFIG_SMP
+
+static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
+
+static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
+static int irq_affinity_read_proc (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ if (count < HEX_DIGITS+1)
+ return -EINVAL;
+ return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
+}
+
+static int irq_affinity_write_proc (struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ int irq = (long) data, full_count = count, err;
+ unsigned long new_value;
+
+ if (!irq_desc[irq].handler->set_affinity)
+ return -EIO;
+
+ err = parse_hex_value(buffer, count, &new_value);
+ if (err)
+ return err;
+
+ /*
+ * Do not allow disabling IRQs completely - it's a too easy
+ * way to make the system unusable accidentally :-) At least
+ * one online CPU still has to be targeted.
+ */
+ if (!(new_value & cpu_online_map))
+ return -EINVAL;
+
+ irq_affinity[irq] = new_value;
+ irq_desc[irq].handler->set_affinity(irq, new_value);
+
+ return full_count;
+}
+
+#endif
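For illustration, a small hypothetical user-space program exercising the smp_affinity handler above; the IRQ number (7) and mask (0x2, i.e. CPU 1) are arbitrary choices, not taken from this patch.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/irq/7/smp_affinity", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "2\n");	/* parsed by parse_hex_value() above */
	return fclose(f) ? 1 : 0;
}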
+
+static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ unsigned long *mask = (unsigned long *) data;
+ if (count < HEX_DIGITS+1)
+ return -EINVAL;
+ return sprintf (page, "%08lx\n", *mask);
+}
+
+static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ unsigned long *mask = (unsigned long *) data;
+ int full_count = count, err;
+ unsigned long new_value;
+
+ err = parse_hex_value(buffer, count, &new_value);
+ if (err)
+ return err;
+
+ *mask = new_value;
+ return full_count;
+}
+
+#define MAX_NAMELEN 10
+
+static void register_irq_proc (unsigned int irq)
+{
+ char name [MAX_NAMELEN];
+
+ if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
+ irq_dir[irq])
+ return;
+
+ memset(name, 0, MAX_NAMELEN);
+ sprintf(name, "%d", irq);
+
+ /* create /proc/irq/1234 */
+ irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+
+#ifdef CONFIG_SMP
+ {
+ struct proc_dir_entry *entry;
+
+ /* create /proc/irq/1234/smp_affinity */
+ entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+
+ if (entry) {
+ entry->nlink = 1;
+ entry->data = (void *)(long)irq;
+ entry->read_proc = irq_affinity_read_proc;
+ entry->write_proc = irq_affinity_write_proc;
+ }
+
+ smp_affinity_entry[irq] = entry;
+ }
+#endif
+}
+
+unsigned long prof_cpu_mask = -1;
+
+void init_irq_proc (void)
+{
+ struct proc_dir_entry *entry;
+ int i;
+
+ /* create /proc/irq */
+ root_irq_dir = proc_mkdir("irq", 0);
+
+ /* create /proc/irq/prof_cpu_mask */
+ entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
+
+ if (!entry)
+ return;
+
+ entry->nlink = 1;
+ entry->data = (void *)&prof_cpu_mask;
+ entry->read_proc = prof_cpu_mask_read_proc;
+ entry->write_proc = prof_cpu_mask_write_proc;
+
+ /*
+ * Create entries for all existing IRQs.
+ */
+ for (i = 0; i < NR_IRQS; i++)
+ register_irq_proc(i);
+}
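And a matching hypothetical user-space read of the prof_cpu_mask entry that init_irq_proc() creates; the %lx scan format mirrors the %08lx used by prof_cpu_mask_read_proc() above.

#include <stdio.h>

int main(void)
{
	unsigned long mask;
	FILE *f = fopen("/proc/irq/prof_cpu_mask", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lx", &mask) == 1)
		printf("CPUs recording profile hits: %#lx\n", mask);
	fclose(f);
	return 0;
}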