#include <linux/random.h>
#include <linux/kallsyms.h>
#include <linux/profile.h>
-#include <linux/bitops.h>
#include <asm/uaccess.h>
+#include <asm/bitops.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
extern void iSeries_smp_message_recv( struct pt_regs * );
#endif
-extern irq_desc_t irq_desc[NR_IRQS];
+static void register_irq_proc (unsigned int irq);
+
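+/*
+ * Per-IRQ descriptor table, statically initialized so every entry
+ * starts out with an unlocked spinlock and no handler attached.
+ */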
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
int __irq_offset_value;
int ppc_spurious_interrupts;
unsigned long lpevent_count;
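+
+/*
+ * Attach a new irqaction to an IRQ line.  Sharing is allowed only if
+ * both the existing and the new action have SA_SHIRQ set; the first
+ * action on a line also starts up and unmasks the hardware.
+ */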
+int
+setup_irq(unsigned int irq, struct irqaction * new)
+{
+ int shared = 0;
+ unsigned long flags;
+ struct irqaction *old, **p;
+ irq_desc_t *desc = get_irq_desc(irq);
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+ * This function might sleep, we want to call it first,
+ * outside of the atomic block.
+ * Yes, this might clear the entropy pool if the wrong
+ * driver is loaded and no new handler is actually installed,
+ * but that is hardly a problem: only the sysadmin can
+ * trigger it.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ desc->depth = 0;
+ desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
+ if (desc->handler && desc->handler->startup)
+ desc->handler->startup(irq);
+ unmask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ register_irq_proc(irq);
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+
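+/*
+ * Spin until any handler currently running for this IRQ on another
+ * CPU has completed.  The caller must not hold resources that the
+ * handler might need, or this can deadlock.
+ */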
+inline void synchronize_irq(unsigned int irq)
+{
+ while (get_irq_desc(irq)->status & IRQ_INPROGRESS)
+ cpu_relax();
+}
+
+EXPORT_SYMBOL(synchronize_irq);
+
+#endif /* CONFIG_SMP */
+
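+/*
+ * Allocate an irqaction for @handler and register it with setup_irq().
+ * A typical driver call looks like the sketch below (MY_IRQ, my_handler
+ * and dev are illustrative names, not defined in this file):
+ *
+ *	if (request_irq(MY_IRQ, my_handler, SA_SHIRQ, "mydev", dev))
+ *		goto fail;		(the line could not be claimed)
+ *	...
+ *	free_irq(MY_IRQ, dev);
+ */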
+int request_irq(unsigned int irq,
+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags, const char * devname, void *dev_id)
+{
+ struct irqaction *action;
+ int retval;
+
+ if (irq >= NR_IRQS)
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+ action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action) {
+ printk(KERN_ERR "kmalloc() failed for irq %d!\n", irq);
+ return -ENOMEM;
+ }
+
+ action->handler = handler;
+ action->flags = irqflags;
+ cpus_clear(action->mask);
+ action->name = devname;
+ action->dev_id = dev_id;
+ action->next = NULL;
+
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+
+ return retval;
+}
+
+EXPORT_SYMBOL(request_irq);
+
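+/*
+ * Detach the handler matching @dev_id from an IRQ line.  The
+ * descriptor lock is dropped before synchronize_irq() so that a
+ * handler still running on another CPU can finish before its
+ * irqaction is freed.
+ */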
+void free_irq(unsigned int irq, void *dev_id)
+{
+ irq_desc_t *desc = get_irq_desc(irq);
+ struct irqaction **p;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ mask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ /* Wait to make sure it's not being used on another CPU */
+ synchronize_irq(irq);
+ kfree(action);
+ return;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ break;
+ }
+ return;
+}
+
+EXPORT_SYMBOL(free_irq);
+
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
+
+/**
+ * disable_irq_nosync - disable an irq without waiting
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line.  Disables and enables are
+ * nested.  Unlike disable_irq(), this function does not ensure
+ * existing instances of the IRQ handler have completed before
+ * returning.
+ *
+ * This function may be called from IRQ context.
+ */
+
+inline void disable_irq_nosync(unsigned int irq)
+{
+ irq_desc_t *desc = get_irq_desc(irq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (!desc->depth++) {
+ if (!(desc->status & IRQ_PER_CPU))
+ desc->status |= IRQ_DISABLED;
+ mask_irq(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(disable_irq_nosync);
+
+/**
+ * disable_irq - disable an irq and wait for completion
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line.  Disables and enables are
+ * nested: for two disables you need two enables.  This
+ * function waits for any pending IRQ handlers for this interrupt
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+
+void disable_irq(unsigned int irq)
+{
+ irq_desc_t *desc = get_irq_desc(irq);
+ disable_irq_nosync(irq);
+ if (desc->action)
+ synchronize_irq(irq);
+}
+
+EXPORT_SYMBOL(disable_irq);
+
+/**
+ * enable_irq - enable interrupt handling on an irq
+ * @irq: Interrupt to enable
+ *
+ * Re-enables the processing of interrupts on this IRQ line
+ * providing no disable_irq calls are now in effect.
+ *
+ * This function may be called from IRQ context.
+ */
+
+void enable_irq(unsigned int irq)
+{
+ irq_desc_t *desc = get_irq_desc(irq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
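+ /*
+ * depth == 1 means this enable balances the last outstanding
+ * disable: clear IRQ_DISABLED, replay any interrupt that arrived
+ * while the line was masked, and unmask the hardware before
+ * falling through to the depth decrement.
+ */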
+ switch (desc->depth) {
+ case 1: {
+ unsigned int status = desc->status & ~IRQ_DISABLED;
+ desc->status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ desc->status = status | IRQ_REPLAY;
+ hw_resend_irq(desc->handler,irq);
+ }
+ unmask_irq(irq);
+ /* fall-through */
+ }
+ default:
+ desc->depth--;
+ break;
+ case 0:
+ printk("enable_irq(%u) unbalanced from %p\n", irq,
+ __builtin_return_address(0));
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+EXPORT_SYMBOL(enable_irq);
+
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
return 0;
}
-extern int noirqdebug;
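+/*
+ * Run every handler chained on this IRQ.  Handlers run with local
+ * interrupts enabled unless SA_INTERRUPT was requested, and their
+ * return values are ORed together so the caller can tell whether
+ * anybody claimed the interrupt.
+ */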
+int handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
+{
+ int status = 0;
+ int ret, retval = 0;
+
+ if (!(action->flags & SA_INTERRUPT))
+ local_irq_enable();
+
+ do {
+ ret = action->handler(irq, action->dev_id, regs);
+ if (ret == IRQ_HANDLED)
+ status |= action->flags;
+ retval |= ret;
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ local_irq_disable();
+ return retval;
+}
+
+static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+ struct irqaction *action;
+
+ if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+ printk(KERN_ERR "irq event %d: bogus return value %x\n",
+ irq, action_ret);
+ } else {
+ printk(KERN_ERR "irq %d: nobody cared!\n", irq);
+ }
+ dump_stack();
+ printk(KERN_ERR "handlers:\n");
+ action = desc->action;
+ do {
+ printk(KERN_ERR "[<%p>]", action->handler);
+ print_symbol(" (%s)",
+ (unsigned long)action->handler);
+ printk("\n");
+ action = action->next;
+ } while (action);
+}
+
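+/* Rate-limit the diagnostic above to the first 100 reports per boot. */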
+static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+ static int count = 100;
+
+ if (count) {
+ count--;
+ __report_bad_irq(irq, desc, action_ret);
+ }
+}
+
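+/* "noirqdebug" on the kernel command line turns off the stuck-IRQ detection below. */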
+static int noirqdebug;
+
+static int __init noirqdebug_setup(char *str)
+{
+ noirqdebug = 1;
+ printk("IRQ lockup detection disabled\n");
+ return 1;
+}
+
+__setup("noirqdebug", noirqdebug_setup);
+
+/*
+ * If 99,900 of the previous 100,000 interrupts have not been handled then
+ * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
+ * turn the IRQ off.
+ *
+ * (The other 100-of-100,000 interrupts may have been a correctly-functioning
+ * device sharing an IRQ with the failing one)
+ *
+ * Called under desc->lock
+ */
+static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
+{
+ if (action_ret != IRQ_HANDLED) {
+ desc->irqs_unhandled++;
+ if (action_ret != IRQ_NONE)
+ report_bad_irq(irq, desc, action_ret);
+ }
+
+ desc->irq_count++;
+ if (desc->irq_count < 100000)
+ return;
+
+ desc->irq_count = 0;
+ if (desc->irqs_unhandled > 99900) {
+ /*
+ * The interrupt is stuck
+ */
+ __report_bad_irq(irq, desc, action_ret);
+ /*
+ * Now kill the IRQ
+ */
+ printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
+ desc->status |= IRQ_DISABLED;
+ desc->handler->disable(irq);
+ }
+ desc->irqs_unhandled = 0;
+}
/*
* Eventually, this should take an array of interrupts and an array size
if (desc->status & IRQ_PER_CPU) {
/* no locking required for CPU-local interrupts: */
ack_irq(irq);
- action_ret = handle_IRQ_event(irq, regs, desc->action);
+ action_ret = handle_irq_event(irq, regs, desc->action);
desc->handler->end(irq);
return;
}
if (curtp != irqtp) {
irqtp->task = curtp->task;
irqtp->flags = 0;
- action_ret = call_handle_IRQ_event(irq, regs, action, irqtp);
+ action_ret = call_handle_irq_event(irq, regs, action, irqtp);
irqtp->task = NULL;
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
} else
#endif
- action_ret = handle_IRQ_event(irq, regs, action);
+ action_ret = handle_irq_event(irq, regs, action);
spin_lock(&desc->lock);
if (!noirqdebug)
- note_interrupt(irq, desc, action_ret, regs);
+ note_interrupt(irq, desc, action_ret);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
}
#endif /* CONFIG_PPC_ISERIES */
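+
+/*
+ * Interrupt auto-probing is not implemented on this platform; these
+ * stubs return "nothing found" so drivers that attempt it fail
+ * gracefully.
+ */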
+unsigned long probe_irq_on (void)
+{
+ return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+int probe_irq_off (unsigned long irqs)
+{
+ return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
+
+unsigned int probe_irq_mask(unsigned long irqs)
+{
+ return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_mask);
+
void __init init_IRQ(void)
{
static int once = 0;
irq_ctx_init();
}
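+
+/*
+ * /proc/irq support: one directory per interrupt with an smp_affinity
+ * file that steers the IRQ to a set of CPUs.  For example (illustrative,
+ * assuming IRQ 16 exists), "echo 3 > /proc/irq/16/smp_affinity" limits
+ * IRQ 16 to CPUs 0 and 1.
+ */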
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir [NR_IRQS];
+static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
+
+/* Protected by get_irq_desc(irq)->lock. */
+#ifdef CONFIG_IRQ_ALL_CPUS
+cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
+#else /* CONFIG_IRQ_ALL_CPUS */
+cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_NONE };
+#endif /* CONFIG_IRQ_ALL_CPUS */
+
+static int irq_affinity_read_proc (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
+ if (count - len < 2)
+ return -EINVAL;
+ len += sprintf(page + len, "\n");
+ return len;
+}
+
+static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ unsigned int irq = (long)data;
+ irq_desc_t *desc = get_irq_desc(irq);
+ int ret;
+ cpumask_t new_value, tmp;
+
+ if (!desc->handler->set_affinity)
+ return -EIO;
+
+ ret = cpumask_parse(buffer, count, new_value);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * xics treats CPU_MASK_ALL as "send irqs to all cpus".  In some
+ * cases CPU_MASK_ALL sets fewer bits than the underlying word
+ * (eg NR_CPUS == 32 while a cpumask is a 64-bit long), so mask
+ * the user-supplied value here to keep that comparison consistent.
+ */
+ cpus_and(new_value, new_value, CPU_MASK_ALL);
+
+ /*
+ * Grab lock here so cpu_online_map can't change, and also
+ * protect irq_affinity[].
+ */
+ spin_lock(&desc->lock);
+
+ /*
+ * Do not allow disabling IRQs completely - it's far too easy
+ * a way to make the system unusable by accident :-) At least
+ * one online CPU still has to be targeted.
+ */
+ cpus_and(tmp, new_value, cpu_online_map);
+ if (cpus_empty(tmp)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ irq_affinity[irq] = new_value;
+ desc->handler->set_affinity(irq, new_value);
+ ret = count;
+
+out:
+ spin_unlock(&desc->lock);
+ return ret;
+}
+
+#define MAX_NAMELEN 10
+
+static void register_irq_proc (unsigned int irq)
+{
+ struct proc_dir_entry *entry;
+ char name [MAX_NAMELEN];
+
+ if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
+ return;
+
+ memset(name, 0, MAX_NAMELEN);
+ sprintf(name, "%d", irq);
+
+ /* create /proc/irq/1234 */
+ irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+
+ /* create /proc/irq/1234/smp_affinity */
+ entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+
+ if (entry) {
+ entry->nlink = 1;
+ entry->data = (void *)(long)irq;
+ entry->read_proc = irq_affinity_read_proc;
+ entry->write_proc = irq_affinity_write_proc;
+ }
+
+ smp_affinity_entry[irq] = entry;
+}
+
+void init_irq_proc (void)
+{
+ int i;
+
+ /* create /proc/irq */
+ root_irq_dir = proc_mkdir("irq", NULL);
+
+ /* create /proc/irq/prof_cpu_mask */
+ create_prof_cpu_mask(root_irq_dir);
+
+ /*
+ * Create entries for all existing IRQs.
+ */
+ for_each_irq(i) {
+ if (get_irq_desc(i)->handler == NULL)
+ continue;
+ register_irq_proc(i);
+ }
+}
+
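+/* Dummy handler for interrupts that are claimed but never serviced. */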
+irqreturn_t no_action(int irq, void *dev, struct pt_regs *regs)
+{
+ return IRQ_NONE;
+}
+
#ifndef CONFIG_PPC_ISERIES
/*
* Virtual IRQ mapping code, used on systems with XICS interrupt controllers.