/*
 *	linux/arch/ia64/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>, Intel Corporation 2004
 *
 * 4/14/2004: Added code to handle cpu migration and do safe irq
 *	      migration without losing interrupts for iosapic
 *	      architecture.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/cpu.h>
#include <linux/ctype.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/delay.h>
/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs are assumed to be messaging to Pentium local-APICs.)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};

/*
 * This is updated when the user sets irq affinity via /proc
 */
cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)];
#ifdef CONFIG_IA64_GENERIC
irq_desc_t * __ia64_irq_desc (unsigned int irq)
{
	return _irq_desc + irq;
}

ia64_vector __ia64_irq_to_vector (unsigned int irq)
{
	return (ia64_vector) irq;
}

unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
	return (unsigned int) vec;
}
#endif

static void register_irq_proc (unsigned int irq);
/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{ return IRQ_NONE; }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }

static void ack_none(unsigned int irq)
{
	/*
	 * 'what should we do if we get a hw irq event on an illegal vector'?
	 * Each architecture has to answer this themselves; it doesn't deserve
	 * a generic callback, I think.
	 */
	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 */
	ack_APIC_irq();
#endif
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
}
/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
atomic_t irq_mis_count;
#endif
#endif
/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int j, i = *(loff_t *) v;
	struct irqaction * action;
	irq_desc_t *idesc;
	unsigned long flags;

	if (i == 0) {
		for (j = 0; j < NR_CPUS; j++)
			seq_printf(p, "CPU%d       ", j);
	}

	if (i < NR_IRQS) {
		idesc = irq_descp(i);
		spin_lock_irqsave(&idesc->lock, flags);
		action = idesc->action;
		seq_printf(p, "%3d: ", i);
		seq_printf(p, "%10u ", kstat_irqs(i));
		for (j = 0; j < NR_CPUS; j++)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
		seq_printf(p, " %14s", idesc->handler->typename);
		seq_printf(p, " %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		spin_unlock_irqrestore(&idesc->lock, flags);
	} else if (i == NR_IRQS) {
		seq_puts(p, "NMI: ");
		for (j = 0; j < NR_CPUS; j++)
			seq_printf(p, "%10u ", nmi_count(j));
#ifdef CONFIG_X86_LOCAL_APIC
		seq_puts(p, "LOC: ");
		for (j = 0; j < NR_CPUS; j++)
			seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
#endif
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
#endif
	}
	return 0;
}
inline void synchronize_irq(unsigned int irq)
{
	while (irq_descp(irq)->status & IRQ_INPROGRESS)
		cpu_relax();
}
EXPORT_SYMBOL(synchronize_irq);
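/*
 * Illustrative sketch (not part of this file): a driver that is about to
 * free data its interrupt handler dereferences can wait for any in-flight
 * handler instance with synchronize_irq() first.  "struct my_dev" and its
 * fields are hypothetical names used only for the example.
 */
#if 0	/* example only */
static void my_dev_teardown(struct my_dev *dev)
{
	dev->shutting_down = 1;		/* handler checks this and bails out */
	synchronize_irq(dev->irq);	/* wait until no handler is running */
	kfree(dev->rx_ring);		/* now safe to free handler-visible data */
}
#endif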
/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq,
		struct pt_regs *regs, struct irqaction *action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		status |= action->flags;
		retval |= action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();
	return retval;
}
static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
	}
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	do {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	} while (action);
}
static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk("IRQ lockup detection disabled\n");
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
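/*
 * Usage note (an assumption based on the __setup() above): booting with
 * "noirqdebug" on the kernel command line sets noirqdebug and skips the
 * note_interrupt() screening performed below.
 */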
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 * device sharing an IRQ with the failing one.)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_descp(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_descp(irq);

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq(). If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_descp(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler, irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk(KERN_ERR "enable_irq(%u) unbalanced from %p\n",
		       irq, (void *) __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);
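/*
 * Illustrative sketch (not part of this file): disable/enable nest, so a
 * driver must balance every disable_irq() with exactly one enable_irq().
 * "my_dev" and MY_DEV_CTL are hypothetical example names.
 */
#if 0	/* example only */
static void my_dev_reprogram(struct my_dev *dev)
{
	disable_irq(dev->irq);			/* waits for running handlers to finish */
	writel(0, dev->regs + MY_DEV_CTL);	/* touch state the handler also uses */
	enable_irq(dev->irq);			/* one enable per disable */
}
#endif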
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU (or is disabled).
	 */
	irq_desc_t *desc = irq_descp(irq);
	struct irqaction * action;
	irqreturn_t action_ret;
	unsigned int status;
	int cpu;

	cpu = smp_processor_id();	/* for CONFIG_PREEMPT, this must come after irq_enter()! */

	kstat_cpu(cpu).irqs[irq]++;
	if (desc->status & IRQ_PER_CPU) {
		/* no locking required for CPU-local interrupts: */
		desc->handler->ack(irq);
		action_ret = handle_IRQ_event(irq, regs, desc->action);
		desc->handler->end(irq);
	} else {
		spin_lock(&desc->lock);
		desc->handler->ack(irq);
		/*
		 * REPLAY is when Linux resends an IRQ that was dropped earlier.
		 * WAITING is used by probe to mark irqs that are being tested.
		 */
		status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
		status |= IRQ_PENDING; /* we _want_ to handle it */

		/*
		 * If the IRQ is disabled for whatever reason, we cannot
		 * use the action we have.
		 */
		action = NULL;
		if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
			action = desc->action;
			status &= ~IRQ_PENDING; /* we commit to handling */
			status |= IRQ_INPROGRESS; /* we are handling it */
		}
		desc->status = status;
		/*
		 * If there is no IRQ handler or it was disabled, exit early.
		 * Since we set PENDING, if another processor is handling
		 * a different instance of this same irq, the other processor
		 * will take care of it.
		 */
		if (unlikely(!action))
			goto out;

		/*
		 * Edge triggered interrupts need to remember pending events.
		 * This applies to any hw interrupts that allow a second
		 * instance of the same irq to arrive while we are in do_IRQ
		 * or in the handler. But the code here only handles the _second_
		 * instance of the irq, not the third or fourth. So it is mostly
		 * useful for irq hardware that does not mask cleanly in an
		 * SMP environment.
		 */
		for (;;) {
			spin_unlock(&desc->lock);
			action_ret = handle_IRQ_event(irq, regs, action);
			spin_lock(&desc->lock);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
			if (!(desc->status & IRQ_PENDING))
				break;
			desc->status &= ~IRQ_PENDING;
		}
		desc->status &= ~IRQ_INPROGRESS;
out:
		/*
		 * The ->end() handler has to deal with interrupts which got
		 * disabled while the handler was running.
		 */
		desc->handler->end(irq);
		spin_unlock(&desc->lock);
	}
	return 1;
}
/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *	SA_INTERRUPT		Disable local interrupts while processing
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk(KERN_ERR "Bad boy: %s called us without a dev_id!\n", devname);
	}

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);
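/*
 * Illustrative sketch (not part of this file): a typical caller registers a
 * handler with a unique dev_id cookie; SA_SHIRQ allows the line to be shared,
 * so the handler must check whether its device actually interrupted.
 * "my_dev", MY_DEV_STATUS and MY_DEV_IRQ_PENDING are hypothetical names.
 */
#if 0	/* example only */
static irqreturn_t my_dev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct my_dev *dev = dev_id;

	if (!(readl(dev->regs + MY_DEV_STATUS) & MY_DEV_IRQ_PENDING))
		return IRQ_NONE;	/* not ours on a shared line */
	writel(MY_DEV_IRQ_PENDING, dev->regs + MY_DEV_STATUS);	/* ack the board */
	return IRQ_HANDLED;
}

static int my_dev_attach(struct my_dev *dev)
{
	/* dev is used as the dev_id cookie, as the comment above suggests */
	return request_irq(dev->irq, my_dev_interrupt, SA_SHIRQ,
			   "my_dev", dev);
}
#endif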
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	desc = irq_descp(irq);
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock, flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
		spin_unlock_irqrestore(&desc->lock, flags);
		return;
	}
}

EXPORT_SYMBOL(free_irq);
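/*
 * Illustrative teardown sketch (not part of this file): quiesce the board
 * first, then free the line with the same dev_id used at request time.
 * "my_dev" and MY_DEV_IRQ_ENABLE are hypothetical names.
 */
#if 0	/* example only */
static void my_dev_detach(struct my_dev *dev)
{
	writel(0, dev->regs + MY_DEV_IRQ_ENABLE);	/* stop the card interrupting */
	free_irq(dev->irq, dev);			/* waits for running handlers */
}
#endif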
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
static DECLARE_MUTEX(probe_sem);
/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
	unsigned long val = 0;
	unsigned long delay;
	int i;

	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS - 1; i > 0; i--) {
		irq_desc_t *desc = irq_descp(i);

		spin_lock_irq(&desc->lock);
		if (!desc->action)
			desc->handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS - 1; i > 0; i--) {
		irq_desc_t *desc = irq_descp(i);

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_descp(i);
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else if (i < 32)
				val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

EXPORT_SYMBOL(probe_irq_on);
/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@val: mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the irq's even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask = 0;

	for (i = 0; i < 16; i++) {
		irq_desc_t *desc = irq_descp(i);
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}

	return mask & val;
}

EXPORT_SYMBOL(probe_irq_mask);
/**
 *	probe_irq_off	- end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	that there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_descp(i);
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
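/*
 * Illustrative autoprobe sketch (not part of this file): the classic ISA
 * sequence is probe_irq_on(), make the device raise one interrupt, then
 * probe_irq_off().  "my_dev_trigger_irq" and "struct my_dev" are hypothetical.
 */
#if 0	/* example only */
static int my_dev_find_irq(struct my_dev *dev)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* start autodetect on unassigned lines */
	my_dev_trigger_irq(dev);	/* make the hardware generate one interrupt */
	mdelay(10);			/* give it time to arrive */
	irq = probe_irq_off(mask);	/* >0: found, 0: none, <0: several candidates */
	return irq;
}
#endif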
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_descp(irq);

	if (desc->handler == &no_irq_type)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	if (new->flags & SA_PERCPU_IRQ) {
		desc->status |= IRQ_PER_CPU;
		desc->handler = &irq_type_ia64_lsapic;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);

	register_irq_proc(irq);
	return 0;
}
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#ifdef CONFIG_SMP

static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };

static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
	cpumask_t mask = CPU_MASK_NONE;

	cpu_set(cpu_logical_id(hwid), mask);

	if (irq < NR_IRQS) {
		irq_affinity[irq] = mask;
		irq_redir[irq] = (char) (redir & 0xff);
	}
}
static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = sprintf(page, "%s", irq_redir[(long)data] ? "r " : "");

	len += cpumask_scnprintf(page+len, count, irq_affinity[(long)data]);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static int irq_affinity_write_proc (struct file *file, const char *buffer,
			unsigned long count, void *data)
{
	unsigned int irq = (unsigned long) data;
	int full_count = count, err;
	cpumask_t new_value, tmp;
#	define R_PREFIX_LEN 16
	char rbuf[R_PREFIX_LEN];
	int rlen;
	int prelen;
	irq_desc_t *desc = irq_descp(irq);
	unsigned long flags;

	if (!desc->handler->set_affinity)
		return -EIO;

	/*
	 * If the string being written starts with a prefix of 'r' or 'R'
	 * and some limited number of spaces, set IA64_IRQ_REDIRECTED.
	 * If more than (R_PREFIX_LEN - 2) spaces are passed, they won't
	 * all be trimmed as part of prelen; the untrimmed spaces will
	 * cause the hex parsing to fail, and this write() syscall will
	 * return -EINVAL.
	 */
	rlen = min(sizeof(rbuf)-1, count);
	if (copy_from_user(rbuf, buffer, rlen))
		return -EFAULT;

	if (tolower(*rbuf) == 'r') {
		prelen = strspn(rbuf, "Rr ");
	}

	err = cpumask_parse(buffer+prelen, count-prelen, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	spin_lock_irqsave(&desc->lock, flags);
	pending_irq_cpumask[irq] = new_value;
	set_bit(irq, pending_irq_redir);
	clear_bit(irq, pending_irq_redir);
	spin_unlock_irqrestore(&desc->lock, flags);

	return full_count;
}
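/*
 * Usage note (an assumption, matching the parsing above): the mask is written
 * as hex, optionally prefixed with 'r' to request redirectable delivery,
 * e.g. from a shell:
 *
 *	echo 3 > /proc/irq/18/smp_affinity	# pin IRQ 18 to CPUs 0-1
 *	echo r 3 > /proc/irq/18/smp_affinity	# same CPUs, redirectable
 *
 * The request is recorded in pending_irq_cpumask[]/pending_irq_redir and
 * applied later by move_irq() while the interrupt is being serviced.
 */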
void move_irq(int irq)
{
	/* note - we hold desc->lock */
	cpumask_t tmp;
	irq_desc_t *desc = irq_descp(irq);
	int redir = test_bit(irq, pending_irq_redir);

	if (!cpus_empty(pending_irq_cpumask[irq])) {
		cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
		if (unlikely(!cpus_empty(tmp))) {
			desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0),
						    pending_irq_cpumask[irq]);
		}
		cpus_clear(pending_irq_cpumask[irq]);
	}
}

#endif /* CONFIG_SMP */
#ifdef CONFIG_HOTPLUG_CPU
unsigned int vectors_in_migration[NR_IRQS];

/*
 * Since cpu_online_map is already updated, we just need to find IRQs
 * whose affinity mask no longer contains any online CPU.
 */
static void migrate_irqs(void)
{
	cpumask_t mask;
	irq_desc_t *desc;
	int irq, new_cpu;

	for (irq = 0; irq < NR_IRQS; irq++) {
		desc = irq_descp(irq);

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can tell the CPU
		 * not to respond to these local interrupt sources,
		 * such as ITV, CPEI, MCA etc.
		 */
		if (desc->status == IRQ_PER_CPU)
			continue;

		cpus_and(mask, irq_affinity[irq], cpu_online_map);
		if (any_online_cpu(mask) == NR_CPUS) {
			/*
			 * Save it for phase 2 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = any_online_cpu(cpu_online_map);
			mask = cpumask_of_cpu(new_cpu);

			/*
			 * All three are essential; currently WARN_ON.. maybe panic?
			 */
			if (desc->handler && desc->handler->disable &&
				desc->handler->enable && desc->handler->set_affinity) {
				desc->handler->disable(irq);
				desc->handler->set_affinity(irq, mask);
				desc->handler->enable(irq);
			} else {
				WARN_ON((!(desc->handler) || !(desc->handler->disable) ||
						!(desc->handler->enable) ||
						!(desc->handler->set_affinity)));
			}
		}
	}
}
void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);

	ia64_set_itv(1 << 16);
	/*
	 * Phase 1: Locate irq's bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();

	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * local APIC.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 3: Now handle any interrupts not captured in local APIC.
	 * This accounts for cases where a device interrupted while its RTE
	 * was being disabled and re-programmed.
	 */
	for (irq = 0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			vectors_in_migration[irq] = 0;
		}
	}

	/*
	 * Now let the processor die. We do irq disable and max_xtp() to
	 * ensure no more interrupts are routed to this processor.
	 * The local timer interrupt may still have one pending, which is
	 * taken care of in timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);

	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
			unsigned long count, void *data)
{
	cpumask_t *mask = (cpumask_t *)data;
	unsigned long full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_descp(irq)->handler == &no_irq_type) || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}
cpumask_t prof_cpu_mask = CPU_MASK_ALL;

void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	if (!entry)
		return;

	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_descp(i)->handler == &no_irq_type)
			continue;
		register_irq_proc(i);
	}
}