/*
 *	linux/arch/ia64/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be painless.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>, Intel Corporation 2004
 *
 * 4/14/2004: Added code to handle cpu migration and do safe irq
 *	      migration without losing interrupts for iosapic
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/cpu.h>
#include <linux/ctype.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/delay.h>

extern cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */
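
/*
 * Illustrative sketch (not part of this file's build): how a driver sees
 * this layer.  The driver only deals in IRQ numbers, handlers and flags;
 * whether the line is routed through an 8259, an IO-SAPIC, etc. is hidden
 * behind the per-IRQ 'handler' (hw_interrupt_type) chosen by platform code.
 * The device name, helper functions and fields below are made up.
 */
#if 0
static irqreturn_t exampledev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct exampledev *dev = dev_id;	/* cookie passed to request_irq() */

	if (!exampledev_irq_pending(dev))
		return IRQ_NONE;		/* not ours (shared line) */

	exampledev_ack_hw(dev);			/* clear the device's interrupt condition */
	return IRQ_HANDLED;
}

static int exampledev_attach(struct exampledev *dev)
{
	/* No controller-specific code here: just ask for the line. */
	return request_irq(dev->irq, exampledev_interrupt, SA_SHIRQ,
			   "exampledev", dev);
}
#endif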
/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.status = IRQ_DISABLED,
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};
#ifdef CONFIG_IA64_GENERIC
irq_desc_t * __ia64_irq_desc (unsigned int irq)
{
	return _irq_desc + irq;
}

ia64_vector __ia64_irq_to_vector (unsigned int irq)
{
	return (ia64_vector) irq;
}

unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
	return (unsigned int) vec;
}
#endif
static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */
irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}
/*
 * Generic no controller code
 */
static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }

static void ack_none(unsigned int irq)
{
	/*
	 * 'what should we do if we get a hw irq event on an illegal vector'.
	 * Each architecture has to answer this themselves; it doesn't deserve
	 * a generic callback, I think.
	 */
#ifdef CONFIG_X86
	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 */
	ack_APIC_irq();
#endif
#endif
#ifdef CONFIG_IA64
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
#endif
}
/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	enable_none,
	disable_none,
	ack_none,
	end_none
};
atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
atomic_t irq_mis_count;
#endif
#endif
/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int j, i = *(loff_t *) v;
	struct irqaction * action;
	irq_desc_t *idesc;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "           ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		idesc = irq_descp(i);
		spin_lock_irqsave(&idesc->lock, flags);
		action = idesc->action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", idesc->handler->typename);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&idesc->lock, flags);
	} else if (i == NR_IRQS) {
		seq_puts(p, "NMI: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
		seq_puts(p, "LOC: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
		seq_putc(p, '\n');
#endif
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
#endif
	}
	return 0;
}
inline void synchronize_irq(unsigned int irq)
{
	while (irq_descp(irq)->status & IRQ_INPROGRESS)
		cpu_relax();
}
EXPORT_SYMBOL(synchronize_irq);
/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq,
		struct pt_regs *regs, struct irqaction *action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		status |= action->flags;
		retval |= action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();
	return retval;
}
static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
	}
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	}
}

static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}
static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk("IRQ lockup detection disabled\n");
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 *  device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}
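
/*
 * Illustrative sketch (not built here): the accounting above relies on
 * handlers honouring the irqreturn_t contract.  A handler should return
 * IRQ_HANDLED only when its device really raised the interrupt and
 * IRQ_NONE otherwise; IRQ_RETVAL() converts a boolean "handled" flag.
 * Returning anything else trips the "bogus return value" report, and
 * always claiming IRQ_HANDLED on a flaky line defeats the stuck-IRQ
 * detection.  The device name and helpers below are made up.
 */
#if 0
static irqreturn_t flakydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct flakydev *dev = dev_id;
	int handled = 0;

	if (flakydev_irq_pending(dev)) {
		flakydev_service(dev);
		handled = 1;
	}
	/* IRQ_RETVAL(0) == IRQ_NONE lets note_interrupt() count unhandled events */
	return IRQ_RETVAL(handled);
}
#endif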
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/*
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_descp(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
/*
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_descp(irq);

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
/*
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_descp(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk(KERN_ERR "enable_irq(%u) unbalanced from %p\n",
		       irq, (void *) __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL(enable_irq);
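
/*
 * Illustrative sketch (not built here): disable_irq()/enable_irq() nest via
 * desc->depth, so every disable must be paired with exactly one enable and
 * the line is physically unmasked only when the count returns to zero.
 * An unmatched enable_irq() triggers the "unbalanced" warning above.
 * The device, helpers and field names below are made up.
 */
#if 0
static void exampledev_reprogram(struct exampledev *dev)
{
	disable_irq(dev->irq);			/* waits for running handlers to finish */
	exampledev_rewrite_registers(dev);	/* safe: no handler can run now */
	enable_irq(dev->irq);			/* matches the disable above */
}
#endif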
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	irq_desc_t *desc = irq_descp(irq);
	struct irqaction * action;
	irqreturn_t action_ret;
	unsigned int status;
	int cpu;

	irq_enter();
	cpu = smp_processor_id(); /* for CONFIG_PREEMPT, this must come after irq_enter()! */

	kstat_cpu(cpu).irqs[irq]++;

	if (desc->status & IRQ_PER_CPU) {
		/* no locking required for CPU-local interrupts: */
		desc->handler->ack(irq);
		action_ret = handle_IRQ_event(irq, regs, desc->action);
		desc->handler->end(irq);
	} else {
		spin_lock(&desc->lock);
		desc->handler->ack(irq);
		/*
		 * REPLAY is when Linux resends an IRQ that was dropped earlier
		 * WAITING is used by probe to mark irqs that are being tested
		 */
		status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
		status |= IRQ_PENDING; /* we _want_ to handle it */

		/*
		 * If the IRQ is disabled for whatever reason, we cannot
		 * use the action we have.
		 */
		action = NULL;
		if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
			action = desc->action;
			status &= ~IRQ_PENDING; /* we commit to handling */
			status |= IRQ_INPROGRESS; /* we are handling it */
		}
		desc->status = status;

		/*
		 * If there is no IRQ handler or it was disabled, exit early.
		 * Since we set PENDING, if another processor is handling
		 * a different instance of this same irq, the other processor
		 * will take care of it.
		 */
		if (unlikely(!action))
			goto out;

		/*
		 * Edge triggered interrupts need to remember
		 * pending events.
		 * This applies to any hw interrupts that allow a second
		 * instance of the same irq to arrive while we are in do_IRQ
		 * or in the handler. But the code here only handles the _second_
		 * instance of the irq, not the third or fourth. So it is mostly
		 * useful for irq hardware that does not mask cleanly in an
		 * SMP environment.
		 */
		for (;;) {
			spin_unlock(&desc->lock);
			action_ret = handle_IRQ_event(irq, regs, action);
			spin_lock(&desc->lock);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
			if (!(desc->status & IRQ_PENDING))
				break;
			desc->status &= ~IRQ_PENDING;
		}
		desc->status &= ~IRQ_INPROGRESS;
out:
		/*
		 * The ->end() handler has to deal with interrupts which got
		 * disabled while the handler was running.
		 */
		desc->handler->end(irq);
		spin_unlock(&desc->lock);
	}
	irq_exit();
	return 1;
}
/*
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 * SA_SHIRQ		Interrupt is shared
 * SA_INTERRUPT		Disable local interrupts while processing
 * SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk(KERN_ERR "Bad boy: %s called us without a dev_id!\n", devname);
	}

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);
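
/*
 * Illustrative sketch (not built here): typical request_irq() usage as
 * described above - hardware quiesced first, handler installed, and the
 * per-device structure used as dev_id so the same cookie can later be
 * handed to free_irq().  Device, helpers, flag choice and fields are made up.
 */
#if 0
static int exampledev_open(struct exampledev *dev)
{
	int err;

	exampledev_mask_hw(dev);	/* make sure the board is quiet first */

	err = request_irq(dev->irq, exampledev_interrupt,
			  SA_SHIRQ | SA_SAMPLE_RANDOM, "exampledev", dev);
	if (err)
		return err;		/* e.g. -EBUSY: line held by an incompatible user */

	exampledev_unmask_hw(dev);	/* handler is in place, let the device fire */
	return 0;
}
#endif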
/*
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	desc = irq_descp(irq);
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}

EXPORT_SYMBOL(free_irq);
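
/*
 * Illustrative sketch (not built here): teardown order matching the rules
 * above - on a shared line the device is silenced on the card first, then
 * free_irq() removes the action and waits (via synchronize_irq()) for any
 * handler instance still running on another CPU.  Names are made up.
 */
#if 0
static void exampledev_close(struct exampledev *dev)
{
	exampledev_mask_hw(dev);	/* stop the card raising the shared line */
	free_irq(dev->irq, dev);	/* same dev_id cookie as in request_irq() */
	/* no handler for this device can be running past this point */
}
#endif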
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
static DECLARE_MUTEX(probe_sem);

/*
 * probe_irq_on - begin an interrupt autodetect
 *
 * Commence probing for an interrupt. The interrupts are scanned
 * and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_descp(i);

		spin_lock_irq(&desc->lock);
		if (!desc->action)
			desc->handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_descp(i);

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_descp(i);
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else if (i < 32)
				val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

EXPORT_SYMBOL(probe_irq_on);
/*
 * probe_irq_mask - scan a bitmap of interrupt lines
 * @val: mask of interrupts to consider
 *
 * Scan the ISA bus interrupt lines and return a bitmap of
 * active interrupts. The interrupt probe logic state is then
 * returned to its previous value.
 *
 * Note: we need to scan all the irq's even though we will
 * only return ISA irq numbers - just so that we reset them
 * all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < 16; i++) {
		irq_desc_t *desc = irq_descp(i);
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}
EXPORT_SYMBOL(probe_irq_mask);
/*
 * probe_irq_off - end an interrupt autodetect
 * @val: mask of potential interrupts (unused)
 *
 * Scans the unused interrupt lines and returns the line which
 * appears to have triggered the interrupt. If no interrupt was
 * found then zero is returned. If more than one interrupt is
 * found then minus the first candidate is returned to indicate
 * there is doubt.
 *
 * The interrupt probe logic state is returned to its previous
 * value.
 *
 * BUGS: When used in a module (which arguably shouldn't happen)
 * nothing prevents two IRQ probe callers from overlapping. The
 * results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_descp(i);
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
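
/*
 * Illustrative sketch (not built here): the classic legacy-ISA probing
 * sequence built on the calls above - arm detection, make the device
 * generate one interrupt, then ask which line fired.  A zero result means
 * nothing triggered; a negative result means several lines did.  The
 * device helpers and fields below are made up.
 */
#if 0
static int exampledev_probe_irq(struct exampledev *dev)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();			/* start autodetect, enable unused lines */
	exampledev_trigger_test_irq(dev);	/* force the card to interrupt once */
	udelay(100);				/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);		/* 0 = none, <0 = ambiguous, >0 = the line */

	if (irq <= 0)
		return -ENODEV;
	dev->irq = irq;
	return 0;
}
#endif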
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_descp(irq);

	if (desc->handler == &no_irq_type)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	if (new->flags & SA_PERCPU_IRQ) {
		desc->status |= IRQ_PER_CPU;
		desc->handler = &irq_type_ia64_lsapic;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}
		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#ifdef CONFIG_SMP

static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };

static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };

void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
	cpumask_t mask = CPU_MASK_NONE;

	cpu_set(cpu_logical_id(hwid), mask);

	if (irq < NR_IRQS) {
		irq_affinity[irq] = mask;
		irq_redir[irq] = (char) (redir & 0xff);
	}
}
static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = sprintf(page, "%s", irq_redir[(long)data] ? "r " : "");

	len += cpumask_scnprintf(page+len, count, irq_affinity[(long)data]);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static int irq_affinity_write_proc (struct file *file, const char *buffer,
			unsigned long count, void *data)
{
	unsigned int irq = (unsigned long) data;
	int full_count = count, err;
	cpumask_t new_value, tmp;
#define R_PREFIX_LEN 16
	char rbuf[R_PREFIX_LEN];
	int rlen;
	int prelen;
	irq_desc_t *desc = irq_descp(irq);
	unsigned long flags;

	if (!desc->handler->set_affinity)
		return -EIO;

	/*
	 * If the string being written starts with a prefix of 'r' or 'R'
	 * and some limited number of spaces, set IA64_IRQ_REDIRECTED.
	 * If more than (R_PREFIX_LEN - 2) spaces are passed, they won't
	 * all be trimmed as part of prelen; the untrimmed spaces will
	 * cause the hex parsing to fail, and this write() syscall will
	 * fail with EINVAL.
	 */
	if (!count)
		return -EINVAL;
	rlen = min(sizeof(rbuf)-1, count);
	if (copy_from_user(rbuf, buffer, rlen))
		return -EFAULT;
	rbuf[rlen] = 0;

	prelen = 0;
	if (tolower(*rbuf) == 'r') {
		prelen = strspn(rbuf, "Rr ");
		irq |= IA64_IRQ_REDIRECTED;
	}

	err = cpumask_parse(buffer+prelen, count-prelen, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	spin_lock_irqsave(&desc->lock, flags);
	pending_irq_cpumask[irq] = new_value;
	spin_unlock_irqrestore(&desc->lock, flags);

	return full_count;
}
#endif /* CONFIG_SMP */
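
/*
 * Usage note (illustrative, not from this file): the read/write handlers
 * above back /proc/irq/<N>/smp_affinity.  A write is a hex CPU mask,
 * optionally prefixed with 'r' (or 'R') to request IA64_IRQ_REDIRECTED,
 * e.g. from a shell (the IRQ number 24 is just an example):
 *
 *	echo 3 > /proc/irq/24/smp_affinity	# target CPUs 0 and 1
 *	echo r 1 > /proc/irq/24/smp_affinity	# redirectable, CPU 0 only
 *
 * A mask that contains no online CPU is rejected with -EINVAL.
 */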
#ifdef CONFIG_HOTPLUG_CPU
unsigned int vectors_in_migration[NR_IRQS];

/*
 * Since cpu_online_map is already updated, we just need to check for
 * affinity that has zeros
 */
static void migrate_irqs(void)
{
	cpumask_t	mask;
	irq_desc_t	*desc;
	int		irq, new_cpu;

	for (irq=0; irq < NR_IRQS; irq++) {
		desc = irq_descp(irq);

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can now
		 * tell CPU not to respond to these local intr sources,
		 * such as ITV, CPEI and MCA.
		 */
		if (desc->status == IRQ_PER_CPU)
			continue;

		cpus_and(mask, irq_affinity[irq], cpu_online_map);
		if (any_online_cpu(mask) == NR_CPUS) {
			/*
			 * Save it for phase 2 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = any_online_cpu(cpu_online_map);
			mask = cpumask_of_cpu(new_cpu);

			/*
			 * All three are essential; currently WARN_ON.. maybe panic?
			 */
			if (desc->handler && desc->handler->disable &&
			    desc->handler->enable && desc->handler->set_affinity) {
				desc->handler->disable(irq);
				desc->handler->set_affinity(irq, mask);
				desc->handler->enable(irq);
			} else {
				WARN_ON((!(desc->handler) || !(desc->handler->disable) ||
					 !(desc->handler->enable) ||
					 !(desc->handler->set_affinity)));
			}
		}
	}
}
void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);

	ia64_set_itv(1<<16);
	/*
	 * Phase 1: Locate irq's bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();

	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * local APIC.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 3: Now handle any interrupts not captured in local APIC.
	 * This is to account for cases where the device interrupted while the
	 * RTE was being disabled and re-programmed.
	 */
	for (irq=0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			vectors_in_migration[irq]=0;
			do_IRQ(irq, NULL);
		}
	}

	/*
	 * Now let the processor die. We do irq disable and max_xtp() to
	 * ensure there are no more interrupts routed to this processor.
	 * But the local timer interrupt can have 1 pending, which we
	 * take care of in timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
}
#endif
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
			unsigned long count, void *data)
{
	cpumask_t *mask = (cpumask_t *)data;
	unsigned long full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_descp(irq)->handler == &no_irq_type) || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}
cpumask_t prof_cpu_mask = CPU_MASK_ALL;

void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
	if (!entry)
		return;

	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_descp(i)->handler == &no_irq_type)
			continue;
		register_irq_proc(i);
	}
}