VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
index 7335442..8461687 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/ppc64/kernel/irq.c
@@ -68,8 +68,8 @@ irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
 };
 
 int __irq_offset_value;
-int ppc_spurious_interrupts = 0;
-unsigned long lpEvent_count = 0;
+int ppc_spurious_interrupts;
+unsigned long lpevent_count;
 
 int
 setup_irq(unsigned int irq, struct irqaction * new)
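Side note (not part of the patch): dropping the "= 0" initializers above is purely cosmetic. File-scope C objects without an explicit initializer are placed in .bss and are guaranteed to start out as zero, so the counters behave identically while staying out of the image's initialized-data section. For illustration:

    static unsigned long counter_a = 0;   /* stored in .data                */
    static unsigned long counter_b;       /* stored in .bss, still starts 0 */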
@@ -143,47 +143,6 @@ EXPORT_SYMBOL(synchronize_irq);
 
 #endif /* CONFIG_SMP */
 
-/* XXX Make this into free_irq() - Anton */
-
-/* This could be promoted to a real free_irq() ... */
-static int
-do_free_irq(int irq, void* dev_id)
-{
-       irq_desc_t *desc = get_irq_desc(irq);
-       struct irqaction **p;
-       unsigned long flags;
-
-       spin_lock_irqsave(&desc->lock,flags);
-       p = &desc->action;
-       for (;;) {
-               struct irqaction * action = *p;
-               if (action) {
-                       struct irqaction **pp = p;
-                       p = &action->next;
-                       if (action->dev_id != dev_id)
-                               continue;
-
-                       /* Found it - now remove it from the list of entries */
-                       *pp = action->next;
-                       if (!desc->action) {
-                               desc->status |= IRQ_DISABLED;
-                               mask_irq(irq);
-                       }
-                       spin_unlock_irqrestore(&desc->lock,flags);
-
-                       /* Wait to make sure it's not being used on another CPU */
-                       synchronize_irq(irq);
-                       kfree(action);
-                       return 0;
-               }
-               printk("Trying to free free IRQ%d\n",irq);
-               spin_unlock_irqrestore(&desc->lock,flags);
-               break;
-       }
-       return -ENOENT;
-}
-
-
 int request_irq(unsigned int irq,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags, const char * devname, void *dev_id)
@@ -194,8 +153,7 @@ int request_irq(unsigned int irq,
        if (irq >= NR_IRQS)
                return -EINVAL;
        if (!handler)
-               /* We could implement really free_irq() instead of that... */
-               return do_free_irq(irq, dev_id);
+               return -EINVAL;
 
        action = (struct irqaction *)
                kmalloc(sizeof(struct irqaction), GFP_KERNEL);
@@ -206,7 +164,7 @@ int request_irq(unsigned int irq,
 
        action->handler = handler;
        action->flags = irqflags;
-       action->mask = 0;
+       cpus_clear(action->mask);
        action->name = devname;
        action->dev_id = dev_id;
        action->next = NULL;
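Note (illustrative, not from the patch): irqaction.mask is a cpumask_t here, an opaque per-CPU bitmap rather than a plain integer, so it has to be emptied with cpus_clear() instead of an assignment to 0. A minimal sketch of the 2.6-era cpumask accessors used in this file:

    #include <linux/cpumask.h>

    static int cpu_zero_selected(void)
    {
            cpumask_t mask;

            cpus_clear(mask);                    /* no CPUs set; replaces "mask = 0" */
            cpu_set(0, mask);                    /* add CPU 0 to the mask            */
            cpus_and(mask, mask, CPU_MASK_ALL);  /* keep only representable CPUs     */
            return cpu_isset(0, mask);           /* membership test                  */
    }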
@@ -222,7 +180,38 @@ EXPORT_SYMBOL(request_irq);
 
 void free_irq(unsigned int irq, void *dev_id)
 {
-       request_irq(irq, NULL, 0, NULL, dev_id);
+       irq_desc_t *desc = get_irq_desc(irq);
+       struct irqaction **p;
+       unsigned long flags;
+
+       spin_lock_irqsave(&desc->lock,flags);
+       p = &desc->action;
+       for (;;) {
+               struct irqaction * action = *p;
+               if (action) {
+                       struct irqaction **pp = p;
+                       p = &action->next;
+                       if (action->dev_id != dev_id)
+                               continue;
+
+                       /* Found it - now remove it from the list of entries */
+                       *pp = action->next;
+                       if (!desc->action) {
+                               desc->status |= IRQ_DISABLED;
+                               mask_irq(irq);
+                       }
+                       spin_unlock_irqrestore(&desc->lock,flags);
+
+                       /* Wait to make sure it's not being used on another CPU */
+                       synchronize_irq(irq);
+                       kfree(action);
+                       return;
+               }
+               printk("Trying to free free IRQ%d\n",irq);
+               spin_unlock_irqrestore(&desc->lock,flags);
+               break;
+       }
+       return;
 }
 
 EXPORT_SYMBOL(free_irq);
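With the NULL-handler shortcut gone, drivers use the usual pairing: request_irq() attaches a handler and free_irq() with the matching dev_id detaches it. A hedged usage sketch against the 2.6.8-era signatures above; the IRQ number, cookie and function names below are hypothetical, not from this file:

    #include <linux/interrupt.h>

    #define EXAMPLE_IRQ 9                  /* hypothetical interrupt line */
    static int example_cookie;             /* hypothetical dev_id cookie  */

    static irqreturn_t example_handler(int irq, void *dev_id, struct pt_regs *regs)
    {
            /* acknowledge the hardware here, then report the IRQ as handled */
            return IRQ_HANDLED;
    }

    static int example_attach(void)
    {
            int err;

            /* Passing a NULL handler is now rejected with -EINVAL instead
             * of silently releasing the line. */
            err = request_irq(EXAMPLE_IRQ, example_handler, 0,
                              "example", &example_cookie);
            if (err)
                    return err;

            /* ... device is live ... */

            free_irq(EXAMPLE_IRQ, &example_cookie);   /* matching dev_id */
            return 0;
    }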
@@ -589,7 +578,7 @@ out:
 }
 
 #ifdef CONFIG_PPC_ISERIES
-int do_IRQ(struct pt_regs *regs)
+void do_IRQ(struct pt_regs *regs)
 {
        struct paca_struct *lpaca;
        struct ItLpQueue *lpq;
@@ -613,31 +602,29 @@ int do_IRQ(struct pt_regs *regs)
 
        lpaca = get_paca();
 #ifdef CONFIG_SMP
-       if (lpaca->xLpPaca.xIntDword.xFields.xIpiCnt) {
-               lpaca->xLpPaca.xIntDword.xFields.xIpiCnt = 0;
+       if (lpaca->lppaca.xIntDword.xFields.xIpiCnt) {
+               lpaca->lppaca.xIntDword.xFields.xIpiCnt = 0;
                iSeries_smp_message_recv(regs);
        }
 #endif /* CONFIG_SMP */
-       lpq = lpaca->lpQueuePtr;
+       lpq = lpaca->lpqueue_ptr;
        if (lpq && ItLpQueue_isLpIntPending(lpq))
-               lpEvent_count += ItLpQueue_process(lpq, regs);
+               lpevent_count += ItLpQueue_process(lpq, regs);
 
        irq_exit();
 
-       if (lpaca->xLpPaca.xIntDword.xFields.xDecrInt) {
-               lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
+       if (lpaca->lppaca.xIntDword.xFields.xDecrInt) {
+               lpaca->lppaca.xIntDword.xFields.xDecrInt = 0;
                /* Signal a fake decrementer interrupt */
                timer_interrupt(regs);
        }
-
-       return 1; /* lets ret_from_int know we can do checks */
 }
 
 #else  /* CONFIG_PPC_ISERIES */
 
-int do_IRQ(struct pt_regs *regs)
+void do_IRQ(struct pt_regs *regs)
 {
-       int irq, first = 1;
+       int irq;
 
        irq_enter();
 
@@ -656,25 +643,15 @@ int do_IRQ(struct pt_regs *regs)
        }
 #endif
 
-       /*
-        * Every arch is required to implement ppc_md.get_irq.
-        * This function will either return an irq number or -1 to
-        * indicate there are no more pending.  But the first time
-        * through the loop this means there wasn't an IRQ pending.
-        * The value -2 is for buggy hardware and means that this IRQ
-        * has already been handled. -- Tom
-        */
-       while ((irq = ppc_md.get_irq(regs)) >= 0) {
+       irq = ppc_md.get_irq(regs);
+
+       if (irq >= 0)
                ppc_irq_dispatch_handler(regs, irq);
-               first = 0;
-       }
-       if (irq != -2 && first)
+       else
                /* That's not SMP safe ... but who cares ? */
                ppc_spurious_interrupts++;
 
        irq_exit();
-
-       return 1; /* lets ret_from_int know we can do checks */
 }
 #endif /* CONFIG_PPC_ISERIES */
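The dispatch above relies on ppc_md.get_irq() returning the number of a pending interrupt, or a negative value when nothing is pending; one interrupt is handled per exception, and a negative result is counted as spurious. A hedged sketch of that contract; the helper and sentinel names are hypothetical:

    /* Illustration only: a platform's get_irq hook, as assumed by do_IRQ. */
    static int example_get_irq(struct pt_regs *regs)
    {
            int vec = example_read_irq_vector();    /* hypothetical PIC read  */

            if (vec == EXAMPLE_NO_IRQ_PENDING)      /* hypothetical sentinel  */
                    return -1;                      /* counted as spurious    */

            return vec;                             /* 0..NR_IRQS-1, dispatched once */
    }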
 
@@ -738,7 +715,6 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer
        irq_desc_t *desc = get_irq_desc(irq);
        int ret;
        cpumask_t new_value, tmp;
-       cpumask_t allcpus = CPU_MASK_ALL;
 
        if (!desc->handler->set_affinity)
                return -EIO;
@@ -753,7 +729,7 @@ static int irq_affinity_write_proc (struct file *file, const char __user *buffer
         * NR_CPUS == 32 and cpumask is a long), so we mask it here to
         * be consistent.
         */
-       cpus_and(new_value, new_value, allcpus);
+       cpus_and(new_value, new_value, CPU_MASK_ALL);
 
        /*
         * Grab lock here so cpu_online_map can't change, and also
@@ -808,11 +784,10 @@ static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffe
        {
                unsigned i;
                for (i=0; i<NR_CPUS; ++i) {
-                       if ( paca[i].prof_buffer && (new_value & 1) )
+                       if ( paca[i].prof_buffer && cpu_isset(i, new_value) )
                                paca[i].prof_enabled = 1;
                        else
                                paca[i].prof_enabled = 0;
-                       new_value >>= 1;
                }
        }
 #endif
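Since the profiling mask is a cpumask_t rather than a plain word, it can no longer be consumed by shifting; the loop above tests each CPU with cpu_isset(), which also works when NR_CPUS exceeds BITS_PER_LONG. A minimal sketch of the same pattern (illustrative, not from the patch):

    /* Count how many CPUs are present in a mask by probing each bit. */
    static int count_selected_cpus(cpumask_t mask)
    {
            int cpu, n = 0;

            for (cpu = 0; cpu < NR_CPUS; ++cpu)
                    if (cpu_isset(cpu, mask))
                            n++;
            return n;
    }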
@@ -857,7 +832,7 @@ void init_irq_proc (void)
        int i;
 
        /* create /proc/irq */
-       root_irq_dir = proc_mkdir("irq", 0);
+       root_irq_dir = proc_mkdir("irq", NULL);
 
        /* create /proc/irq/prof_cpu_mask */
        entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);