return IRQ_NONE;
}
+/**
+ * aac_rkt_enable_interrupt - Enable event reporting
+ * @dev: Adapter
+ * @event: Event to enable
+ *
+ * Enable event reporting from the i960 for a given event by clearing
+ * the event's doorbell bit in the cached interrupt mask.
+ * NOTE(review): only dev->irq_mask is updated here; presumably the
+ * mask is flushed to the adapter's OIMR register elsewhere - confirm.
+ * Events not listed in the switch are silently ignored.
+ */
+
+static void aac_rkt_enable_interrupt(struct aac_dev * dev, u32 event)
+{
+ switch (event) {
+
+ case HostNormCmdQue:
+ dev->irq_mask &= ~(OUTBOUNDDOORBELL_1);
+ break;
+
+ case HostNormRespQue:
+ dev->irq_mask &= ~(OUTBOUNDDOORBELL_2);
+ break;
+
+ case AdapNormCmdNotFull:
+ dev->irq_mask &= ~(OUTBOUNDDOORBELL_3);
+ break;
+
+ case AdapNormRespNotFull:
+ dev->irq_mask &= ~(OUTBOUNDDOORBELL_4);
+ break;
+ }
+}
+
+/**
+ * aac_rkt_disable_interrupt - Disable event reporting
+ * @dev: Adapter
+ * @event: Event to disable
+ *
+ * Disable event reporting from the i960 for a given event by setting
+ * the event's doorbell bit in the cached interrupt mask (the mirror
+ * image of aac_rkt_enable_interrupt).
+ * NOTE(review): only dev->irq_mask is updated here; presumably the
+ * mask is flushed to the adapter's OIMR register elsewhere - confirm.
+ * Events not listed in the switch are silently ignored.
+ */
+
+static void aac_rkt_disable_interrupt(struct aac_dev *dev, u32 event)
+{
+ switch (event) {
+
+ case HostNormCmdQue:
+ dev->irq_mask |= (OUTBOUNDDOORBELL_1);
+ break;
+
+ case HostNormRespQue:
+ dev->irq_mask |= (OUTBOUNDDOORBELL_2);
+ break;
+
+ case AdapNormCmdNotFull:
+ dev->irq_mask |= (OUTBOUNDDOORBELL_3);
+ break;
+
+ case AdapNormRespNotFull:
+ dev->irq_mask |= (OUTBOUNDDOORBELL_4);
+ break;
+ }
+}
+
/**
* rkt_sync_cmd - send a command and wait
* @dev: Adapter
/*
* Disable doorbell interrupts
*/
- rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
+ rkt_writeb(dev, MUnit.OIMR, dev->OIMR |= 0x04);
/*
* Force the completion of the mask register write before issuing
* the interrupt.
/*
* Restore interrupt mask even though we timed out
*/
- rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+ rkt_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
return -ETIMEDOUT;
}
/*
* Pull the synch status from Mailbox 0.
*/
- if (status)
- *status = le32_to_cpu(rkt_readl(dev, IndexRegs.Mailbox[0]));
+ *status = le32_to_cpu(rkt_readl(dev, IndexRegs.Mailbox[0]));
/*
* Clear the synch command doorbell.
*/
/*
* Restore interrupt mask
*/
- rkt_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+ rkt_writeb(dev, MUnit.OIMR, dev->OIMR &= 0xfb);
return 0;
}
*/
static int aac_rkt_check_health(struct aac_dev *dev)
{
- u32 status = le32_to_cpu(rkt_readl(dev, MUnit.OMRx[0]));
+ long status = rkt_readl(dev, IndexRegs.Mailbox[7]);
/*
* Check to see if the board failed any self tests.
/*
* Check to see if the board panic'd.
*/
- if (status & KERNEL_PANIC) {
- char * buffer;
+ if (status & KERNEL_PANIC)
+ {
+ char * buffer = kmalloc(512, GFP_KERNEL|__GFP_DMA);
struct POSTSTATUS {
u32 Post_Command;
u32 Post_Address;
- } * post;
- dma_addr_t paddr, baddr;
- int ret;
-
- if ((status & 0xFF000000L) == 0xBC000000L)
- return (status >> 16) & 0xFF;
- buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
- ret = -2;
- if (buffer == NULL)
- return ret;
- post = pci_alloc_consistent(dev->pdev,
- sizeof(struct POSTSTATUS), &paddr);
- if (post == NULL) {
- pci_free_consistent(dev->pdev, 512, buffer, baddr);
- return ret;
+ } * post = kmalloc(sizeof(struct POSTSTATUS), GFP_KERNEL);
+ dma_addr_t paddr = pci_map_single(dev->pdev, post, sizeof(struct POSTSTATUS), 2);
+ dma_addr_t baddr = pci_map_single(dev->pdev, buffer, 512, 1);
+ u32 status = -1;
+ int ret = -2;
+
+ memset(buffer, 0, 512);
+ post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
+ post->Post_Address = cpu_to_le32(baddr);
+ rkt_writel(dev, MUnit.IMRx[0], cpu_to_le32(paddr));
+ rkt_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, &status);
+ pci_unmap_single(dev->pdev, paddr, sizeof(struct POSTSTATUS),2);
+ kfree(post);
+ if ((buffer[0] == '0') && (buffer[1] == 'x')) {
+ ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
+ ret <<= 4;
+ ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
}
- memset(buffer, 0, 512);
- post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
- post->Post_Address = cpu_to_le32(baddr);
- rkt_writel(dev, MUnit.IMRx[0], cpu_to_le32(paddr));
- rkt_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, &status);
- pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
- post, paddr);
- if ((buffer[0] == '0') && (buffer[1] == 'x')) {
- ret = (buffer[2] <= '9') ? (buffer[2] - '0') : (buffer[2] - 'A' + 10);
- ret <<= 4;
- ret += (buffer[3] <= '9') ? (buffer[3] - '0') : (buffer[3] - 'A' + 10);
- }
- pci_free_consistent(dev->pdev, 512, buffer, baddr);
- return ret;
- }
+ pci_unmap_single(dev->pdev, baddr, 512, 1);
+ kfree(buffer);
+ return ret;
+ }
/*
* Wait for the adapter to be up and running.
*/
/*
* Map in the registers from the adapter.
*/
- if((dev->regs.rkt = ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
+ if((dev->regs.rkt = (struct rkt_registers *)ioremap((unsigned long)dev->scsi_host_ptr->base, 8192))==NULL)
{
printk(KERN_WARNING "aacraid: unable to map i960.\n" );
goto error_iounmap;
* Fill in the function dispatch table.
*/
dev->a_ops.adapter_interrupt = aac_rkt_interrupt_adapter;
+ dev->a_ops.adapter_enable_int = aac_rkt_enable_interrupt;
+ dev->a_ops.adapter_disable_int = aac_rkt_disable_interrupt;
dev->a_ops.adapter_notify = aac_rkt_notify_adapter;
dev->a_ops.adapter_sync_cmd = rkt_sync_cmd;
dev->a_ops.adapter_check_health = aac_rkt_check_health;