/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"
enum {
	MTHCA_NUM_ASYNC_EQE = 0x80,
	MTHCA_NUM_CMD_EQE   = 0x80,
	MTHCA_EQ_ENTRY_SIZE = 0x20
};
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 * Only the named fields below are written by this file; the reserved
 * fields pad the structure out to the full hardware EQ context size.
 */
struct mthca_eq_context {
	u32 flags;
	u64 start;
	u32 logsize_usrpage;
	u32 pd;
	u8  reserved1[3];
	u8  intr;
	u32 lost_count;
	u32 lkey;
	u32 reserved2[2];
	u32 consumer_index;
	u32 producer_index;
	u32 reserved3[4];
} __attribute__((packed));
#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
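/*
 * All of the above land in the EQ context "flags" word: the status
 * code in bits 31:28, the owner bit in bit 24, the TR and OI flags
 * (presumably translation required and overrun ignore) in bits 18
 * and 17, and the EQ state in bits 9:8.
 */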
enum {
	MTHCA_EVENT_TYPE_COMP               = 0x00,
	MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
	MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
	MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
	MTHCA_EVENT_TYPE_SRQ_LAST_WQE       = 0x13,
	MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
	MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
	MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
	MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
	MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
	MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
	MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
	MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
	MTHCA_EVENT_TYPE_CMD                = 0x0a
};
#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
				(1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
				(1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
				(1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
				(1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_LAST_WQE))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)
#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
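/*
 * An EQ doorbell is two 32-bit words: the first carries one of the
 * commands above in bits 31:24 together with an EQ number, and the
 * second carries the command's argument (a consumer index, a CQ
 * number, or zero).
 */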
struct mthca_eqe {
	u8  reserved1;
	u8  type;
	u8  reserved2;
	u8  subtype;
	union {
		u32 raw[6];
		struct {
			u32 cqn;
		} __attribute__((packed)) comp;
		struct {
			u16 reserved1;
			u16 token;
			u32 reserved2;
			u8  reserved3[3];
			u8  status;
			u64 out_param;
		} __attribute__((packed)) cmd;
		struct {
			u32 qpn;
		} __attribute__((packed)) qp;
		struct {
			u32 cqn;
			u32 reserved;
			u8  reserved2[3];
			u8  syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32 reserved1[2];
			u32 port;
		} __attribute__((packed)) port_change;
	} event;
	u8  reserved3[3];
	u8  owner;
} __attribute__((packed));
#define  MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define  MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
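/*
 * Bit 7 of the last byte of an EQ entry tracks ownership: the HCA
 * clears it when it writes a new event, and set_eqe_hw() sets it
 * again once software has consumed the entry.
 */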
static inline u64 async_mask(struct mthca_dev *dev)
{
	return dev->mthca_flags & MTHCA_FLAG_SRQ ?
		MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
		MTHCA_ASYNC_EVENT_MASK;
}
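/*
 * The helpers below all write the 64-bit EQ doorbell register.
 * mthca_write64() takes the doorbell lock, which matters on 32-bit
 * systems where the two halves of the doorbell cannot be written
 * atomically.
 */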
static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
	u32 doorbell[2];

	doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_SET_CI | eq->eqn);
	doorbell[1] = cpu_to_be32(ci & (eq->nent - 1));

	mthca_write64(doorbell,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
static inline void eq_req_not(struct mthca_dev *dev, int eqn)
{
	u32 doorbell[2];

	doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_REQ_NOT | eqn);
	doorbell[1] = 0;

	mthca_write64(doorbell,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
	u32 doorbell[2];

	doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
	doorbell[1] = cpu_to_be32(cqn);

	mthca_write64(doorbell,
		      dev->kar + MTHCA_EQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
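/*
 * The EQ is a power-of-two ring spread across separately allocated
 * pages; masking with (nent - 1) wraps the index, and since
 * PAGE_SIZE is a multiple of the 32-byte entry size, no entry ever
 * straddles a page boundary.
 */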
static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}
static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	eqe = get_eqe(eq, eq->cons_index);
	return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}
static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
	eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}
static void port_change(struct mthca_dev *dev, int port, int active)
{
	struct ib_event record;

	mthca_dbg(dev, "Port change to %s for port %d\n",
		  active ? "active" : "down", port);

	record.device = &dev->ib_dev;
	record.event  = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	record.element.port_num = port;

	ib_dispatch_event(&record);
}
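/*
 * mthca_eq_int() drains one EQ: it consumes entries until it finds
 * one still owned by hardware, dispatches each event to the CQ, QP
 * or command layers, and finally tells the HCA how far it got by
 * writing the new consumer index.
 */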
static void mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
	struct mthca_eqe *eqe;
	int disarm_cqn;
	int eqes_found = 0;

	while ((eqe = next_eqe_sw(eq))) {
		int set_ci = 0;

		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MTHCA_EVENT_TYPE_COMP:
			disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			disarm_cq(dev, eq->eqn, disarm_cqn);
			mthca_cq_event(dev, disarm_cqn);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG);
			break;

		case MTHCA_EVENT_TYPE_COMM_EST:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_COMM_EST);
			break;

		case MTHCA_EVENT_TYPE_SQ_DRAINED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_SQ_DRAINED);
			break;

		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_FATAL);
			break;

		case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_PATH_MIG_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_REQ_ERR);
			break;

		case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				       IB_EVENT_QP_ACCESS_ERR);
			break;

		case MTHCA_EVENT_TYPE_CMD:
			mthca_cmd_event(dev,
					be16_to_cpu(eqe->event.cmd.token),
					eqe->event.cmd.status,
					be64_to_cpu(eqe->event.cmd.out_param));
			/*
			 * cmd_event() may add more commands.
			 * The card will think the queue has overflowed if
			 * we don't tell it we've been processing events.
			 */
			set_ci = 1;
			break;

		case MTHCA_EVENT_TYPE_PORT_CHANGE:
			port_change(dev,
				    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
				    eqe->subtype == 0x4);
			break;

		case MTHCA_EVENT_TYPE_CQ_ERROR:
			mthca_warn(dev, "CQ %s on CQN %08x\n",
				   eqe->event.cq_err.syndrome == 1 ?
				   "overrun" : "access violation",
				   be32_to_cpu(eqe->event.cq_err.cqn));
			break;

		case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
			mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
		case MTHCA_EVENT_TYPE_ECC_DETECT:
		default:
			mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
				   eqe->type, eqe->subtype, eq->eqn);
			break;
		}

		set_eqe_hw(eqe);
		++eq->cons_index;
		eqes_found = 1;

		if (set_ci) {
			wmb(); /* see comment below */
			set_eq_ci(dev, eq, eq->cons_index);
			set_ci = 0;
		}
	}

	/*
	 * This barrier makes sure that all updates to
	 * ownership bits done by set_eqe_hw() hit memory
	 * before the consumer index is updated. set_eq_ci()
	 * allows the HCA to possibly write more EQ entries,
	 * and we want to avoid the exceedingly unlikely
	 * possibility of the HCA writing an entry and then
	 * having set_eqe_hw() overwrite the owner field.
	 */
	if (likely(eqes_found)) {
		wmb();
		set_eq_ci(dev, eq, eq->cons_index);
	}

	eq_req_not(dev, eq->eqn);
}
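/*
 * With a legacy interrupt, the ECR (event cause register) tells us
 * which EQs have events pending, so the shared handler reads and
 * clears the ECR and polls only those EQs. With MSI-X, each EQ has
 * its own vector and the per-EQ handler can skip the ECR check.
 */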
static irqreturn_t mthca_interrupt(int irq, void *dev_ptr, struct pt_regs *regs)
{
	struct mthca_dev *dev = dev_ptr;
	u32 ecr;
	int work = 0;
	int i;

	if (dev->eq_table.clr_mask)
		writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

	if ((ecr = readl(dev->ecr_base + 4)) != 0) {
		work = 1;

		writel(ecr, dev->ecr_base +
		       MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

		for (i = 0; i < MTHCA_NUM_EQ; ++i)
			if (ecr & dev->eq_table.eq[i].ecr_mask)
				mthca_eq_int(dev, &dev->eq_table.eq[i]);
	}

	return IRQ_RETVAL(work);
}
static irqreturn_t mthca_msi_x_interrupt(int irq, void *eq_ptr,
					 struct pt_regs *regs)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}
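/*
 * mthca_create_eq() rounds the requested size up to a power of two,
 * allocates the queue one DMA-consistent page at a time, registers a
 * memory region covering those pages, and then hands the queue over
 * to the HCA with a SW2HW_EQ firmware command before arming it.
 */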
static int __devinit mthca_create_eq(struct mthca_dev *dev,
				     int nent,
				     u8 intr,
				     struct mthca_eq *eq)
{
	int npages = (nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	u64 *dma_list = NULL;
	dma_addr_t t;
	void *mailbox = NULL;
	struct mthca_eq_context *eq_context;
	int err = -ENOMEM;
	int i;
	u8 status;

	/* Make sure EQ size is aligned to a power of 2 size. */
	for (i = 1; i < nent; i <<= 1)
		; /* nothing */
	nent = i;

	eq->dev = dev;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = kmalloc(sizeof *eq_context + MTHCA_CMD_MAILBOX_EXTRA,
			  GFP_KERNEL);
	if (!mailbox)
		goto err_out_free;
	eq_context = MAILBOX_ALIGN(mailbox);

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = pci_alloc_consistent(dev->pdev,
							    PAGE_SIZE, &t);
		if (!eq->page_list[i].buf)
			goto err_out_free;

		dma_list[i] = t;
		pci_unmap_addr_set(&eq->page_list[i], mapping, t);

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	for (i = 0; i < nent; ++i)
		set_eqe_hw(get_eqe(eq, i));

	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
	if (eq->eqn == -1)
		goto err_out_free;

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, PAGE_SHIFT, npages,
				  0, npages * PAGE_SIZE,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &eq->mr);
	if (err)
		goto err_out_free_eq;

	eq->nent = nent;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags           = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
						  MTHCA_EQ_OWNER_HW    |
						  MTHCA_EQ_STATE_ARMED |
						  MTHCA_EQ_FLAG_TR);
	eq_context->start           = cpu_to_be64(0);
	eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24 |
						  MTHCA_KAR_PAGE);
	eq_context->pd              = cpu_to_be32(dev->driver_pd.pd_num);
	eq_context->intr            = intr;
	eq_context->lkey            = cpu_to_be32(eq->mr.ibmr.lkey);

	err = mthca_SW2HW_EQ(dev, eq_context, eq->eqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mr;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	kfree(dma_list);
	kfree(mailbox);

	eq->ecr_mask   = swab32(1 << eq->eqn);
	eq->cons_index = 0;

	eq_req_not(dev, eq->eqn);

	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
		  eq->eqn, nent);

	return err;

err_out_free_mr:
	mthca_free_mr(dev, &eq->mr);

err_out_free_eq:
	mthca_free(&dev->eq_table.alloc, eq->eqn);

err_out_free:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    eq->page_list[i].buf,
					    pci_unmap_addr(&eq->page_list[i],
							   mapping));

	kfree(eq->page_list);
	kfree(dma_list);
	kfree(mailbox);

err_out:
	return err;
}
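/*
 * Freeing an EQ reverses the setup above: HW2SW_EQ returns ownership
 * of the queue to the driver so that its pages can safely be
 * unmapped and freed.
 */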
static void mthca_free_eq(struct mthca_dev *dev,
			  struct mthca_eq *eq)
{
	void *mailbox = NULL;
	int err;
	u8 status;
	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	int i;

	mailbox = kmalloc(sizeof (struct mthca_eq_context) + MTHCA_CMD_MAILBOX_EXTRA,
			  GFP_KERNEL);
	if (!mailbox)
		return;

	err = mthca_HW2SW_EQ(dev, MAILBOX_ALIGN(mailbox),
			     eq->eqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
	if (status)
		mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n",
			   status);

	if (0) {
		mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(MAILBOX_ALIGN(mailbox) + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mthca_free_mr(dev, &eq->mr);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    pci_unmap_addr(&eq->page_list[i], mapping));

	kfree(eq->page_list);
	kfree(mailbox);
}
static void mthca_free_irqs(struct mthca_dev *dev)
{
	int i;

	if (dev->eq_table.have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (dev->eq_table.eq[i].have_irq)
			free_irq(dev->eq_table.eq[i].msi_x_vector,
				 dev->eq_table.eq + i);
}
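/*
 * On mem-free HCAs the EQ context table lives in ICM (InfiniHost
 * context memory), which must be backed by host memory and mapped
 * through firmware before any EQs can be created.
 */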
int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
	int ret;
	u8 status;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table. This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 32 bytes of context
	 * memory, or 1 KB total.
	 */
	dev->eq_table.icm_virt = icm_virt;
	dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!dev->eq_table.icm_page)
		return -ENOMEM;
	dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
		__free_page(dev->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(dev->eq_table.icm_page);
	}

	return ret;
}
void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
{
	u8 status;

	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(dev->eq_table.icm_page);
}
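/*
 * mthca_init_eq_table() creates the driver's three EQs (completion,
 * async, and command) and wires them up to either one MSI-X vector
 * each or a single shared interrupt line.
 */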
int __devinit mthca_init_eq_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	u8 intr;
	int i;

	err = mthca_alloc_init(&dev->eq_table.alloc,
			       dev->limits.num_eqs,
			       dev->limits.num_eqs - 1,
			       dev->limits.reserved_eqs);
	if (err)
		return err;

	if (dev->mthca_flags & MTHCA_FLAG_MSI ||
	    dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		dev->eq_table.clr_mask = 0;
	} else {
		dev->eq_table.clr_mask =
			swab32(1 << (dev->eq_table.inta_pin & 31));
		dev->eq_table.clr_int  = dev->clr_base +
			(dev->eq_table.inta_pin < 32 ? 4 : 0);
	}

	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
		128 : dev->eq_table.inta_pin;

	err = mthca_create_eq(dev, dev->limits.num_cqs,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
	if (err)
		goto err_out_free;

	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
	if (err)
		goto err_out_async;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
			[MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
			[MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
		};

		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_msi_x_interrupt, 0,
					  eq_name[i], dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
	} else {
		err = request_irq(dev->pdev->irq, mthca_interrupt, SA_SHIRQ,
				  DRV_NAME, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
	}

	err = mthca_MAP_EQ(dev, async_mask(dev),
			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

	return 0;

err_out_cmd:
	mthca_free_irqs(dev);
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_free:
	mthca_alloc_cleanup(&dev->eq_table.alloc);
	return err;
}
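/*
 * Passing 1 instead of 0 to mthca_MAP_EQ below unmaps the event
 * types from the EQs, undoing the mapping set up in
 * mthca_init_eq_table() before the EQs themselves are destroyed.
 */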
void __devexit mthca_cleanup_eq_table(struct mthca_dev *dev)
{
	u8 status;
	int i;

	mthca_free_irqs(dev);

	mthca_MAP_EQ(dev, async_mask(dev),
		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		mthca_free_eq(dev, &dev->eq_table.eq[i]);

	mthca_alloc_cleanup(&dev->eq_table.alloc);
}