/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */
#include <linux/init.h>

#include <ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
enum {
        MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
        MTHCA_CQ_ENTRY_SIZE = 0x20
};
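/*
 * Each CQ entry is 32 bytes, so with 4K pages a direct CQ buffer
 * (4 * PAGE_SIZE) holds up to 512 entries; larger CQs fall back to a
 * list of separately allocated pages (see mthca_init_cq() below).
 */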
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
        u32 flags;
        u64 start;
        u32 logsize_usrpage;
        u32 error_eqn;
        u32 comp_eqn;
        u32 pd;
        u32 lkey;
        u32 last_notified_index;
        u32 solicit_producer_index;
        u32 consumer_index;
        u32 producer_index;
        u32 cqn;
        u32 reserved[3];
} __attribute__((packed));
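/*
 * Encodings for the flags word of struct mthca_cq_context: completion
 * status occupies bits 31:28, TR and OI are single flag bits, and the
 * arm state is encoded starting at bit 8.
 */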
#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)
enum {
        MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};
enum {
        SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
        SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
        SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
        SYNDROME_LOCAL_PROT_ERR          = 0x04,
        SYNDROME_WR_FLUSH_ERR            = 0x05,
        SYNDROME_MW_BIND_ERR             = 0x06,
        SYNDROME_BAD_RESP_ERR            = 0x10,
        SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
        SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
        SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
        SYNDROME_REMOTE_OP_ERR           = 0x14,
        SYNDROME_RETRY_EXC_ERR           = 0x15,
        SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
        SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
        SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
        SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
        SYNDROME_INVAL_EECN_ERR          = 0x23,
        SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};
struct mthca_cqe {
        u32 my_qpn;
        u32 my_ee;
        u32 rqpn;
        u16 sl_g_mlpath;
        u16 rlid;
        u32 imm_etype_pkey_eec;
        u32 byte_cnt;
        u32 wqe;
        u8  opcode;
        u8  is_send;
        u8  reserved;
        u8  owner;
};
struct mthca_err_cqe {
        u32 my_qpn;
        u32 reserved1[3];
        u8  syndrome;
        u8  reserved2;
        u16 db_cnt;
        u32 reserved3;
        u32 wqe;
        u8  opcode;
        u8  reserved4[2];
        u8  owner;
};
#define MTHCA_CQ_ENTRY_OWNER_SW    (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW    (1 << 7)

#define MTHCA_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_CQ_DB_REQ_NOT_MULT (5 << 24)
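/*
 * CQ doorbells are two 32-bit words: the command in bits 31:24 of the
 * first word together with the CQN in the low bits, and a
 * command-dependent argument (e.g. entry count - 1) in the second word.
 */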
static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
        if (cq->is_direct)
                return cq->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
        else
                return cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
                        + (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}
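/*
 * A CQE belongs to software when its owner bit is clear: set_cqe_hw()
 * below hands an entry back to the HCA, and the bit is clear again once
 * the HCA has written a new completion there.
 */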
static inline int cqe_sw(struct mthca_cq *cq, int i)
{
        return !(MTHCA_CQ_ENTRY_OWNER_HW &
                 get_cqe(cq, i)->owner);
}
static inline int next_cqe_sw(struct mthca_cq *cq)
{
        return cqe_sw(cq, cq->cons_index);
}
static inline void set_cqe_hw(struct mthca_cq *cq, int entry)
{
        get_cqe(cq, entry)->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}
static inline void inc_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
                                  int nent)
{
        u32 doorbell[2];

        doorbell[0] = cpu_to_be32(MTHCA_CQ_DB_INC_CI | cq->cqn);
        doorbell[1] = cpu_to_be32(nent - 1);

        mthca_write64(doorbell,
                      dev->kar + MTHCA_CQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
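/*
 * Dispatch a completion event: called from the EQ handler with the CQN
 * reported by the hardware.  The reference taken under cq_table.lock
 * keeps the CQ alive while its completion handler runs.
 */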
void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
{
        struct mthca_cq *cq;

        spin_lock(&dev->cq_table.lock);
        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
        if (cq)
                atomic_inc(&cq->refcount);
        spin_unlock(&dev->cq_table.lock);

        if (!cq) {
                mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
                return;
        }

        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

        if (atomic_dec_and_test(&cq->refcount))
                wake_up(&cq->wait);
}
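/*
 * Remove all CQEs for the given QP from a CQ.  Used when the QP is
 * moved to RESET, so that no stale completions are reported for work
 * queue entries that no longer exist.
 */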
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
{
        struct mthca_cq *cq;
        struct mthca_cqe *cqe;
        int prod_index;
        int nfreed = 0;

        spin_lock_irq(&dev->cq_table.lock);
        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
        if (cq)
                atomic_inc(&cq->refcount);
        spin_unlock_irq(&dev->cq_table.lock);

        if (!cq)
                return;

        spin_lock_irq(&cq->lock);

        /*
         * First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->cons_index;
             cqe_sw(cq, prod_index & cq->ibcq.cqe);
             ++prod_index)
                if (prod_index == cq->cons_index + cq->ibcq.cqe)
                        break;

        mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
                  qpn, cqn, cq->cons_index, prod_index);

        /*
         * Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while (prod_index > cq->cons_index) {
                cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
                if (cqe->my_qpn == cpu_to_be32(qpn))
                        ++nfreed;
                else if (nfreed)
                        memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
                                       cq->ibcq.cqe),
                               cqe, MTHCA_CQ_ENTRY_SIZE);
                --prod_index;
        }

        if (nfreed) {
                inc_cons_index(dev, cq, nfreed);
                cq->cons_index = (cq->cons_index + nfreed) & cq->ibcq.cqe;
        }

        spin_unlock_irq(&cq->lock);
        if (atomic_dec_and_test(&cq->refcount))
                wake_up(&cq->wait);
}
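/*
 * Translate a completion-with-error CQE into an ib_wc entry.  On return,
 * *free_cqe tells the caller whether the CQE may be recycled: flush
 * errors are rewritten in place so that a single hardware CQE can report
 * a flush error for each remaining WQE in the chain.
 */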
static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
                            struct mthca_qp *qp, int wqe_index, int is_send,
                            struct mthca_err_cqe *cqe,
                            struct ib_wc *entry, int *free_cqe)
{
        int err;
        int dbd;
        u32 new_wqe;

        if (cqe->syndrome != SYNDROME_WR_FLUSH_ERR) {
                int j;

                mthca_dbg(dev, "%x/%d: error CQE -> QPN %06x, WQE @ %08x\n",
                          cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
                          be32_to_cpu(cqe->wqe));

                for (j = 0; j < 8; ++j)
                        printk(KERN_DEBUG "  [%2x] %08x\n",
                               j * 4, be32_to_cpu(((u32 *) cqe)[j]));
        }

        /*
         * For completions in error, only the work request ID, status
         * (and freed resource count for RD) have to be set.
         */
        switch (cqe->syndrome) {
        case SYNDROME_LOCAL_LENGTH_ERR:
                entry->status = IB_WC_LOC_LEN_ERR;
                break;
        case SYNDROME_LOCAL_QP_OP_ERR:
                entry->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case SYNDROME_LOCAL_EEC_OP_ERR:
                entry->status = IB_WC_LOC_EEC_OP_ERR;
                break;
        case SYNDROME_LOCAL_PROT_ERR:
                entry->status = IB_WC_LOC_PROT_ERR;
                break;
        case SYNDROME_WR_FLUSH_ERR:
                entry->status = IB_WC_WR_FLUSH_ERR;
                break;
        case SYNDROME_MW_BIND_ERR:
                entry->status = IB_WC_MW_BIND_ERR;
                break;
        case SYNDROME_BAD_RESP_ERR:
                entry->status = IB_WC_BAD_RESP_ERR;
                break;
        case SYNDROME_LOCAL_ACCESS_ERR:
                entry->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case SYNDROME_REMOTE_INVAL_REQ_ERR:
                entry->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case SYNDROME_REMOTE_ACCESS_ERR:
                entry->status = IB_WC_REM_ACCESS_ERR;
                break;
        case SYNDROME_REMOTE_OP_ERR:
                entry->status = IB_WC_REM_OP_ERR;
                break;
        case SYNDROME_RETRY_EXC_ERR:
                entry->status = IB_WC_RETRY_EXC_ERR;
                break;
        case SYNDROME_RNR_RETRY_EXC_ERR:
                entry->status = IB_WC_RNR_RETRY_EXC_ERR;
                break;
        case SYNDROME_LOCAL_RDD_VIOL_ERR:
                entry->status = IB_WC_LOC_RDD_VIOL_ERR;
                break;
        case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
                entry->status = IB_WC_REM_INV_RD_REQ_ERR;
                break;
        case SYNDROME_REMOTE_ABORTED_ERR:
                entry->status = IB_WC_REM_ABORT_ERR;
                break;
        case SYNDROME_INVAL_EECN_ERR:
                entry->status = IB_WC_INV_EECN_ERR;
                break;
        case SYNDROME_INVAL_EEC_STATE_ERR:
                entry->status = IB_WC_INV_EEC_STATE_ERR;
                break;
        default:
                entry->status = IB_WC_GENERAL_ERR;
                break;
        }
        err = mthca_free_err_wqe(qp, is_send, wqe_index, &dbd, &new_wqe);
        if (err)
                return err;

        /*
         * If we're at the end of the WQE chain, or we've used up our
         * doorbell count, free the CQE.  Otherwise just update it for
         * the next poll operation.
         */
        if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
                return 0;

        cqe->db_cnt   = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
        cqe->wqe      = new_wqe;
        cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

        *free_cqe = 0;

        return 0;
}
static void dump_cqe(struct mthca_cqe *cqe)
{
        int j;

        for (j = 0; j < 8; ++j)
                printk(KERN_DEBUG "  [%2x] %08x\n",
                       j * 4, be32_to_cpu(((u32 *) cqe)[j]));
}
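/*
 * Process a single CQE.  Called with cq->lock held; *cur_qp and *freed
 * carry the currently locked QP and the count of consumed-but-not-yet-
 * reported CQEs across successive calls from mthca_poll_cq().
 */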
static inline int mthca_poll_one(struct mthca_dev *dev,
                                 struct mthca_cq *cq,
                                 struct mthca_qp **cur_qp,
                                 int *freed,
                                 struct ib_wc *entry)
{
        struct mthca_wq *wq;
        struct mthca_cqe *cqe;
        int wqe_index;
        int is_error;
        int is_send;
        int free_cqe = 1;
        int err = 0;

        if (!next_cqe_sw(cq))
                return -EAGAIN;

        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        cqe = get_cqe(cq, cq->cons_index);

        if (0) {
                mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
                          cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
                          be32_to_cpu(cqe->wqe));
                dump_cqe(cqe);
        }

        is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
                MTHCA_ERROR_CQE_OPCODE_MASK;
        is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;
        if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
                if (*cur_qp) {
                        if (*freed) {
                                inc_cons_index(dev, cq, *freed);
                                *freed = 0;
                        }
                        spin_unlock(&(*cur_qp)->lock);
                        if (atomic_dec_and_test(&(*cur_qp)->refcount))
                                wake_up(&(*cur_qp)->wait);
                }

                spin_lock(&dev->qp_table.lock);
                *cur_qp = mthca_array_get(&dev->qp_table.qp,
                                          be32_to_cpu(cqe->my_qpn) &
                                          (dev->limits.num_qps - 1));
                if (*cur_qp)
                        atomic_inc(&(*cur_qp)->refcount);
                spin_unlock(&dev->qp_table.lock);

                if (!*cur_qp) {
                        mthca_warn(dev, "CQ entry for unknown QP %06x\n",
                                   be32_to_cpu(cqe->my_qpn) & 0xffffff);
                        err = -EINVAL;
                        goto out;
                }

                spin_lock(&(*cur_qp)->lock);
        }
        entry->qp_num = (*cur_qp)->qpn;

        if (is_send) {
                wq = &(*cur_qp)->sq;
                wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
                             >> wq->wqe_shift);
                entry->wr_id = (*cur_qp)->wrid[wqe_index +
                                               (*cur_qp)->rq.max];
        } else {
                wq = &(*cur_qp)->rq;
                wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
                entry->wr_id = (*cur_qp)->wrid[wqe_index];
        }

        if (wq->last_comp < wqe_index)
                wq->cur -= wqe_index - wq->last_comp;
        else
                wq->cur -= wq->max - wq->last_comp + wqe_index;

        wq->last_comp = wqe_index;

        if (0)
                mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
                          is_send ? "Send" : "Receive",
                          (*cur_qp)->qpn, wqe_index, wq->max);

        if (is_error) {
                err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
                                       (struct mthca_err_cqe *) cqe,
                                       entry, &free_cqe);
                goto out;
        }

        if (is_send) {
                entry->opcode = IB_WC_SEND; /* XXX */
        } else {
                entry->byte_len = be32_to_cpu(cqe->byte_cnt);
                switch (cqe->opcode & 0x1f) {
                case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
                case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
                        entry->wc_flags = IB_WC_WITH_IMM;
                        entry->imm_data = cqe->imm_etype_pkey_eec;
                        entry->opcode   = IB_WC_RECV;
                        break;
                case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
                case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
                        entry->wc_flags = IB_WC_WITH_IMM;
                        entry->imm_data = cqe->imm_etype_pkey_eec;
                        entry->opcode   = IB_WC_RECV_RDMA_WITH_IMM;
                        break;
                default:
                        entry->wc_flags = 0;
                        entry->opcode   = IB_WC_RECV;
                        break;
                }

                entry->slid           = be16_to_cpu(cqe->rlid);
                entry->sl             = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
                entry->src_qp         = be32_to_cpu(cqe->rqpn) & 0xffffff;
                entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
                entry->pkey_index     = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
                entry->wc_flags      |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
                                        IB_WC_GRH : 0;
        }

        entry->status = IB_WC_SUCCESS;
 out:
        if (free_cqe) {
                set_cqe_hw(cq, cq->cons_index);
                ++*freed;
                cq->cons_index = (cq->cons_index + 1) & cq->ibcq.cqe;
        }

        return err;
}
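/*
 * Poll for up to num_entries completions.  Consumer index doorbells are
 * batched: mthca_poll_one() only counts freed CQEs, and the doorbell is
 * rung once here (or when the polled QP changes) to cover all of them.
 */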
int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
                  struct ib_wc *entry)
{
        struct mthca_dev *dev = to_mdev(ibcq->device);
        struct mthca_cq *cq = to_mcq(ibcq);
        struct mthca_qp *qp = NULL;
        unsigned long flags;
        int err = 0;
        int freed = 0;
        int npolled;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = mthca_poll_one(dev, cq, &qp,
                                     &freed, entry + npolled);
                if (err)
                        break;
        }

        if (freed)
                inc_cons_index(dev, cq, freed);

        if (qp) {
                spin_unlock(&qp->lock);
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        return err == 0 || err == -EAGAIN ? npolled : err;
}
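/*
 * Arm the CQ: request a completion event from the HCA the next time a
 * CQE (or, if "solicited" is set, only a solicited CQE) is written to
 * this CQ.
 */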
void mthca_arm_cq(struct mthca_dev *dev, struct mthca_cq *cq,
                  int solicited)
{
        u32 doorbell[2];

        doorbell[0] = cpu_to_be32((solicited ?
                                   MTHCA_CQ_DB_REQ_NOT_SOL :
                                   MTHCA_CQ_DB_REQ_NOT) |
                                  cq->cqn);
        doorbell[1] = 0xffffffff;

        mthca_write64(doorbell,
                      dev->kar + MTHCA_CQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}
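/*
 * Create a CQ of nent entries.  Small queues use a single
 * physically contiguous buffer; anything larger than
 * MTHCA_MAX_DIRECT_CQ_SIZE is built from a list of separately
 * allocated pages.  Either way, the buffer is registered with the HCA
 * via mthca_mr_alloc_phys() before the SW2HW_CQ firmware command
 * passes the CQ to hardware ownership.
 */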
int mthca_init_cq(struct mthca_dev *dev, int nent,
                  struct mthca_cq *cq)
{
        int size = nent * MTHCA_CQ_ENTRY_SIZE;
        dma_addr_t t;
        void *mailbox = NULL;
        int npages, shift;
        u64 *dma_list = NULL;
        struct mthca_cq_context *cq_context;
        int err = -ENOMEM;
        u8 status;
        int i;

        might_sleep();

        mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
                          GFP_KERNEL);
        if (!mailbox)
                goto err_out;

        cq_context = MAILBOX_ALIGN(mailbox);
        if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
                if (0)
                        mthca_dbg(dev, "Creating direct CQ of size %d\n", size);

                cq->is_direct = 1;
                npages        = 1;
                shift         = get_order(size) + PAGE_SHIFT;

                cq->queue.direct.buf = pci_alloc_consistent(dev->pdev,
                                                            size, &t);
                if (!cq->queue.direct.buf)
                        goto err_out;

                pci_unmap_addr_set(&cq->queue.direct, mapping, t);

                memset(cq->queue.direct.buf, 0, size);

                /*
                 * Pass the buffer to the HCA as the largest
                 * power-of-two chunks that its alignment allows.
                 */
                while (t & ((1 << shift) - 1)) {
                        --shift;
                        npages *= 2;
                }

                dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
                if (!dma_list)
                        goto err_out_free;

                for (i = 0; i < npages; ++i)
                        dma_list[i] = t + i * (1 << shift);
        } else {
                cq->is_direct = 0;
                npages        = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                shift         = PAGE_SHIFT;

                if (0)
                        mthca_dbg(dev, "Creating indirect CQ with %d pages\n", npages);

                dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
                if (!dma_list)
                        goto err_out;

                cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
                                              GFP_KERNEL);
                if (!cq->queue.page_list)
                        goto err_out;

                for (i = 0; i < npages; ++i)
                        cq->queue.page_list[i].buf = NULL;

                for (i = 0; i < npages; ++i) {
                        cq->queue.page_list[i].buf =
                                pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
                        if (!cq->queue.page_list[i].buf)
                                goto err_out_free;

                        dma_list[i] = t;
                        pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);

                        memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
                }
        }
        /* Hand every entry to the HCA before the CQ goes live. */
        for (i = 0; i < nent; ++i)
                set_cqe_hw(cq, i);

        cq->cqn = mthca_alloc(&dev->cq_table.alloc);
        if (cq->cqn == -1)
                goto err_out_free;

        err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
                                  dma_list, shift, npages,
                                  0, size,
                                  MTHCA_MPT_FLAG_LOCAL_WRITE |
                                  MTHCA_MPT_FLAG_LOCAL_READ,
                                  &cq->mr);
        if (err)
                goto err_out_free_cq;

        spin_lock_init(&cq->lock);
        atomic_set(&cq->refcount, 1);
        init_waitqueue_head(&cq->wait);
        memset(cq_context, 0, sizeof *cq_context);
        cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
                                                  MTHCA_CQ_STATE_DISARMED |
                                                  MTHCA_CQ_FLAG_TR);
        cq_context->start           = cpu_to_be64(0);
        cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24 |
                                                  MTHCA_KAR_PAGE);
        cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
        cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
        cq_context->pd              = cpu_to_be32(dev->driver_pd.pd_num);
        cq_context->lkey            = cpu_to_be32(cq->mr.ibmr.lkey);
        cq_context->cqn             = cpu_to_be32(cq->cqn);
        err = mthca_SW2HW_CQ(dev, cq_context, cq->cqn, &status);
        if (err) {
                mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
                goto err_out_free_mr;
        }

        if (status) {
                mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_mr;
        }

        spin_lock_irq(&dev->cq_table.lock);
        if (mthca_array_set(&dev->cq_table.cq,
                            cq->cqn & (dev->limits.num_cqs - 1),
                            cq)) {
                spin_unlock_irq(&dev->cq_table.lock);
                goto err_out_free_mr;
        }
        spin_unlock_irq(&dev->cq_table.lock);

        cq->cons_index = 0;

        kfree(dma_list);
        kfree(mailbox);

        return 0;
 err_out_free_mr:
        mthca_free_mr(dev, &cq->mr);

 err_out_free_cq:
        mthca_free(&dev->cq_table.alloc, cq->cqn);

 err_out_free:
        if (cq->is_direct)
                pci_free_consistent(dev->pdev, size,
                                    cq->queue.direct.buf,
                                    pci_unmap_addr(&cq->queue.direct, mapping));
        else {
                for (i = 0; i < npages; ++i)
                        if (cq->queue.page_list[i].buf)
                                pci_free_consistent(dev->pdev, PAGE_SIZE,
                                                    cq->queue.page_list[i].buf,
                                                    pci_unmap_addr(&cq->queue.page_list[i],
                                                                   mapping));

                kfree(cq->queue.page_list);
        }

 err_out:
        kfree(dma_list);
        kfree(mailbox);

        return err;
}
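/*
 * Destroy a CQ: return it to software ownership with HW2SW_CQ, wait for
 * any remaining users (e.g. event handlers still holding a reference)
 * to finish, and only then free the buffers backing the queue.
 */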
void mthca_free_cq(struct mthca_dev *dev,
                   struct mthca_cq *cq)
{
        void *mailbox;
        int err;
        u8 status;

        might_sleep();

        mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
                          GFP_KERNEL);
        if (!mailbox) {
                mthca_warn(dev, "No memory for mailbox to free CQ.\n");
                return;
        }

        err = mthca_HW2SW_CQ(dev, MAILBOX_ALIGN(mailbox), cq->cqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n",
                           status);
        if (0) {
                u32 *ctx = MAILBOX_ALIGN(mailbox);
                int j;

                printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
                       cq->cqn, cq->cons_index, next_cqe_sw(cq));
                for (j = 0; j < 16; ++j)
                        printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
        }
        spin_lock_irq(&dev->cq_table.lock);
        mthca_array_clear(&dev->cq_table.cq,
                          cq->cqn & (dev->limits.num_cqs - 1));
        spin_unlock_irq(&dev->cq_table.lock);

        atomic_dec(&cq->refcount);
        wait_event(cq->wait, !atomic_read(&cq->refcount));

        mthca_free_mr(dev, &cq->mr);

        if (cq->is_direct)
                pci_free_consistent(dev->pdev,
                                    (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
                                    cq->queue.direct.buf,
                                    pci_unmap_addr(&cq->queue.direct,
                                                   mapping));
        else {
                int i;

                for (i = 0;
                     i < ((cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE + PAGE_SIZE - 1) /
                             PAGE_SIZE;
                     ++i)
                        pci_free_consistent(dev->pdev, PAGE_SIZE,
                                            cq->queue.page_list[i].buf,
                                            pci_unmap_addr(&cq->queue.page_list[i],
                                                           mapping));

                kfree(cq->queue.page_list);
        }

        mthca_free(&dev->cq_table.alloc, cq->cqn);

        kfree(mailbox);
}
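/*
 * One-time setup of the CQ table: a number allocator for CQNs (24-bit,
 * with the firmware-reserved CQs excluded) plus a sparse array mapping
 * CQN to struct mthca_cq for the event handlers above.
 */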
int __devinit mthca_init_cq_table(struct mthca_dev *dev)
{
        int err;

        spin_lock_init(&dev->cq_table.lock);

        err = mthca_alloc_init(&dev->cq_table.alloc,
                               dev->limits.num_cqs,
                               (1 << 24) - 1,
                               dev->limits.reserved_cqs);
        if (err)
                return err;

        err = mthca_array_init(&dev->cq_table.cq,
                               dev->limits.num_cqs);
        if (err)
                mthca_alloc_cleanup(&dev->cq_table.alloc);

        return err;
}

void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev)
{
        mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
        mthca_alloc_cleanup(&dev->cq_table.alloc);
}