/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */
#include <linux/init.h>

#include <ib_verbs.h>
#include <ib_cache.h>
#include <ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
enum {
        MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
        MTHCA_ACK_REQ_FREQ       = 10,
        MTHCA_FLIGHT_LIMIT       = 9,
        MTHCA_UD_HEADER_SIZE     = 72 /* largest UD header possible */
};
enum {
        MTHCA_QP_STATE_RST      = 0,
        MTHCA_QP_STATE_INIT     = 1,
        MTHCA_QP_STATE_RTR      = 2,
        MTHCA_QP_STATE_RTS      = 3,
        MTHCA_QP_STATE_SQE      = 4,
        MTHCA_QP_STATE_SQD      = 5,
        MTHCA_QP_STATE_ERR      = 6,
        MTHCA_QP_STATE_DRAINING = 7
};
enum {
        MTHCA_QP_PM_MIGRATED = 0x3,
        MTHCA_QP_PM_ARMED    = 0x0,
        MTHCA_QP_PM_REARM    = 0x1
};
enum {
        /* qp_context flags */
        MTHCA_QP_BIT_DE  = 1 <<  8,

        MTHCA_QP_BIT_SRE = 1 << 15,
        MTHCA_QP_BIT_SWE = 1 << 14,
        MTHCA_QP_BIT_SAE = 1 << 13,
        MTHCA_QP_BIT_SIC = 1 <<  4,
        MTHCA_QP_BIT_SSC = 1 <<  3,

        MTHCA_QP_BIT_RRE = 1 << 15,
        MTHCA_QP_BIT_RWE = 1 << 14,
        MTHCA_QP_BIT_RAE = 1 << 13,
        MTHCA_QP_BIT_RIC = 1 <<  4,
        MTHCA_QP_BIT_RSC = 1 <<  3
};
struct mthca_qp_path {
        u32 port_pkey;
        u8  rnr_retry;
        u8  g_mylmc;
        u16 rlid;
        u8  ackto;
        u8  mgid_index;
        u8  static_rate;
        u8  hop_limit;
        u32 sl_tclass_flowlabel;
        u8  rgid[16];
} __attribute__((packed));
struct mthca_qp_context {
        struct mthca_qp_path pri_path;
        struct mthca_qp_path alt_path;
} __attribute__((packed));
struct mthca_qp_param {
        u32 opt_param_mask;
        u32 reserved1;
        struct mthca_qp_context context;
} __attribute__((packed));
enum {
        MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
        MTHCA_QP_OPTPAR_RRE               = 1 << 1,
        MTHCA_QP_OPTPAR_RAE               = 1 << 2,
        MTHCA_QP_OPTPAR_RWE               = 1 << 3,
        MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
        MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
        MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
        MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
        MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
        MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
        MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
        MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
        MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
        MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
        MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
        MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
        MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};
enum {
        MTHCA_OPCODE_NOP            = 0x00,
        MTHCA_OPCODE_RDMA_WRITE     = 0x08,
        MTHCA_OPCODE_RDMA_WRITE_IMM = 0x09,
        MTHCA_OPCODE_SEND           = 0x0a,
        MTHCA_OPCODE_SEND_IMM       = 0x0b,
        MTHCA_OPCODE_RDMA_READ      = 0x10,
        MTHCA_OPCODE_ATOMIC_CS      = 0x11,
        MTHCA_OPCODE_ATOMIC_FA      = 0x12,
        MTHCA_OPCODE_BIND_MW        = 0x18,
        MTHCA_OPCODE_INVALID        = 0xff
};
enum {
        MTHCA_NEXT_DBD       = 1 << 7,
        MTHCA_NEXT_FENCE     = 1 << 6,
        MTHCA_NEXT_CQ_UPDATE = 1 << 3,
        MTHCA_NEXT_EVENT_GEN = 1 << 2,
        MTHCA_NEXT_SOLICIT   = 1 << 1,

        MTHCA_MLX_VL15       = 1 << 17,
        MTHCA_MLX_SLR        = 1 << 16
};
struct mthca_next_seg {
        u32 nda_op;     /* [31:6] next WQE [4:0] next opcode */
        u32 ee_nds;     /* [31:8] next EE  [7] DBD [6] F [5:0] next WQE size */
        u32 flags;      /* [3] CQ [2] Event [1] Solicit */
        u32 imm;        /* immediate data */
} __attribute__((packed));
struct mthca_ud_seg {
        u32 reserved1;
        u32 lkey;
        u64 av_addr;
        u32 reserved2[4];
        u32 dqpn;
        u32 qkey;
        u32 reserved3[2];
} __attribute__((packed));
struct mthca_bind_seg {
        u32 flags;      /* [31] Atomic [30] rem write [29] rem read */
        u32 reserved;
        u32 new_rkey;
        u32 lkey;
        u64 addr;
        u64 length;
} __attribute__((packed));
struct mthca_raddr_seg {
        u64 raddr;
        u32 rkey;
        u32 reserved;
} __attribute__((packed));
struct mthca_atomic_seg {
        u64 swap_add;
        u64 compare;
} __attribute__((packed));
struct mthca_data_seg {
        u32 byte_count;
        u32 lkey;
        u64 addr;
} __attribute__((packed));
struct mthca_mlx_seg {
        u32 nda_op;
        u32 nds;
        u32 flags;      /* [17] VL15 [16] SLR [14:12] static rate
                           [11:8] SL [3] C [2] E */
        u16 rlid;
        u16 vcrc;
} __attribute__((packed));
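/*
 * The special QPs (QP0 and QP1 for each port) are allocated from a
 * contiguous range starting at qp_table.sqp_start: first QP0 for each
 * port, then QP1 for each port.
 */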
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
        return qp->qpn >= dev->qp_table.sqp_start &&
                qp->qpn <= dev->qp_table.sqp_start + 1;
}
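/*
 * A QP's WQEs live either in a single physically contiguous buffer
 * ("direct") or in a list of separately allocated pages.  Receive
 * WQEs start at offset 0 and send WQEs at send_wqe_offset.
 */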
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
        else
                return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
        if (qp->is_direct)
                return qp->queue.direct.buf + qp->send_wqe_offset +
                        (n << qp->sq.wqe_shift);
        else
                return qp->queue.page_list[(qp->send_wqe_offset +
                                            (n << qp->sq.wqe_shift)) >>
                                           PAGE_SHIFT].buf +
                        ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
                         (PAGE_SIZE - 1));
}
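/*
 * Dispatch an asynchronous event reported by the HCA for a QP: look
 * the QP up, hold a reference so it cannot be freed underneath us,
 * and call the consumer's event handler.
 */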
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                    enum ib_event_type event_type)
{
        struct mthca_qp *qp;
        struct ib_event event;

        spin_lock(&dev->qp_table.lock);
        qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
        if (qp)
                atomic_inc(&qp->refcount);
        spin_unlock(&dev->qp_table.lock);

        if (!qp) {
                mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        event.device     = &dev->ib_dev;
        event.event      = event_type;
        event.element.qp = &qp->ibqp;
        if (qp->ibqp.event_handler)
                qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
        case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
        case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
        case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
        case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
        case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
        case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
        default:           return -1;
        }
}
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
        switch (transport) {
        case RC:  return MTHCA_QP_ST_RC;
        case UC:  return MTHCA_QP_ST_UC;
        case UD:  return MTHCA_QP_ST_UD;
        case RD:  return MTHCA_QP_ST_RD;
        case MLX: return MTHCA_QP_ST_MLX;
        default:  return -1;
        }
}
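/*
 * For each (current state, new state) pair this table gives the
 * firmware transition opcode plus, per transport, the attributes
 * that are required and those that are optional for the transition.
 * mthca_modify_qp() uses it to validate attr_mask.
 */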
static const struct {
        int trans;
        u32 req_param[NUM_TRANS];
        u32 opt_param[NUM_TRANS];
} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
        [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                .trans = MTHCA_TRANS_RST2INIT,
                        [UD]  = (IB_QP_PKEY_INDEX |
                        [RC]  = (IB_QP_PKEY_INDEX |
                        [MLX] = (IB_QP_PKEY_INDEX |
                /* bug-for-bug compatibility with VAPI: */
        [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
        [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                .trans = MTHCA_TRANS_INIT2INIT,
                        [UD]  = (IB_QP_PKEY_INDEX |
                        [RC]  = (IB_QP_PKEY_INDEX |
                        [MLX] = (IB_QP_PKEY_INDEX |
                .trans = MTHCA_TRANS_INIT2RTR,
                                 IB_QP_MAX_DEST_RD_ATOMIC |
                                 IB_QP_MIN_RNR_TIMER),
                        [UD]  = (IB_QP_PKEY_INDEX |
                        [RC]  = (IB_QP_ALT_PATH |
                        [MLX] = (IB_QP_PKEY_INDEX |
        [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
        [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                .trans = MTHCA_TRANS_RTR2RTS,
                        [RC]  = (IB_QP_TIMEOUT |
                                 IB_QP_MAX_QP_RD_ATOMIC),
                        [MLX] = IB_QP_SQ_PSN,
                        [UD]  = (IB_QP_CUR_STATE |
                        [RC]  = (IB_QP_CUR_STATE |
                                 IB_QP_MIN_RNR_TIMER |
                                 IB_QP_PATH_MIG_STATE),
                        [MLX] = (IB_QP_CUR_STATE |
        [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
        [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                .trans = MTHCA_TRANS_RTS2RTS,
                        [UD]  = (IB_QP_CUR_STATE |
                        [RC]  = (IB_QP_ACCESS_FLAGS |
                                 IB_QP_PATH_MIG_STATE |
                                 IB_QP_MIN_RNR_TIMER),
                        [MLX] = (IB_QP_CUR_STATE |
                .trans = MTHCA_TRANS_RTS2SQD,
        [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
        [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                .trans = MTHCA_TRANS_SQD2RTS,
                        [UD]  = (IB_QP_CUR_STATE |
                        [RC]  = (IB_QP_CUR_STATE |
                                 IB_QP_MIN_RNR_TIMER |
                                 IB_QP_PATH_MIG_STATE),
                        [MLX] = (IB_QP_CUR_STATE |
                .trans = MTHCA_TRANS_SQD2SQD,
                        [UD]  = (IB_QP_PKEY_INDEX |
                                 IB_QP_MAX_QP_RD_ATOMIC |
                                 IB_QP_MAX_DEST_RD_ATOMIC |
                                 IB_QP_MIN_RNR_TIMER |
                                 IB_QP_PATH_MIG_STATE),
                        [MLX] = (IB_QP_PKEY_INDEX |
        [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
        [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
                .trans = MTHCA_TRANS_SQERR2RTS,
                        [UD]  = (IB_QP_CUR_STATE |
                        [RC]  = (IB_QP_CUR_STATE |
                                 IB_QP_MIN_RNR_TIMER),
                        [MLX] = (IB_QP_CUR_STATE |
        [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
        [IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR }
};
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
                        int attr_mask)
{
        if (attr_mask & IB_QP_PKEY_INDEX)
                sqp->pkey_index = attr->pkey_index;
        if (attr_mask & IB_QP_QKEY)
                sqp->qkey = attr->qkey;
        if (attr_mask & IB_QP_SQ_PSN)
                sqp->send_psn = attr->sq_psn;
}
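/*
 * Bring up the IB link on a port by issuing INIT_IB with the
 * capabilities taken from the device limits.  Used when QP0 is moved
 * to RTR.
 */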
static void init_port(struct mthca_dev *dev, int port)
{
        int err;
        u8 status;
        struct mthca_init_ib_param param;

        memset(&param, 0, sizeof param);

        param.enable_1x = 1;
        param.enable_4x = 1;
        param.vl_cap    = dev->limits.vl_cap;
        param.mtu_cap   = dev->limits.mtu_cap;
        param.gid_cap   = dev->limits.gid_table_len;
        param.pkey_cap  = dev->limits.pkey_table_len;

        err = mthca_INIT_IB(dev, &param, port, &status);
        if (err)
                mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
        if (status)
                mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
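/*
 * Modify a QP: validate the requested transition against
 * state_table, build the hardware QP context in a mailbox and post a
 * MODIFY_QP firmware command.  For QP0 this also brings the IB link
 * up or down as the QP moves into or out of RTR.
 */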
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        void *mailbox = NULL;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *qp_context;
        u32 req_param, opt_param;
        u8 status;
        int err;

        if (attr_mask & IB_QP_CUR_STATE) {
                if (attr->cur_qp_state != IB_QPS_RTR &&
                    attr->cur_qp_state != IB_QPS_RTS &&
                    attr->cur_qp_state != IB_QPS_SQD &&
                    attr->cur_qp_state != IB_QPS_SQE)
                        return -EINVAL;
                else
                        cur_state = attr->cur_qp_state;
        } else {
                spin_lock_irq(&qp->lock);
                cur_state = qp->state;
                spin_unlock_irq(&qp->lock);
        }

        if (attr_mask & IB_QP_STATE) {
                if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
                        return -EINVAL;
                new_state = attr->qp_state;
        } else
                new_state = cur_state;

        if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
                mthca_dbg(dev, "Illegal QP transition "
                          "%d->%d\n", cur_state, new_state);
                return -EINVAL;
        }
        req_param = state_table[cur_state][new_state].req_param[qp->transport];
        opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

        if ((req_param & attr_mask) != req_param) {
                mthca_dbg(dev, "QP transition "
                          "%d->%d missing req attr 0x%08x\n",
                          cur_state, new_state,
                          req_param & ~attr_mask);
                return -EINVAL;
        }

        if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
                mthca_dbg(dev, "QP transition (transport %d) "
                          "%d->%d has extra attr 0x%08x\n",
                          qp->transport,
                          cur_state, new_state,
                          attr_mask & ~(req_param | opt_param |
                                        IB_QP_STATE));
                return -EINVAL;
        }

        mailbox = kmalloc(sizeof (*qp_param) + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
        if (!mailbox)
                return -ENOMEM;
        qp_param = MAILBOX_ALIGN(mailbox);
        qp_context = &qp_param->context;
        memset(qp_param, 0, sizeof *qp_param);
        qp_context->flags  = cpu_to_be32((to_mthca_state(new_state) << 28) |
                                         (to_mthca_st(qp->transport) << 16));
        qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
        if (!(attr_mask & IB_QP_PATH_MIG_STATE))
                qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
        else {
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
                switch (attr->path_mig_state) {
                case IB_MIG_MIGRATED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
                        break;
                case IB_MIG_REARM:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
                        break;
                case IB_MIG_ARMED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
                        break;
                }
        }

        /* leave sched_queue as 0 */
        if (qp->transport == MLX || qp->transport == UD)
                qp_context->mtu_msgmax = cpu_to_be32((IB_MTU_2048 << 29) |
                                                     (11 << 24));
        else if (attr_mask & IB_QP_PATH_MTU) {
                qp_context->mtu_msgmax = cpu_to_be32((attr->path_mtu << 29) |
                                                     (31 << 24));
        }
        qp_context->usr_page  = cpu_to_be32(MTHCA_KAR_PAGE);
        qp_context->local_qpn = cpu_to_be32(qp->qpn);
        if (attr_mask & IB_QP_DEST_QPN) {
                qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
        }

        if (qp->transport == MLX)
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(to_msqp(qp)->port << 24);
        else {
                if (attr_mask & IB_QP_PORT) {
                        qp_context->pri_path.port_pkey |=
                                cpu_to_be32(attr->port_num << 24);
                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
                }
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(attr->pkey_index);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
        }
        if (attr_mask & IB_QP_AV) {
                qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
                qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
                qp_context->pri_path.static_rate = (!!attr->ah_attr.static_rate) << 3;
                if (attr->ah_attr.ah_flags & IB_AH_GRH) {
                        qp_context->pri_path.g_mylmc |= 1 << 7;
                        qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
                        qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
                        qp_context->pri_path.sl_tclass_flowlabel =
                                cpu_to_be32((attr->ah_attr.sl << 28)                |
                                            (attr->ah_attr.grh.traffic_class << 20) |
                                            (attr->ah_attr.grh.flow_label));
                        memcpy(qp_context->pri_path.rgid,
                               attr->ah_attr.grh.dgid.raw, 16);
                } else
                        qp_context->pri_path.sl_tclass_flowlabel =
                                cpu_to_be32(attr->ah_attr.sl << 28);

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }

        if (attr_mask & IB_QP_TIMEOUT) {
                qp_context->pri_path.ackto = attr->timeout;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
        }
        /* XXX alt_path */

        /* leave rdd as 0 */
        qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
        /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
        qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
        qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
                                           (MTHCA_FLIGHT_LIMIT << 24) |
                                           MTHCA_QP_BIT_SRE           |
                                           MTHCA_QP_BIT_SWE           |
                                           MTHCA_QP_BIT_SAE);
        if (qp->sq.policy == IB_SIGNAL_ALL_WR)
                qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
        if (attr_mask & IB_QP_RETRY_CNT) {
                qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
        }
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                qp_context->params1 |= cpu_to_be32(min(attr->max_dest_rd_atomic ?
                                                       ffs(attr->max_dest_rd_atomic) - 1 : 0,
                                                       7) << 21);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
        }
        if (attr_mask & IB_QP_SQ_PSN)
                qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
        qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);
        if (attr_mask & IB_QP_ACCESS_FLAGS) {
                /*
                 * Only enable RDMA/atomics if we have responder
                 * resources set to a non-zero value.
                 */
                if (qp->resp_depth) {
                        qp_context->params2 |=
                                cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
                                            MTHCA_QP_BIT_RWE : 0);
                        qp_context->params2 |=
                                cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
                                            MTHCA_QP_BIT_RRE : 0);
                        qp_context->params2 |=
                                cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
                                            MTHCA_QP_BIT_RAE : 0);
                }

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
                                                        MTHCA_QP_OPTPAR_RRE |
                                                        MTHCA_QP_OPTPAR_RAE);

                qp->atomic_rd_en = attr->qp_access_flags;
        }
        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
                u8 rra_max;

                if (qp->resp_depth && !attr->max_rd_atomic) {
                        /*
                         * Lowering our responder resources to zero.
                         * Turn off RDMA/atomics as responder.
                         * (RWE/RRE/RAE in params2 already zero)
                         */
                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
                                                                MTHCA_QP_OPTPAR_RRE |
                                                                MTHCA_QP_OPTPAR_RAE);
                }

                if (!qp->resp_depth && attr->max_rd_atomic) {
                        /*
                         * Increasing our responder resources from
                         * zero.  Turn on RDMA/atomics as appropriate.
                         */
                        qp_context->params2 |=
                                cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
                                            MTHCA_QP_BIT_RWE : 0);
                        qp_context->params2 |=
                                cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
                                            MTHCA_QP_BIT_RRE : 0);
                        qp_context->params2 |=
                                cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
                                            MTHCA_QP_BIT_RAE : 0);

                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
                                                                MTHCA_QP_OPTPAR_RRE |
                                                                MTHCA_QP_OPTPAR_RAE);
                }

                for (rra_max = 0;
                     1 << rra_max < attr->max_rd_atomic &&
                             rra_max < dev->qp_table.rdb_shift;
                     ++rra_max)
                        ; /* nothing */

                qp_context->params2 |= cpu_to_be32(rra_max << 21);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);

                qp->resp_depth = attr->max_rd_atomic;
        }
        if (qp->rq.policy == IB_SIGNAL_ALL_WR)
                qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
        }
        if (attr_mask & IB_QP_RQ_PSN)
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

        qp_context->ra_buff_indx = dev->qp_table.rdb_base +
                ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
                 dev->qp_table.rdb_shift);

        qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

        if (attr_mask & IB_QP_QKEY) {
                qp_context->qkey = cpu_to_be32(attr->qkey);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
        }
        err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
                              qp->qpn, 0, qp_param, 0, &status);
        if (status) {
                mthca_warn(dev, "modify QP %d returned status %02x.\n",
                           state_table[cur_state][new_state].trans, status);
                err = -EINVAL;
        }

        if (!err)
                qp->state = new_state;

        kfree(mailbox);

        if (is_sqp(dev, qp))
                store_attrs(to_msqp(qp), attr, attr_mask);

        /*
         * If we are moving QP0 to RTR, bring the IB link up; if we
         * are moving QP0 to RESET or ERROR, bring the link back down.
         */
        if (is_qp0(dev, qp)) {
                if (cur_state != IB_QPS_RTR &&
                    new_state == IB_QPS_RTR)
                        init_port(dev, to_msqp(qp)->port);

                if (cur_state != IB_QPS_RESET &&
                    cur_state != IB_QPS_ERR &&
                    (new_state == IB_QPS_RESET ||
                     new_state == IB_QPS_ERR))
                        mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
        }

        return err;
}
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
                               struct mthca_pd *pd,
                               struct mthca_qp *qp)
{
        int size;
        int err = -ENOMEM;
        int npages;
        int shift;
        int i;
        dma_addr_t t;
        u64 *dma_list = NULL;
        size = sizeof (struct mthca_next_seg) +
                qp->rq.max_gs * sizeof (struct mthca_data_seg);

        for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
             qp->rq.wqe_shift++)
                ; /* nothing */

        size = sizeof (struct mthca_next_seg) +
                qp->sq.max_gs * sizeof (struct mthca_data_seg);
        if (qp->transport == MLX)
                size += 2 * sizeof (struct mthca_data_seg);
        else if (qp->transport == UD)
                size += sizeof (struct mthca_ud_seg);
        else /* bind seg is as big as atomic + raddr segs */
                size += sizeof (struct mthca_bind_seg);

        for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
             qp->sq.wqe_shift++)
                ; /* nothing */

        qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
                                    1 << qp->sq.wqe_shift);
        size = PAGE_ALIGN(qp->send_wqe_offset +
                          (qp->sq.max << qp->sq.wqe_shift));
        qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
                           GFP_KERNEL);
        if (!qp->wrid)
                goto err_out;

        if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
                qp->is_direct = 1;
                npages = 1;
                shift = get_order(size) + PAGE_SHIFT;

                if (0)
                        mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
                                  size, shift);

                qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
                if (!qp->queue.direct.buf)
                        goto err_out;

                pci_unmap_addr_set(&qp->queue.direct, mapping, t);

                memset(qp->queue.direct.buf, 0, size);

                while (t & ((1 << shift) - 1)) {
                        --shift;
                        npages *= 2;
                }

                dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
                if (!dma_list)
                        goto err_out_free;

                for (i = 0; i < npages; ++i)
                        dma_list[i] = t + i * (1 << shift);
        } else {
                qp->is_direct = 0;
                npages = size / PAGE_SIZE;
                shift = PAGE_SHIFT;

                if (0)
                        mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);

                dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
                if (!dma_list)
                        goto err_out;

                qp->queue.page_list = kmalloc(npages *
                                              sizeof *qp->queue.page_list,
                                              GFP_KERNEL);
                if (!qp->queue.page_list)
                        goto err_out_free;

                for (i = 0; i < npages; ++i) {
                        qp->queue.page_list[i].buf =
                                pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
                        if (!qp->queue.page_list[i].buf)
                                goto err_out_free;

                        memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);

                        pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);

                        dma_list[i] = t;
                }
        }
        err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
                                  npages, 0, size,
                                  MTHCA_MPT_FLAG_LOCAL_WRITE |
                                  MTHCA_MPT_FLAG_LOCAL_READ,
                                  &qp->mr);
        if (err)
                goto err_out_free;

        kfree(dma_list);
        return 0;

 err_out_free:
        if (qp->is_direct)
                pci_free_consistent(dev->pdev, size,
                                    qp->queue.direct.buf,
                                    pci_unmap_addr(&qp->queue.direct, mapping));
        else
                for (i = 0; i < npages; ++i) {
                        if (qp->queue.page_list[i].buf)
                                pci_free_consistent(dev->pdev, PAGE_SIZE,
                                                    qp->queue.page_list[i].buf,
                                                    pci_unmap_addr(&qp->queue.page_list[i],
                                                                   mapping));
                }

 err_out:
        kfree(qp->wrid);
        kfree(dma_list);
        return err;
}
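/*
 * Initialization common to regular and special QPs: locks, reference
 * count, initial queue state and the WQE buffer itself.
 */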
static int mthca_alloc_qp_common(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_cq *send_cq,
                                 struct mthca_cq *recv_cq,
                                 enum ib_sig_type send_policy,
                                 enum ib_sig_type recv_policy,
                                 struct mthca_qp *qp)
{
        int err;

        spin_lock_init(&qp->lock);
        atomic_set(&qp->refcount, 1);
        qp->state        = IB_QPS_RESET;
        qp->atomic_rd_en = 0;
        qp->resp_depth   = 0;
        qp->sq.policy    = send_policy;
        qp->rq.policy    = recv_policy;
        qp->rq.cur       = 0;
        qp->sq.cur       = 0;
        qp->rq.next      = 0;
        qp->sq.next      = 0;
        qp->rq.last_comp = qp->rq.max - 1;
        qp->sq.last_comp = qp->sq.max - 1;
        qp->rq.last      = NULL;
        qp->sq.last      = NULL;

        init_waitqueue_head(&qp->wait);

        err = mthca_alloc_wqe_buf(dev, pd, qp);
        return err;
}
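/*
 * Allocate a regular (RC/UC/UD) QP: pick a QP number, do the common
 * initialization and enter the QP into the qp_table array so events
 * and completions can find it.
 */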
int mthca_alloc_qp(struct mthca_dev *dev,
                   struct mthca_pd *pd,
                   struct mthca_cq *send_cq,
                   struct mthca_cq *recv_cq,
                   enum ib_qp_type type,
                   enum ib_sig_type send_policy,
                   enum ib_sig_type recv_policy,
                   struct mthca_qp *qp)
{
        int err;

        switch (type) {
        case IB_QPT_RC: qp->transport = RC; break;
        case IB_QPT_UC: qp->transport = UC; break;
        case IB_QPT_UD: qp->transport = UD; break;
        default: return -EINVAL;
        }

        qp->qpn = mthca_alloc(&dev->qp_table.alloc);
        if (qp->qpn == -1)
                return -ENOMEM;

        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
                                    send_policy, recv_policy, qp);
        if (err) {
                mthca_free(&dev->qp_table.alloc, qp->qpn);
                return err;
        }

        spin_lock_irq(&dev->qp_table.lock);
        mthca_array_set(&dev->qp_table.qp,
                        qp->qpn & (dev->limits.num_qps - 1), qp);
        spin_unlock_irq(&dev->qp_table.lock);

        return 0;
}
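/*
 * Allocate a special QP (SMI or GSI).  On the wire these use QP
 * number 0 or 1, but the hardware QP actually used (mqpn) comes from
 * the reserved range at sqp_start.  A DMA-coherent buffer is set
 * aside for the UD headers built by build_mlx_header().
 */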
int mthca_alloc_sqp(struct mthca_dev *dev,
                    struct mthca_pd *pd,
                    struct mthca_cq *send_cq,
                    struct mthca_cq *recv_cq,
                    enum ib_sig_type send_policy,
                    enum ib_sig_type recv_policy,
                    int qpn,
                    int port,
                    struct mthca_sqp *sqp)
{
        int err = 0;
        u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;

        sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
        sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
                                             &sqp->header_dma, GFP_KERNEL);
        if (!sqp->header_buf)
                return -ENOMEM;

        spin_lock_irq(&dev->qp_table.lock);
        if (mthca_array_get(&dev->qp_table.qp, mqpn))
                err = -EBUSY;
        else
                mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
        spin_unlock_irq(&dev->qp_table.lock);

        if (err)
                goto err_out;

        sqp->port         = port;
        sqp->qp.qpn       = mqpn;
        sqp->qp.transport = MLX;

        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
                                    send_policy, recv_policy,
                                    &sqp->qp);
        if (err)
                goto err_out_free;

        atomic_inc(&pd->sqp_count);

        return 0;

 err_out_free:
        spin_lock_irq(&dev->qp_table.lock);
        mthca_array_clear(&dev->qp_table.qp, mqpn);
        spin_unlock_irq(&dev->qp_table.lock);

 err_out:
        dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
                          sqp->header_buf, sqp->header_dma);

        return err;
}
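/*
 * Destroy a QP: remove it from the qp_table, wait for outstanding
 * references to drop, reset it in hardware, clean up any completions
 * still queued for it and free its buffers.
 */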
void mthca_free_qp(struct mthca_dev *dev,
                   struct mthca_qp *qp)
{
        u8 status;
        int size;
        int i;

        spin_lock_irq(&dev->qp_table.lock);
        mthca_array_clear(&dev->qp_table.qp,
                          qp->qpn & (dev->limits.num_qps - 1));
        spin_unlock_irq(&dev->qp_table.lock);

        atomic_dec(&qp->refcount);
        wait_event(qp->wait, !atomic_read(&qp->refcount));

        if (qp->state != IB_QPS_RESET)
                mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);

        mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
        if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);

        mthca_free_mr(dev, &qp->mr);

        size = PAGE_ALIGN(qp->send_wqe_offset +
                          (qp->sq.max << qp->sq.wqe_shift));

        if (qp->is_direct) {
                pci_free_consistent(dev->pdev, size,
                                    qp->queue.direct.buf,
                                    pci_unmap_addr(&qp->queue.direct, mapping));
        } else
                for (i = 0; i < size / PAGE_SIZE; ++i) {
                        pci_free_consistent(dev->pdev, PAGE_SIZE,
                                            qp->queue.page_list[i].buf,
                                            pci_unmap_addr(&qp->queue.page_list[i],
                                                           mapping));
                }

        kfree(qp->wrid);

        if (is_sqp(dev, qp)) {
                atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
                dma_free_coherent(&dev->pdev->dev,
                                  to_msqp(qp)->header_buf_size,
                                  to_msqp(qp)->header_buf,
                                  to_msqp(qp)->header_dma);
        }

        mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
                            int ind, struct ib_send_wr *wr,
                            struct mthca_mlx_seg *mlx,
                            struct mthca_data_seg *data)
{
        int header_size;
        int err;

        ib_ud_header_init(256, /* assume a MAD */
                          sqp->ud_header.grh_present,
                          &sqp->ud_header);

        err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
        if (err)
                return err;
        mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
        mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
                                  (sqp->ud_header.lrh.destination_lid == 0xffff ?
                                   MTHCA_MLX_SLR : 0) |
                                  (sqp->ud_header.lrh.service_level << 8));
        mlx->rlid = sqp->ud_header.lrh.destination_lid;

        switch (wr->opcode) {
        case IB_WR_SEND:
                sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
                sqp->ud_header.immediate_present = 0;
                break;
        case IB_WR_SEND_WITH_IMM:
                sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
                sqp->ud_header.immediate_present = 1;
                sqp->ud_header.immediate_data = wr->imm_data;
                break;
        default:
                return -EINVAL;
        }

        sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
        if (sqp->ud_header.lrh.destination_lid == 0xffff)
                sqp->ud_header.lrh.source_lid = 0xffff;
        sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
        if (!sqp->qp.ibqp.qp_num)
                ib_get_cached_pkey(&dev->ib_dev, sqp->port,
                                   sqp->pkey_index,
                                   &sqp->ud_header.bth.pkey);
        else
                ib_get_cached_pkey(&dev->ib_dev, sqp->port,
                                   wr->wr.ud.pkey_index,
                                   &sqp->ud_header.bth.pkey);
        cpu_to_be16s(&sqp->ud_header.bth.pkey);
        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
        sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
        sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
                                               sqp->qkey : wr->wr.ud.remote_qkey);
        sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

        header_size = ib_ud_header_pack(&sqp->ud_header,
                                        sqp->header_buf +
                                        ind * MTHCA_UD_HEADER_SIZE);

        data->byte_count = cpu_to_be32(header_size);
        data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
        data->addr       = cpu_to_be64(sqp->header_dma +
                                       ind * MTHCA_UD_HEADER_SIZE);

        return 0;
}
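/*
 * Post a chain of send work requests.  Each WQE is built with a
 * "next" segment, transport-specific segments and the gather list;
 * once complete it is linked into the previous WQE, and after the
 * loop a doorbell tells the HCA about the first new WQE.
 */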
int mthca_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                    struct ib_send_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        void *wqe;
        void *prev_wqe;
        unsigned long flags;
        int err = 0;
        int nreq;
        int i;
        int size;
        int size0 = 0;
        u32 f0 = 0;
        int ind;
        u8 op0 = 0;

        static const u8 opcode[] = {
                [IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
                [IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
                [IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
                [IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
                [IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
                [IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
                [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
        };

        spin_lock_irqsave(&qp->lock, flags);
        /* XXX check that state is OK to post send */

        ind = qp->sq.next;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (qp->sq.cur + nreq >= qp->sq.max) {
                        mthca_err(dev, "SQ full (%d posted, %d max, %d nreq)\n",
                                  qp->sq.cur, qp->sq.max, nreq);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_send_wqe(qp, ind);
                prev_wqe = qp->sq.last;
                qp->sq.last = wqe;

                ((struct mthca_next_seg *) wqe)->nda_op = 0;
                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                ((struct mthca_next_seg *) wqe)->flags =
                        ((wr->send_flags & IB_SEND_SIGNALED) ?
                         cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
                        ((wr->send_flags & IB_SEND_SOLICITED) ?
                         cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
                        cpu_to_be32(1);
                if (wr->opcode == IB_WR_SEND_WITH_IMM ||
                    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                        ((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

                wqe += sizeof (struct mthca_next_seg);
                size = sizeof (struct mthca_next_seg) / 16;
                switch (qp->transport) {
                case RC:
                        switch (wr->opcode) {
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                                ((struct mthca_raddr_seg *) wqe)->raddr =
                                        cpu_to_be64(wr->wr.atomic.remote_addr);
                                ((struct mthca_raddr_seg *) wqe)->rkey =
                                        cpu_to_be32(wr->wr.atomic.rkey);
                                ((struct mthca_raddr_seg *) wqe)->reserved = 0;

                                wqe += sizeof (struct mthca_raddr_seg);

                                if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                                        ((struct mthca_atomic_seg *) wqe)->swap_add =
                                                cpu_to_be64(wr->wr.atomic.swap);
                                        ((struct mthca_atomic_seg *) wqe)->compare =
                                                cpu_to_be64(wr->wr.atomic.compare_add);
                                } else {
                                        ((struct mthca_atomic_seg *) wqe)->swap_add =
                                                cpu_to_be64(wr->wr.atomic.compare_add);
                                        ((struct mthca_atomic_seg *) wqe)->compare = 0;
                                }

                                wqe += sizeof (struct mthca_atomic_seg);
                                size += sizeof (struct mthca_raddr_seg) / 16 +
                                        sizeof (struct mthca_atomic_seg) / 16;
                                break;
                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                        case IB_WR_RDMA_READ:
                                ((struct mthca_raddr_seg *) wqe)->raddr =
                                        cpu_to_be64(wr->wr.rdma.remote_addr);
                                ((struct mthca_raddr_seg *) wqe)->rkey =
                                        cpu_to_be32(wr->wr.rdma.rkey);
                                ((struct mthca_raddr_seg *) wqe)->reserved = 0;
                                wqe += sizeof (struct mthca_raddr_seg);
                                size += sizeof (struct mthca_raddr_seg) / 16;
                                break;

                        default:
                                /* No extra segments required for sends */
                                break;
                        }

                        break;

                case UD:
                        ((struct mthca_ud_seg *) wqe)->lkey =
                                cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
                        ((struct mthca_ud_seg *) wqe)->av_addr =
                                cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
                        ((struct mthca_ud_seg *) wqe)->dqpn =
                                cpu_to_be32(wr->wr.ud.remote_qpn);
                        ((struct mthca_ud_seg *) wqe)->qkey =
                                cpu_to_be32(wr->wr.ud.remote_qkey);

                        wqe += sizeof (struct mthca_ud_seg);
                        size += sizeof (struct mthca_ud_seg) / 16;
                        break;

                case MLX:
                        err = build_mlx_header(dev, to_msqp(qp), ind, wr,
                                               wqe - sizeof (struct mthca_next_seg),
                                               wqe);
                        if (err) {
                                *bad_wr = wr;
                                goto out;
                        }
                        wqe += sizeof (struct mthca_data_seg);
                        size += sizeof (struct mthca_data_seg) / 16;
                        break;
                }
                if (wr->num_sge > qp->sq.max_gs) {
                        mthca_err(dev, "too many gathers\n");
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        ((struct mthca_data_seg *) wqe)->byte_count =
                                cpu_to_be32(wr->sg_list[i].length);
                        ((struct mthca_data_seg *) wqe)->lkey =
                                cpu_to_be32(wr->sg_list[i].lkey);
                        ((struct mthca_data_seg *) wqe)->addr =
                                cpu_to_be64(wr->sg_list[i].addr);
                        wqe += sizeof (struct mthca_data_seg);
                        size += sizeof (struct mthca_data_seg) / 16;
                }

                /* Add one more inline data segment for ICRC */
                if (qp->transport == MLX) {
                        ((struct mthca_data_seg *) wqe)->byte_count =
                                cpu_to_be32((1 << 31) | 4);
                        ((u32 *) wqe)[1] = 0;
                        wqe += sizeof (struct mthca_data_seg);
                        size += sizeof (struct mthca_data_seg) / 16;
                }

                qp->wrid[ind + qp->rq.max] = wr->wr_id;
                if (wr->opcode >= ARRAY_SIZE(opcode)) {
                        mthca_err(dev, "opcode invalid\n");
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                if (prev_wqe) {
                        ((struct mthca_next_seg *) prev_wqe)->nda_op =
                                cpu_to_be32(((ind << qp->sq.wqe_shift) +
                                             qp->send_wqe_offset) |
                                            opcode[wr->opcode]);

                        ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                                cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
                }

                if (!size0) {
                        size0 = size;
                        op0   = opcode[wr->opcode];
                }

                ++ind;
                if (unlikely(ind >= qp->sq.max))
                        ind -= qp->sq.max;
        }

out:
        if (nreq) {
                u32 doorbell[2];

                doorbell[0] = cpu_to_be32(((qp->sq.next << qp->sq.wqe_shift) +
                                           qp->send_wqe_offset) | f0 | op0);
                doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

                wmb();

                mthca_write64(doorbell,
                              dev->kar + MTHCA_SEND_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }

        qp->sq.cur += nreq;
        qp->sq.next = ind;

        spin_unlock_irqrestore(&qp->lock, flags);
        return err;
}
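/*
 * Post a chain of receive work requests.  Same structure as
 * mthca_post_send(), but receive WQEs carry only a next segment and
 * a scatter list, and the doorbell encodes how many WQEs were
 * posted.
 */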
int mthca_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                       struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        unsigned long flags;
        int err = 0;
        int nreq;
        int i;
        int size;
        int size0 = 0;
        int ind;
        void *wqe;
        void *prev_wqe;

        spin_lock_irqsave(&qp->lock, flags);

        /* XXX check that state is OK to post receive */

        ind = qp->rq.next;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (qp->rq.cur + nreq >= qp->rq.max) {
                        mthca_err(dev, "RQ %06x full\n", qp->qpn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                wqe = get_recv_wqe(qp, ind);
                prev_wqe = qp->rq.last;
                qp->rq.last = wqe;

                ((struct mthca_next_seg *) wqe)->nda_op = 0;
                ((struct mthca_next_seg *) wqe)->ee_nds =
                        cpu_to_be32(MTHCA_NEXT_DBD);
                ((struct mthca_next_seg *) wqe)->flags =
                        (wr->recv_flags & IB_RECV_SIGNALED) ?
                        cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0;
                wqe += sizeof (struct mthca_next_seg);
                size = sizeof (struct mthca_next_seg) / 16;

                if (wr->num_sge > qp->rq.max_gs) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        ((struct mthca_data_seg *) wqe)->byte_count =
                                cpu_to_be32(wr->sg_list[i].length);
                        ((struct mthca_data_seg *) wqe)->lkey =
                                cpu_to_be32(wr->sg_list[i].lkey);
                        ((struct mthca_data_seg *) wqe)->addr =
                                cpu_to_be64(wr->sg_list[i].addr);
                        wqe += sizeof (struct mthca_data_seg);
                        size += sizeof (struct mthca_data_seg) / 16;
                }

                qp->wrid[ind] = wr->wr_id;

                if (prev_wqe) {
                        ((struct mthca_next_seg *) prev_wqe)->nda_op =
                                cpu_to_be32((ind << qp->rq.wqe_shift) | 1);

                        ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                                cpu_to_be32(MTHCA_NEXT_DBD | size);
                }

                if (!size0)
                        size0 = size;

                ++ind;
                if (unlikely(ind >= qp->rq.max))
                        ind -= qp->rq.max;
        }

out:
        if (nreq) {
                u32 doorbell[2];

                doorbell[0] = cpu_to_be32((qp->rq.next << qp->rq.wqe_shift) | size0);
                doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

                wmb();

                mthca_write64(doorbell,
                              dev->kar + MTHCA_RECEIVE_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }

        qp->rq.cur += nreq;
        qp->rq.next = ind;

        spin_unlock_irqrestore(&qp->lock, flags);
        return err;
}
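/*
 * Called from the CQ error handling code: report whether the failed
 * WQE had its doorbell (DBD) bit set and return the nda_op/size word
 * describing the next WQE, so the queue can be resumed past it.
 */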
int mthca_free_err_wqe(struct mthca_qp *qp, int is_send,
                       int index, int *dbd, u32 *new_wqe)
{
        struct mthca_next_seg *next;

        if (is_send)
                next = get_send_wqe(qp, index);
        else
                next = get_recv_wqe(qp, index);

        *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
        if (next->ee_nds & cpu_to_be32(0x3f))
                *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
                        (next->ee_nds & cpu_to_be32(0x3f));
        else
                *new_wqe = 0;

        return 0;
}
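/*
 * Set up the QP table at driver init: reserve the firmware QPs plus
 * two special QPs per port, and point the HCA at the special QP
 * range with CONF_SPECIAL_QP.
 */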
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
        int err;
        u8 status;
        int i;

        spin_lock_init(&dev->qp_table.lock);

        /*
         * We reserve 2 extra QPs per port for the special QPs.  The
         * special QP for port 1 has to be even, so round up.
         */
        dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
        err = mthca_alloc_init(&dev->qp_table.alloc,
                               dev->limits.num_qps,
                               (1 << 24) - 1,
                               dev->qp_table.sqp_start +
                               MTHCA_MAX_PORTS * 2);
        if (err)
                return err;

        err = mthca_array_init(&dev->qp_table.qp,
                               dev->limits.num_qps);
        if (err) {
                mthca_alloc_cleanup(&dev->qp_table.alloc);
                return err;
        }

        for (i = 0; i < 2; ++i) {
                err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
                                            dev->qp_table.sqp_start + i * 2,
                                            &status);
                if (err)
                        goto err_out;
                if (status) {
                        mthca_warn(dev, "CONF_SPECIAL_QP returned "
                                   "status %02x, aborting.\n",
                                   status);
                        err = -EINVAL;
                        goto err_out;
                }
        }
        return 0;

 err_out:
        for (i = 0; i < 2; ++i)
                mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

        mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
        mthca_alloc_cleanup(&dev->qp_table.alloc);

        return err;
}
void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
{
        int i;
        u8 status;

        for (i = 0; i < 2; ++i)
                mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

        mthca_alloc_cleanup(&dev->qp_table.alloc);
}