diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index e407ea9..5f5214c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1,5 +1,8 @@
 /*
  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
  * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
  */
 
-#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
 
-#include <ib_verbs.h>
-#include <ib_cache.h>
-#include <ib_pack.h>
+#include <asm/io.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_pack.h>
 
 #include "mthca_dev.h"
 #include "mthca_cmd.h"
+#include "mthca_memfree.h"
+#include "mthca_wqe.h"
 
 enum {
        MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
        MTHCA_ACK_REQ_FREQ       = 10,
        MTHCA_FLIGHT_LIMIT       = 9,
-       MTHCA_UD_HEADER_SIZE     = 72 /* largest UD header possible */
+       MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
+       MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
+       MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
 };
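
The two new inline constants describe how inline data sits in a send WQE:
each inline run is preceded by a 4-byte length word (the "data segment
overhead") and the hardware consumes it in 16-byte chunks.  A sketch of the
header that MTHCA_INLINE_HEADER_SIZE accounts for, roughly as mthca_wqe.h
declares it (the bit-31 inline flag is an assumption, not shown in this diff):

    /* Sketch, not part of this diff: the 4-byte inline segment header.
     * Low bits carry the inline byte count; bit 31 is assumed to mark
     * the segment as inline data rather than a scatter/gather entry. */
    struct mthca_inline_seg {
            __be32 byte_count;
    };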
 
 enum {
@@ -90,56 +100,67 @@ enum {
        MTHCA_QP_BIT_RSC = 1 <<  3
 };
 
+enum {
+       MTHCA_SEND_DOORBELL_FENCE = 1 << 5
+};
+
 struct mthca_qp_path {
-       u32 port_pkey;
-       u8  rnr_retry;
-       u8  g_mylmc;
-       u16 rlid;
-       u8  ackto;
-       u8  mgid_index;
-       u8  static_rate;
-       u8  hop_limit;
-       u32 sl_tclass_flowlabel;
-       u8  rgid[16];
+       __be32 port_pkey;
+       u8     rnr_retry;
+       u8     g_mylmc;
+       __be16 rlid;
+       u8     ackto;
+       u8     mgid_index;
+       u8     static_rate;
+       u8     hop_limit;
+       __be32 sl_tclass_flowlabel;
+       u8     rgid[16];
 } __attribute__((packed));
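
The u32 -> __be32 changes in these hardware structures are annotation only:
__be32 has the same size and layout as u32, but lets sparse (make C=2) flag
any access to a big-endian field that lacks a byte swap.  A minimal sketch
of the convention, with hypothetical variables:

    __be32 wire;              /* big-endian, as the HCA sees it */
    u32    host = 42;         /* native CPU byte order */

    wire = cpu_to_be32(host); /* explicit swap on store... */
    host = be32_to_cpu(wire); /* ...and on load; a bare "host = wire;"
                               * would now draw a sparse warning. */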
 
 struct mthca_qp_context {
-       u32 flags;
-       u32 sched_queue;
-       u32 mtu_msgmax;
-       u32 usr_page;
-       u32 local_qpn;
-       u32 remote_qpn;
-       u32 reserved1[2];
+       __be32 flags;
+       __be32 tavor_sched_queue; /* Reserved on Arbel */
+       u8     mtu_msgmax;
+       u8     rq_size_stride;  /* Reserved on Tavor */
+       u8     sq_size_stride;  /* Reserved on Tavor */
+       u8     rlkey_arbel_sched_queue; /* Reserved on Tavor */
+       __be32 usr_page;
+       __be32 local_qpn;
+       __be32 remote_qpn;
+       u32    reserved1[2];
        struct mthca_qp_path pri_path;
        struct mthca_qp_path alt_path;
-       u32 rdd;
-       u32 pd;
-       u32 wqe_base;
-       u32 wqe_lkey;
-       u32 params1;
-       u32 reserved2;
-       u32 next_send_psn;
-       u32 cqn_snd;
-       u32 next_snd_wqe[2];
-       u32 last_acked_psn;
-       u32 ssn;
-       u32 params2;
-       u32 rnr_nextrecvpsn;
-       u32 ra_buff_indx;
-       u32 cqn_rcv;
-       u32 next_rcv_wqe[2];
-       u32 qkey;
-       u32 srqn;
-       u32 rmsn;
-       u32 reserved3[19];
+       __be32 rdd;
+       __be32 pd;
+       __be32 wqe_base;
+       __be32 wqe_lkey;
+       __be32 params1;
+       __be32 reserved2;
+       __be32 next_send_psn;
+       __be32 cqn_snd;
+       __be32 snd_wqe_base_l;  /* Next send WQE on Tavor */
+       __be32 snd_db_index;    /* (debugging only entries) */
+       __be32 last_acked_psn;
+       __be32 ssn;
+       __be32 params2;
+       __be32 rnr_nextrecvpsn;
+       __be32 ra_buff_indx;
+       __be32 cqn_rcv;
+       __be32 rcv_wqe_base_l;  /* Next recv WQE on Tavor */
+       __be32 rcv_db_index;    /* (debugging only entries) */
+       __be32 qkey;
+       __be32 srqn;
+       __be32 rmsn;
+       __be16 rq_wqe_counter;  /* reserved on Tavor */
+       __be16 sq_wqe_counter;  /* reserved on Tavor */
+       u32    reserved3[18];
 } __attribute__((packed));
 
 struct mthca_qp_param {
-       u32 opt_param_mask;
-       u32 reserved1;
+       __be32 opt_param_mask;
+       u32    reserved1;
        struct mthca_qp_context context;
-       u32 reserved2[62];
+       u32    reserved2[62];
 } __attribute__((packed));
 
 enum {
@@ -162,80 +183,14 @@ enum {
        MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
 };
 
-enum {
-       MTHCA_OPCODE_NOP            = 0x00,
-       MTHCA_OPCODE_RDMA_WRITE     = 0x08,
-       MTHCA_OPCODE_RDMA_WRITE_IMM = 0x09,
-       MTHCA_OPCODE_SEND           = 0x0a,
-       MTHCA_OPCODE_SEND_IMM       = 0x0b,
-       MTHCA_OPCODE_RDMA_READ      = 0x10,
-       MTHCA_OPCODE_ATOMIC_CS      = 0x11,
-       MTHCA_OPCODE_ATOMIC_FA      = 0x12,
-       MTHCA_OPCODE_BIND_MW        = 0x18,
-       MTHCA_OPCODE_INVALID        = 0xff
-};
-
-enum {
-       MTHCA_NEXT_DBD       = 1 << 7,
-       MTHCA_NEXT_FENCE     = 1 << 6,
-       MTHCA_NEXT_CQ_UPDATE = 1 << 3,
-       MTHCA_NEXT_EVENT_GEN = 1 << 2,
-       MTHCA_NEXT_SOLICIT   = 1 << 1,
-
-       MTHCA_MLX_VL15       = 1 << 17,
-       MTHCA_MLX_SLR        = 1 << 16
-};
-
-struct mthca_next_seg {
-       u32 nda_op;             /* [31:6] next WQE [4:0] next opcode */
-       u32 ee_nds;             /* [31:8] next EE  [7] DBD [6] F [5:0] next WQE size */
-       u32 flags;              /* [3] CQ [2] Event [1] Solicit */
-       u32 imm;                /* immediate data */
-};
-
-struct mthca_ud_seg {
-       u32 reserved1;
-       u32 lkey;
-       u64 av_addr;
-       u32 reserved2[4];
-       u32 dqpn;
-       u32 qkey;
-       u32 reserved3[2];
-};
-
-struct mthca_bind_seg {
-       u32 flags;              /* [31] Atomic [30] rem write [29] rem read */
-       u32 reserved;
-       u32 new_rkey;
-       u32 lkey;
-       u64 addr;
-       u64 length;
-};
-
-struct mthca_raddr_seg {
-       u64 raddr;
-       u32 rkey;
-       u32 reserved;
-};
-
-struct mthca_atomic_seg {
-       u64 swap_add;
-       u64 compare;
-};
-
-struct mthca_data_seg {
-       u32 byte_count;
-       u32 lkey;
-       u64 addr;
-};
-
-struct mthca_mlx_seg {
-       u32 nda_op;
-       u32 nds;
-       u32 flags;              /* [17] VL15 [16] SLR [14:12] static rate
-                                  [11:8] SL [3] C [2] E */
-       u16 rlid;
-       u16 vcrc;
+static const u8 mthca_opcode[] = {
+       [IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
+       [IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
+       [IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
+       [IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
+       [IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
+       [IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
+       [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
 };
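
With the big software state table gone (ib_modify_qp_is_ok() in the IB core
now performs that validation, see mthca_modify_qp() below), the only
per-verb knowledge left here is this designated-initializer lookup table.
A sketch of how the post-send path is expected to use it, simplified from
mthca_tavor_post_send(); the explicit bounds check is an illustrative
assumption:

    /* Illustrative: translate the IB work request opcode into the
     * hardware opcode stored in the WQE's next segment. */
    if (wr->opcode >= ARRAY_SIZE(mthca_opcode))
            return -EINVAL;                     /* unsupported verb */
    u8 op = mthca_opcode[wr->opcode];           /* IB_WR_SEND -> 0x0a */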
 
 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
@@ -272,6 +227,14 @@ static void *get_send_wqe(struct mthca_qp *qp, int n)
                         (PAGE_SIZE - 1));
 }
 
+static void mthca_wq_reset(struct mthca_wq *wq)
+{
+       wq->next_ind  = 0;
+       wq->last_comp = wq->max - 1;
+       wq->head      = 0;
+       wq->tail      = 0;
+}
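
mthca_wq_reset() centralizes ring bookkeeping that several paths need: head
counts WQEs posted, tail counts WQEs completed, and a last_comp of max - 1
means "nothing completed yet".  A sketch of the fullness test these fields
support (this mirrors the driver's mthca_wq_overflow() helper, minus its
CQ-locked refresh of tail), relying on unsigned wraparound of the counters:

    /* Sketch: the queue is full when posted-but-uncompleted work
     * reaches max; unsigned subtraction survives counter wraparound. */
    static inline int wq_would_overflow(struct mthca_wq *wq, int nreq)
    {
            unsigned cur = wq->head - wq->tail;

            return cur + nreq >= wq->max;
    }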
+
 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                    enum ib_event_type event_type)
 {
@@ -281,7 +244,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
        spin_lock(&dev->qp_table.lock);
        qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
        if (qp)
-               atomic_inc(&qp->refcount);
+               ++qp->refcount;
        spin_unlock(&dev->qp_table.lock);
 
        if (!qp) {
@@ -289,14 +252,19 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                return;
        }
 
+       if (event_type == IB_EVENT_PATH_MIG)
+               qp->port = qp->alt_port;
+
        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.qp  = &qp->ibqp;
        if (qp->ibqp.event_handler)
                qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
 
-       if (atomic_dec_and_test(&qp->refcount))
+       spin_lock(&dev->qp_table.lock);
+       if (!--qp->refcount)
                wake_up(&qp->wait);
+       spin_unlock(&dev->qp_table.lock);
 }
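
Moving from atomic_t to a plain refcount guarded by qp_table.lock makes
lookup-and-increment a single atomic unit, closing the window where a QP
could be freed between mthca_array_get() and the increment.  The destroy
side is then expected to wait like this (sketch; the driver's actual helper
is get_qp_refcount() with the same locking):

    static int qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
    {
            int c;

            spin_lock_irq(&dev->qp_table.lock);
            c = qp->refcount;
            spin_unlock_irq(&dev->qp_table.lock);

            return c;
    }

    /* ...in the destroy path: */
    wait_event(qp->wait, !qp_refcount(dev, qp));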
 
 static int to_mthca_state(enum ib_qp_state ib_state)
@@ -327,177 +295,6 @@ static int to_mthca_st(int transport)
        }
 }
 
-static const struct {
-       int trans;
-       u32 req_param[NUM_TRANS];
-       u32 opt_param[NUM_TRANS];
-} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
-       [IB_QPS_RESET] = {
-               [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
-               [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
-               [IB_QPS_INIT]  = {
-                       .trans = MTHCA_TRANS_RST2INIT,
-                       .req_param = {
-                               [UD]  = (IB_QP_PKEY_INDEX |
-                                        IB_QP_PORT       |
-                                        IB_QP_QKEY),
-                               [RC]  = (IB_QP_PKEY_INDEX |
-                                        IB_QP_PORT       |
-                                        IB_QP_ACCESS_FLAGS),
-                               [MLX] = (IB_QP_PKEY_INDEX |
-                                        IB_QP_QKEY),
-                       },
-                       /* bug-for-bug compatibility with VAPI: */
-                       .opt_param = {
-                               [MLX] = IB_QP_PORT
-                       }
-               },
-       },
-       [IB_QPS_INIT]  = {
-               [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
-               [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
-               [IB_QPS_INIT]  = {
-                       .trans = MTHCA_TRANS_INIT2INIT,
-                       .opt_param = {
-                               [UD]  = (IB_QP_PKEY_INDEX |
-                                        IB_QP_PORT       |
-                                        IB_QP_QKEY),
-                               [RC]  = (IB_QP_PKEY_INDEX |
-                                        IB_QP_PORT       |
-                                        IB_QP_ACCESS_FLAGS),
-                               [MLX] = (IB_QP_PKEY_INDEX |
-                                        IB_QP_QKEY),
-                       }
-               },
-               [IB_QPS_RTR]   = {
-                       .trans = MTHCA_TRANS_INIT2RTR,
-                       .req_param = {
-                               [RC]  = (IB_QP_AV                  |
-                                        IB_QP_PATH_MTU            |
-                                        IB_QP_DEST_QPN            |
-                                        IB_QP_RQ_PSN              |
-                                        IB_QP_MAX_DEST_RD_ATOMIC  |
-                                        IB_QP_MIN_RNR_TIMER),
-                       },
-                       .opt_param = {
-                               [UD]  = (IB_QP_PKEY_INDEX |
-                                        IB_QP_QKEY),
-                               [RC]  = (IB_QP_ALT_PATH     |
-                                        IB_QP_ACCESS_FLAGS |
-                                        IB_QP_PKEY_INDEX),
-                               [MLX] = (IB_QP_PKEY_INDEX |
-                                        IB_QP_QKEY),
-                       }
-               }
-       },
-       [IB_QPS_RTR]   = {
-               [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
-               [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
-               [IB_QPS_RTS]   = {
-                       .trans = MTHCA_TRANS_RTR2RTS,
-                       .req_param = {
-                               [UD]  = IB_QP_SQ_PSN,
-                               [RC]  = (IB_QP_TIMEOUT           |
-                                        IB_QP_RETRY_CNT         |
-                                        IB_QP_RNR_RETRY         |
-                                        IB_QP_SQ_PSN            |
-                                        IB_QP_MAX_QP_RD_ATOMIC),
-                               [MLX] = IB_QP_SQ_PSN,
-                       },
-                       .opt_param = {
-                               [UD]  = (IB_QP_CUR_STATE             |
-                                        IB_QP_QKEY),
-                               [RC]  = (IB_QP_CUR_STATE             |
-                                        IB_QP_ALT_PATH              |
-                                        IB_QP_ACCESS_FLAGS          |
-                                        IB_QP_PKEY_INDEX            |
-                                        IB_QP_MIN_RNR_TIMER         |
-                                        IB_QP_PATH_MIG_STATE),
-                               [MLX] = (IB_QP_CUR_STATE             |
-                                        IB_QP_QKEY),
-                       }
-               }
-       },
-       [IB_QPS_RTS]   = {
-               [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
-               [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
-               [IB_QPS_RTS]   = {
-                       .trans = MTHCA_TRANS_RTS2RTS,
-                       .opt_param = {
-                               [UD]  = (IB_QP_CUR_STATE             |
-                                        IB_QP_QKEY),
-                               [RC]  = (IB_QP_ACCESS_FLAGS          |
-                                        IB_QP_ALT_PATH              |
-                                        IB_QP_PATH_MIG_STATE        |
-                                        IB_QP_MIN_RNR_TIMER),
-                               [MLX] = (IB_QP_CUR_STATE             |
-                                        IB_QP_QKEY),
-                       }
-               },
-               [IB_QPS_SQD]   = {
-                       .trans = MTHCA_TRANS_RTS2SQD,
-               },
-       },
-       [IB_QPS_SQD]   = {
-               [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
-               [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
-               [IB_QPS_RTS]   = {
-                       .trans = MTHCA_TRANS_SQD2RTS,
-                       .opt_param = {
-                               [UD]  = (IB_QP_CUR_STATE             |
-                                        IB_QP_QKEY),
-                               [RC]  = (IB_QP_CUR_STATE             |
-                                        IB_QP_ALT_PATH              |
-                                        IB_QP_ACCESS_FLAGS          |
-                                        IB_QP_MIN_RNR_TIMER         |
-                                        IB_QP_PATH_MIG_STATE),
-                               [MLX] = (IB_QP_CUR_STATE             |
-                                        IB_QP_QKEY),
-                       }
-               },
-               [IB_QPS_SQD]   = {
-                       .trans = MTHCA_TRANS_SQD2SQD,
-                       .opt_param = {
-                               [UD]  = (IB_QP_PKEY_INDEX            |
-                                        IB_QP_QKEY),
-                               [RC]  = (IB_QP_AV                    |
-                                        IB_QP_TIMEOUT               |
-                                        IB_QP_RETRY_CNT             |
-                                        IB_QP_RNR_RETRY             |
-                                        IB_QP_MAX_QP_RD_ATOMIC      |
-                                        IB_QP_MAX_DEST_RD_ATOMIC    |
-                                        IB_QP_CUR_STATE             |
-                                        IB_QP_ALT_PATH              |
-                                        IB_QP_ACCESS_FLAGS          |
-                                        IB_QP_PKEY_INDEX            |
-                                        IB_QP_MIN_RNR_TIMER         |
-                                        IB_QP_PATH_MIG_STATE),
-                               [MLX] = (IB_QP_PKEY_INDEX            |
-                                        IB_QP_QKEY),
-                       }
-               }
-       },
-       [IB_QPS_SQE]   = {
-               [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
-               [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
-               [IB_QPS_RTS]   = {
-                       .trans = MTHCA_TRANS_SQERR2RTS,
-                       .opt_param = {
-                               [UD]  = (IB_QP_CUR_STATE             |
-                                        IB_QP_QKEY),
-                               [RC]  = (IB_QP_CUR_STATE             |
-                                        IB_QP_MIN_RNR_TIMER),
-                               [MLX] = (IB_QP_CUR_STATE             |
-                                        IB_QP_QKEY),
-                       }
-               }
-       },
-       [IB_QPS_ERR] = {
-               [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
-               [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR }
-       }
-};
-
 static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
                        int attr_mask)
 {
@@ -517,12 +314,11 @@ static void init_port(struct mthca_dev *dev, int port)
 
        memset(&param, 0, sizeof param);
 
-       param.enable_1x = 1;
-       param.enable_4x = 1;
-       param.vl_cap    = dev->limits.vl_cap;
-       param.mtu_cap   = dev->limits.mtu_cap;
-       param.gid_cap   = dev->limits.gid_table_len;
-       param.pkey_cap  = dev->limits.pkey_table_len;
+       param.port_width = dev->limits.port_width_cap;
+       param.vl_cap     = dev->limits.vl_cap;
+       param.mtu_cap    = dev->limits.mtu_cap;
+       param.gid_cap    = dev->limits.gid_table_len;
+       param.pkey_cap   = dev->limits.pkey_table_len;
 
        err = mthca_INIT_IB(dev, &param, port, &status);
        if (err)
@@ -531,70 +327,285 @@ static void init_port(struct mthca_dev *dev, int port)
                mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
 }
 
-int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
+static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
+                                 int attr_mask)
+{
+       u8 dest_rd_atomic;
+       u32 access_flags;
+       u32 hw_access_flags = 0;
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               dest_rd_atomic = attr->max_dest_rd_atomic;
+       else
+               dest_rd_atomic = qp->resp_depth;
+
+       if (attr_mask & IB_QP_ACCESS_FLAGS)
+               access_flags = attr->qp_access_flags;
+       else
+               access_flags = qp->atomic_rd_en;
+
+       if (!dest_rd_atomic)
+               access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+       if (access_flags & IB_ACCESS_REMOTE_READ)
+               hw_access_flags |= MTHCA_QP_BIT_RRE;
+       if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+               hw_access_flags |= MTHCA_QP_BIT_RAE;
+       if (access_flags & IB_ACCESS_REMOTE_WRITE)
+               hw_access_flags |= MTHCA_QP_BIT_RWE;
+
+       return cpu_to_be32(hw_access_flags);
+}
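
The net effect: remote reads and atomics only make sense when the QP has
responder resources, so a zero dest_rd_atomic masks everything but remote
write.  Worked example with illustrative inputs:

    /* access_flags   = REMOTE_READ | REMOTE_WRITE | REMOTE_ATOMIC,
     * dest_rd_atomic = 0:
     *   access_flags &= IB_ACCESS_REMOTE_WRITE
     *   -> hw_access_flags = MTHCA_QP_BIT_RWE only (RRE and RAE off). */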
+
+static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
+{
+       switch (mthca_state) {
+       case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
+       case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
+       case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
+       case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
+       case MTHCA_QP_STATE_DRAINING:
+       case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
+       case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
+       case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
+       default:                      return -1;
+       }
+}
+
+static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
+{
+       switch (mthca_mig_state) {
+       case 0:  return IB_MIG_ARMED;
+       case 1:  return IB_MIG_REARM;
+       case 3:  return IB_MIG_MIGRATED;
+       default: return -1;
+       }
+}
+
+static int to_ib_qp_access_flags(int mthca_flags)
+{
+       int ib_flags = 0;
+
+       if (mthca_flags & MTHCA_QP_BIT_RRE)
+               ib_flags |= IB_ACCESS_REMOTE_READ;
+       if (mthca_flags & MTHCA_QP_BIT_RWE)
+               ib_flags |= IB_ACCESS_REMOTE_WRITE;
+       if (mthca_flags & MTHCA_QP_BIT_RAE)
+               ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+       return ib_flags;
+}
+
+static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
+                               struct mthca_qp_path *path)
+{
+       memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
+       ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
+
+       if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
+               return;
+
+       ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
+       ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
+       ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
+       ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
+                                                    path->static_rate & 0xf,
+                                                    ib_ah_attr->port_num);
+       ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
+       if (ib_ah_attr->ah_flags) {
+               ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
+               ib_ah_attr->grh.hop_limit  = path->hop_limit;
+               ib_ah_attr->grh.traffic_class =
+                       (be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
+               ib_ah_attr->grh.flow_label =
+                       be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
+               memcpy(ib_ah_attr->grh.dgid.raw,
+                       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
+       }
+}
+
+int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+                  struct ib_qp_init_attr *qp_init_attr)
+{
+       struct mthca_dev *dev = to_mdev(ibqp->device);
+       struct mthca_qp *qp = to_mqp(ibqp);
+       int err = 0;
+       struct mthca_mailbox *mailbox = NULL;
+       struct mthca_qp_param *qp_param;
+       struct mthca_qp_context *context;
+       int mthca_state;
+       u8 status;
+
+       if (qp->state == IB_QPS_RESET) {
+               qp_attr->qp_state = IB_QPS_RESET;
+               goto done;
+       }
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
+       if (err)
+               goto out;
+       if (status) {
+               mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
+               err = -EINVAL;
+               goto out;
+       }
+
+       qp_param    = mailbox->buf;
+       context     = &qp_param->context;
+       mthca_state = be32_to_cpu(context->flags) >> 28;
+
+       qp_attr->qp_state            = to_ib_qp_state(mthca_state);
+       qp_attr->path_mtu            = context->mtu_msgmax >> 5;
+       qp_attr->path_mig_state      =
+               to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
+       qp_attr->qkey                = be32_to_cpu(context->qkey);
+       qp_attr->rq_psn              = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
+       qp_attr->sq_psn              = be32_to_cpu(context->next_send_psn) & 0xffffff;
+       qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
+       qp_attr->qp_access_flags     =
+               to_ib_qp_access_flags(be32_to_cpu(context->params2));
+
+       if (qp->transport == RC || qp->transport == UC) {
+               to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+               to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+               qp_attr->alt_pkey_index =
+                       be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
+               qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
+       }
+
+       qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
+       qp_attr->port_num   =
+               (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;
+
+       /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
+       qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;
+
+       qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
+
+       qp_attr->max_dest_rd_atomic =
+               1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
+       qp_attr->min_rnr_timer      =
+               (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
+       qp_attr->timeout            = context->pri_path.ackto >> 3;
+       qp_attr->retry_cnt          = (be32_to_cpu(context->params1) >> 16) & 0x7;
+       qp_attr->rnr_retry          = context->pri_path.rnr_retry >> 5;
+       qp_attr->alt_timeout        = context->alt_path.ackto >> 3;
+
+done:
+       qp_attr->cur_qp_state        = qp_attr->qp_state;
+       qp_attr->cap.max_send_wr     = qp->sq.max;
+       qp_attr->cap.max_recv_wr     = qp->rq.max;
+       qp_attr->cap.max_send_sge    = qp->sq.max_gs;
+       qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
+       qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+       qp_init_attr->cap            = qp_attr->cap;
+
+out:
+       mthca_free_mailbox(dev, mailbox);
+       return err;
+}
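
A sketch of how a consumer reaches this new entry point; ib_query_qp() in
the verbs core dispatches to the driver's query_qp method (hypothetical
caller, error handling trimmed):

    struct ib_qp_attr attr;
    struct ib_qp_init_attr init_attr;

    /* qp obtained earlier from ib_create_qp() */
    if (!ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr) &&
        attr.qp_state == IB_QPS_SQD)
            pr_debug("SQ %sdraining\n", attr.sq_draining ? "" : "not ");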
+
+static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
+                         struct mthca_qp_path *path, u8 port)
+{
+       path->g_mylmc     = ah->src_path_bits & 0x7f;
+       path->rlid        = cpu_to_be16(ah->dlid);
+       path->static_rate = mthca_get_rate(dev, ah->static_rate, port);
+
+       if (ah->ah_flags & IB_AH_GRH) {
+               if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
+                       mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
+                                 ah->grh.sgid_index, dev->limits.gid_table_len-1);
+                       return -1;
+               }
+
+               path->g_mylmc   |= 1 << 7;
+               path->mgid_index = ah->grh.sgid_index;
+               path->hop_limit  = ah->grh.hop_limit;
+               path->sl_tclass_flowlabel =
+                       cpu_to_be32((ah->sl << 28)                |
+                                   (ah->grh.traffic_class << 20) |
+                                   (ah->grh.flow_label));
+               memcpy(path->rgid, ah->grh.dgid.raw, 16);
+       } else
+               path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
+
+       return 0;
+}
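
mthca_path_set() and to_ib_ah_attr() above are inverses over the same
packed word; the layout both sides assume:

    /* sl_tclass_flowlabel bit layout:
     *   [31:28] service level (SL)
     *   [27:20] GRH traffic class
     *   [19:0]  GRH flow label
     * e.g. SL 3, traffic class 0x20, flow label 0x12345:
     *   cpu_to_be32(3 << 28 | 0x20 << 20 | 0x12345)
     *     == cpu_to_be32(0x32012345) */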
+
+int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+                   struct ib_udata *udata)
 {
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
-       void *mailbox = NULL;
+       struct mthca_mailbox *mailbox;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *qp_context;
-       u32 req_param, opt_param;
+       u32 sqd_event = 0;
        u8 status;
-       int err;
+       int err = -EINVAL;
+
+       mutex_lock(&qp->mutex);
 
        if (attr_mask & IB_QP_CUR_STATE) {
-               if (attr->cur_qp_state != IB_QPS_RTR &&
-                   attr->cur_qp_state != IB_QPS_RTS &&
-                   attr->cur_qp_state != IB_QPS_SQD &&
-                   attr->cur_qp_state != IB_QPS_SQE)
-                       return -EINVAL;
-               else
-                       cur_state = attr->cur_qp_state;
+               cur_state = attr->cur_qp_state;
        } else {
-               spin_lock_irq(&qp->lock);
+               spin_lock_irq(&qp->sq.lock);
+               spin_lock(&qp->rq.lock);
                cur_state = qp->state;
-               spin_unlock_irq(&qp->lock);
+               spin_unlock(&qp->rq.lock);
+               spin_unlock_irq(&qp->sq.lock);
        }
 
-       if (attr_mask & IB_QP_STATE) {
-               if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
-                       return -EINVAL;
-               new_state = attr->qp_state;
-       } else
-               new_state = cur_state;
+       new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-       if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
-               mthca_dbg(dev, "Illegal QP transition "
-                         "%d->%d\n", cur_state, new_state);
-               return -EINVAL;
+       if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+               mthca_dbg(dev, "Bad QP transition (transport %d) "
+                         "%d->%d with attr 0x%08x\n",
+                         qp->transport, cur_state, new_state,
+                         attr_mask);
+               goto out;
        }
 
-       req_param = state_table[cur_state][new_state].req_param[qp->transport];
-       opt_param = state_table[cur_state][new_state].opt_param[qp->transport];
+       if ((attr_mask & IB_QP_PKEY_INDEX) &&
+            attr->pkey_index >= dev->limits.pkey_table_len) {
+               mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
+                         attr->pkey_index, dev->limits.pkey_table_len-1);
+               goto out;
+       }
 
-       if ((req_param & attr_mask) != req_param) {
-               mthca_dbg(dev, "QP transition "
-                         "%d->%d missing req attr 0x%08x\n",
-                         cur_state, new_state,
-                         req_param & ~attr_mask);
-               return -EINVAL;
+       if ((attr_mask & IB_QP_PORT) &&
+           (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
+               mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
+               goto out;
        }
 
-       if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
-               mthca_dbg(dev, "QP transition (transport %d) "
-                         "%d->%d has extra attr 0x%08x\n",
-                         qp->transport,
-                         cur_state, new_state,
-                         attr_mask & ~(req_param | opt_param |
-                                                IB_QP_STATE));
-               return -EINVAL;
+       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+           attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
+               mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
+                         attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
+               goto out;
        }
 
-       mailbox = kmalloc(sizeof (*qp_param) + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
-       if (!mailbox)
-               return -ENOMEM;
-       qp_param = MAILBOX_ALIGN(mailbox);
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+           attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
+               mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
+                         attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
+               goto out;
+       }
+
+       mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               goto out;
+       }
+       qp_param = mailbox->buf;
        qp_context = &qp_param->context;
        memset(qp_param, 0, sizeof *qp_param);
 
@@ -617,15 +628,37 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                        break;
                }
        }
-       /* leave sched_queue as 0 */
+
+       /* leave tavor_sched_queue as 0 */
+
        if (qp->transport == MLX || qp->transport == UD)
-               qp_context->mtu_msgmax = cpu_to_be32((IB_MTU_2048 << 29) |
-                                                    (11 << 24));
+               qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
        else if (attr_mask & IB_QP_PATH_MTU) {
-               qp_context->mtu_msgmax = cpu_to_be32((attr->path_mtu << 29) |
-                                                    (31 << 24));
+               if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
+                       mthca_dbg(dev, "path MTU (%u) is invalid\n",
+                                 attr->path_mtu);
+                       goto out_mailbox;
+               }
+               qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
+       }
+
+       if (mthca_is_memfree(dev)) {
+               if (qp->rq.max)
+                       qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
+               qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
+
+               if (qp->sq.max)
+                       qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
+               qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
        }
-       qp_context->usr_page   = cpu_to_be32(MTHCA_KAR_PAGE);
+
+       /* leave arbel_sched_queue as 0 */
+
+       if (qp->ibqp.uobject)
+               qp_context->usr_page =
+                       cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
+       else
+               qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
        qp_context->local_qpn  = cpu_to_be32(qp->qpn);
        if (attr_mask & IB_QP_DEST_QPN) {
                qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
@@ -633,7 +666,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
        if (qp->transport == MLX)
                qp_context->pri_path.port_pkey |=
-                       cpu_to_be32(to_msqp(qp)->port << 24);
+                       cpu_to_be32(qp->port << 24);
        else {
                if (attr_mask & IB_QP_PORT) {
                        qp_context->pri_path.port_pkey |=
@@ -649,37 +682,47 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
        }
 
        if (attr_mask & IB_QP_RNR_RETRY) {
-               qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
-               qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
+               qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
+                       attr->rnr_retry << 5;
+               qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
+                                                       MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
        }
 
        if (attr_mask & IB_QP_AV) {
-               qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
-               qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
-               qp_context->pri_path.static_rate = (!!attr->ah_attr.static_rate) << 3;
-               if (attr->ah_attr.ah_flags & IB_AH_GRH) {
-                       qp_context->pri_path.g_mylmc |= 1 << 7;
-                       qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
-                       qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
-                       qp_context->pri_path.sl_tclass_flowlabel =
-                               cpu_to_be32((attr->ah_attr.sl << 28)                |
-                                           (attr->ah_attr.grh.traffic_class << 20) |
-                                           (attr->ah_attr.grh.flow_label));
-                       memcpy(qp_context->pri_path.rgid,
-                              attr->ah_attr.grh.dgid.raw, 16);
-               } else {
-                       qp_context->pri_path.sl_tclass_flowlabel =
-                               cpu_to_be32(attr->ah_attr.sl << 28);
-               }
+               if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
+                                  attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
+                       goto out_mailbox;
+
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }
 
        if (attr_mask & IB_QP_TIMEOUT) {
-               qp_context->pri_path.ackto = attr->timeout;
+               qp_context->pri_path.ackto = attr->timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
        }
 
-       /* XXX alt_path */
+       if (attr_mask & IB_QP_ALT_PATH) {
+               if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
+                       mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
+                                 attr->alt_pkey_index, dev->limits.pkey_table_len-1);
+                       goto out_mailbox;
+               }
+
+               if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
+                       mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
+                               attr->alt_port_num);
+                       goto out_mailbox;
+               }
+
+               if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
+                                  attr->alt_ah_attr.port_num))
+                       goto out_mailbox;
+
+               qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
+                                                             attr->alt_port_num << 24);
+               qp_context->alt_path.ackto = attr->alt_timeout << 3;
+               qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
+       }
 
        /* leave rdd as 0 */
        qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
@@ -687,20 +730,22 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
        qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
        qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
                                             (MTHCA_FLIGHT_LIMIT << 24) |
-                                            MTHCA_QP_BIT_SRE           |
-                                            MTHCA_QP_BIT_SWE           |
-                                            MTHCA_QP_BIT_SAE);
-       if (qp->sq.policy == IB_SIGNAL_ALL_WR)
+                                            MTHCA_QP_BIT_SWE);
+       if (qp->sq_policy == IB_SIGNAL_ALL_WR)
                qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
        if (attr_mask & IB_QP_RETRY_CNT) {
                qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
        }
 
-       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-               qp_context->params1 |= cpu_to_be32(min(attr->max_dest_rd_atomic ?
-                                                      ffs(attr->max_dest_rd_atomic) - 1 : 0,
-                                                      7) << 21);
+       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+               if (attr->max_rd_atomic) {
+                       qp_context->params1 |=
+                               cpu_to_be32(MTHCA_QP_BIT_SRE |
+                                           MTHCA_QP_BIT_SAE);
+                       qp_context->params1 |=
+                               cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
+               }
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
        }
 
@@ -708,78 +753,31 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
        qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);
 
-       if (attr_mask & IB_QP_ACCESS_FLAGS) {
-               /*
-                * Only enable RDMA/atomics if we have responder
-                * resources set to a non-zero value.
-                */
-               if (qp->resp_depth) {
-                       qp_context->params2 |=
-                               cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
-                                           MTHCA_QP_BIT_RWE : 0);
-                       qp_context->params2 |=
-                               cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
-                                           MTHCA_QP_BIT_RRE : 0);
+       if (mthca_is_memfree(dev)) {
+               qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
+               qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
+       }
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+               if (attr->max_dest_rd_atomic)
                        qp_context->params2 |=
-                               cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
-                                           MTHCA_QP_BIT_RAE : 0);
-               }
+                               cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
 
+               qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
+       }
+
+       if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+               qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
                                                        MTHCA_QP_OPTPAR_RRE |
                                                        MTHCA_QP_OPTPAR_RAE);
-
-               qp->atomic_rd_en = attr->qp_access_flags;
        }
 
-       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-               u8 rra_max;
-
-               if (qp->resp_depth && !attr->max_rd_atomic) {
-                       /*
-                        * Lowering our responder resources to zero.
-                        * Turn off RDMA/atomics as responder.
-                        * (RWE/RRE/RAE in params2 already zero)
-                        */
-                       qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-                                                               MTHCA_QP_OPTPAR_RRE |
-                                                               MTHCA_QP_OPTPAR_RAE);
-               }
-
-               if (!qp->resp_depth && attr->max_rd_atomic) {
-                       /*
-                        * Increasing our responder resources from
-                        * zero.  Turn on RDMA/atomics as appropriate.
-                        */
-                       qp_context->params2 |=
-                               cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
-                                           MTHCA_QP_BIT_RWE : 0);
-                       qp_context->params2 |=
-                               cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
-                                           MTHCA_QP_BIT_RRE : 0);
-                       qp_context->params2 |=
-                               cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
-                                           MTHCA_QP_BIT_RAE : 0);
-
-                       qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-                                                               MTHCA_QP_OPTPAR_RRE |
-                                                               MTHCA_QP_OPTPAR_RAE);
-               }
-
-               for (rra_max = 0;
-                    1 << rra_max < attr->max_rd_atomic &&
-                            rra_max < dev->qp_table.rdb_shift;
-                    ++rra_max)
-                       ; /* nothing */
-
-               qp_context->params2      |= cpu_to_be32(rra_max << 21);
-               qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
+       qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
 
-               qp->resp_depth = attr->max_rd_atomic;
-       }
+       if (ibqp->srq)
+               qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);
 
-       if (qp->rq.policy == IB_SIGNAL_ALL_WR)
-               qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
@@ -787,52 +785,152 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
        if (attr_mask & IB_QP_RQ_PSN)
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
 
-       qp_context->ra_buff_indx = dev->qp_table.rdb_base +
-               ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
-                dev->qp_table.rdb_shift);
+       qp_context->ra_buff_indx =
+               cpu_to_be32(dev->qp_table.rdb_base +
+                           ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
+                            dev->qp_table.rdb_shift));
 
        qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);
 
+       if (mthca_is_memfree(dev))
+               qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);
+
        if (attr_mask & IB_QP_QKEY) {
                qp_context->qkey = cpu_to_be32(attr->qkey);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
        }
 
-       err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
-                             qp->qpn, 0, qp_param, 0, &status);
+       if (ibqp->srq)
+               qp_context->srqn = cpu_to_be32(1 << 24 |
+                                              to_msrq(ibqp->srq)->srqn);
+
+       if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD  &&
+           attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY               &&
+           attr->en_sqd_async_notify)
+               sqd_event = 1 << 31;
+
+       err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
+                             mailbox, sqd_event, &status);
+       if (err)
+               goto out_mailbox;
        if (status) {
-               mthca_warn(dev, "modify QP %d returned status %02x.\n",
-                          state_table[cur_state][new_state].trans, status);
+               mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
+                          cur_state, new_state, status);
                err = -EINVAL;
+               goto out_mailbox;
        }
 
-       if (!err)
-               qp->state = new_state;
-
-       kfree(mailbox);
+       qp->state = new_state;
+       if (attr_mask & IB_QP_ACCESS_FLAGS)
+               qp->atomic_rd_en = attr->qp_access_flags;
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               qp->resp_depth = attr->max_dest_rd_atomic;
+       if (attr_mask & IB_QP_PORT)
+               qp->port = attr->port_num;
+       if (attr_mask & IB_QP_ALT_PATH)
+               qp->alt_port = attr->alt_port_num;
 
        if (is_sqp(dev, qp))
                store_attrs(to_msqp(qp), attr, attr_mask);
 
        /*
-        * If we are moving QP0 to RTR, bring the IB link up; if we
-        * are moving QP0 to RESET or ERROR, bring the link back down.
+        * If we moved QP0 to RTR, bring the IB link up; if we moved
+        * QP0 to RESET or ERROR, bring the link back down.
         */
        if (is_qp0(dev, qp)) {
                if (cur_state != IB_QPS_RTR &&
                    new_state == IB_QPS_RTR)
-                       init_port(dev, to_msqp(qp)->port);
+                       init_port(dev, qp->port);
 
                if (cur_state != IB_QPS_RESET &&
                    cur_state != IB_QPS_ERR &&
                    (new_state == IB_QPS_RESET ||
                     new_state == IB_QPS_ERR))
-                       mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
+                       mthca_CLOSE_IB(dev, qp->port, &status);
        }
 
+       /*
+        * If we moved a kernel QP to RESET, clean up all old CQ
+        * entries and reinitialize the QP.
+        */
+       if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
+               mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
+                              qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+               if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
+                       mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);
+
+               mthca_wq_reset(&qp->sq);
+               qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+
+               mthca_wq_reset(&qp->rq);
+               qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
+               if (mthca_is_memfree(dev)) {
+                       *qp->sq.db = 0;
+                       *qp->rq.db = 0;
+               }
+       }
+
+out_mailbox:
+       mthca_free_mailbox(dev, mailbox);
+
+out:
+       mutex_unlock(&qp->mutex);
        return err;
 }
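
Two encodings above are worth unpacking.  fls(x - 1) equals ceil(log2(x))
for x >= 1, which is exactly what bits [23:21] of params1/params2 want: the
outstanding RDMA/atomic limit rounded up to a power of two.  And mtu_msgmax
packs the path-MTU enum into its high three bits with log2 of the maximum
message size in the low five.  Worked values, all illustrative:

    /* max_rd_atomic = 1 -> fls(0) = 0 -> 2^0 = 1 outstanding read
     * max_rd_atomic = 4 -> fls(3) = 2 -> 2^2 = 4
     * max_rd_atomic = 5 -> fls(4) = 3 -> rounded up to 2^3 = 8
     *
     * mtu_msgmax = (IB_MTU_2048 << 5) | 11 == (4 << 5) | 11:
     *   MTU enum 4 (2048 bytes) in bits [7:5], max message size
     *   2^11 bytes in bits [4:0]. */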
 
+static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
+{
+       /*
+        * Calculate the maximum size of WQE s/g segments, excluding
+        * the next segment and other non-data segments.
+        */
+       int max_data_size = desc_sz - sizeof (struct mthca_next_seg);
+
+       switch (qp->transport) {
+       case MLX:
+               max_data_size -= 2 * sizeof (struct mthca_data_seg);
+               break;
+
+       case UD:
+               if (mthca_is_memfree(dev))
+                       max_data_size -= sizeof (struct mthca_arbel_ud_seg);
+               else
+                       max_data_size -= sizeof (struct mthca_tavor_ud_seg);
+               break;
+
+       default:
+               max_data_size -= sizeof (struct mthca_raddr_seg);
+               break;
+       }
+
+       return max_data_size;
+}
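
Every segment involved here is a 16-byte multiple (mthca_next_seg,
mthca_raddr_seg and mthca_data_seg are 16 bytes each, per the structs this
diff moves out to mthca_wqe.h), so the arithmetic checks out by hand.  An
illustrative RC case:

    /* RC QP, desc_sz = 128 bytes:
     *   128 - sizeof(struct mthca_next_seg)   (16)
     *       - sizeof(struct mthca_raddr_seg)  (16)
     *   = 96 bytes for data segments
     *   = 96 / sizeof(struct mthca_data_seg)  (16) = 6 s/g entries. */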
+
+static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
+{
+       /* We don't support inline data for kernel QPs (yet). */
+       return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
+}
+
+static void mthca_adjust_qp_caps(struct mthca_dev *dev,
+                                struct mthca_pd *pd,
+                                struct mthca_qp *qp)
+{
+       int max_data_size = mthca_max_data_size(dev, qp,
+                                               min(dev->limits.max_desc_sz,
+                                                   1 << qp->sq.wqe_shift));
+
+       qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);
+
+       qp->sq.max_gs = min_t(int, dev->limits.max_sg,
+                             max_data_size / sizeof (struct mthca_data_seg));
+       qp->rq.max_gs = min_t(int, dev->limits.max_sg,
+                              (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
+                               sizeof (struct mthca_next_seg)) /
+                              sizeof (struct mthca_data_seg));
+}
+
 /*
  * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
  * rq.max_gs and sq.max_gs must all be assigned.
@@ -845,27 +943,57 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
                               struct mthca_qp *qp)
 {
        int size;
-       int i;
-       int npages, shift;
-       dma_addr_t t;
-       u64 *dma_list = NULL;
        int err = -ENOMEM;
 
        size = sizeof (struct mthca_next_seg) +
                qp->rq.max_gs * sizeof (struct mthca_data_seg);
 
+       if (size > dev->limits.max_desc_sz)
+               return -EINVAL;
+
        for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
             qp->rq.wqe_shift++)
                ; /* nothing */
 
-       size = sizeof (struct mthca_next_seg) +
-               qp->sq.max_gs * sizeof (struct mthca_data_seg);
-       if (qp->transport == MLX)
+       size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
+       switch (qp->transport) {
+       case MLX:
                size += 2 * sizeof (struct mthca_data_seg);
-       else if (qp->transport == UD)
-               size += sizeof (struct mthca_ud_seg);
-       else /* bind seg is as big as atomic + raddr segs */
-               size += sizeof (struct mthca_bind_seg);
+               break;
+
+       case UD:
+               size += mthca_is_memfree(dev) ?
+                       sizeof (struct mthca_arbel_ud_seg) :
+                       sizeof (struct mthca_tavor_ud_seg);
+               break;
+
+       case UC:
+               size += sizeof (struct mthca_raddr_seg);
+               break;
+
+       case RC:
+               size += sizeof (struct mthca_raddr_seg);
+               /*
+                * An atomic op will require an atomic segment, a
+                * remote address segment and one scatter entry.
+                */
+               size = max_t(int, size,
+                            sizeof (struct mthca_atomic_seg) +
+                            sizeof (struct mthca_raddr_seg) +
+                            sizeof (struct mthca_data_seg));
+               break;
+
+       default:
+               break;
+       }
+
+       /* Make sure that we have enough space for a bind request */
+       size = max_t(int, size, sizeof (struct mthca_bind_seg));
+
+       size += sizeof (struct mthca_next_seg);
+
+       if (size > dev->limits.max_desc_sz)
+               return -EINVAL;
 
        for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
             qp->sq.wqe_shift++)
@@ -873,6 +1001,15 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 
        qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
                                    1 << qp->sq.wqe_shift);
+
+       /*
+        * If this is a userspace QP, we don't actually have to
+        * allocate anything.  All we need is to calculate the WQE
+        * sizes and the send_wqe_offset, so we're done now.
+        */
+       if (pd->ibpd.uobject)
+               return 0;
+
        size = PAGE_ALIGN(qp->send_wqe_offset +
                          (qp->sq.max << qp->sq.wqe_shift));
 
@@ -881,95 +1018,95 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
        if (!qp->wrid)
                goto err_out;
 
-       if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
-               qp->is_direct = 1;
-               npages = 1;
-               shift = get_order(size) + PAGE_SHIFT;
+       err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
+                             &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
+       if (err)
+               goto err_out;
 
-               if (0)
-                       mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
-                                 size, shift);
+       return 0;
 
-               qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
-               if (!qp->queue.direct.buf)
-                       goto err_out;
+err_out:
+       kfree(qp->wrid);
+       return err;
+}
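
The wqe_shift loops above compute the smallest power-of-two stride that
holds one descriptor, starting from the 64-byte hardware minimum, i.e.
max(6, ceil(log2(size))).  Worked example:

    /* size = 80 bytes:
     *   1 << 6 = 64  < 80   -> keep going
     *   1 << 7 = 128 >= 80  -> wqe_shift = 7, WQEs 128 bytes apart.
     * (Equivalently, max(6, ilog2(roundup_pow_of_two(size))).) */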
 
-               pci_unmap_addr_set(&qp->queue.direct, mapping, t);
+static void mthca_free_wqe_buf(struct mthca_dev *dev,
+                              struct mthca_qp *qp)
+{
+       mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
+                                      (qp->sq.max << qp->sq.wqe_shift)),
+                      &qp->queue, qp->is_direct, &qp->mr);
+       kfree(qp->wrid);
+}
 
-               memset(qp->queue.direct.buf, 0, size);
+static int mthca_map_memfree(struct mthca_dev *dev,
+                            struct mthca_qp *qp)
+{
+       int ret;
 
-               while (t & ((1 << shift) - 1)) {
-                       --shift;
-                       npages *= 2;
-               }
+       if (mthca_is_memfree(dev)) {
+               ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
+               if (ret)
+                       return ret;
 
-               dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-               if (!dma_list)
-                       goto err_out_free;
+               ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
+               if (ret)
+                       goto err_qpc;
 
-               for (i = 0; i < npages; ++i)
-                       dma_list[i] = t + i * (1 << shift);
-       } else {
-               qp->is_direct = 0;
-               npages = size / PAGE_SIZE;
-               shift = PAGE_SHIFT;
+               ret = mthca_table_get(dev, dev->qp_table.rdb_table,
+                                     qp->qpn << dev->qp_table.rdb_shift);
+               if (ret)
+                       goto err_eqpc;
 
-               if (0)
-                       mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);
+       }
 
-               dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-               if (!dma_list)
-                       goto err_out;
+       return 0;
 
-               qp->queue.page_list = kmalloc(npages *
-                                             sizeof *qp->queue.page_list,
-                                             GFP_KERNEL);
-               if (!qp->queue.page_list)
-                       goto err_out;
+err_eqpc:
+       mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
 
-               for (i = 0; i < npages; ++i) {
-                       qp->queue.page_list[i].buf =
-                               pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
-                       if (!qp->queue.page_list[i].buf)
-                               goto err_out_free;
+err_qpc:
+       mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
 
-                       memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);
+       return ret;
+}
 
-                       pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
-                       dma_list[i] = t;
-               }
-       }
-
-       err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
-                                 npages, 0, size,
-                                 MTHCA_MPT_FLAG_LOCAL_WRITE |
-                                 MTHCA_MPT_FLAG_LOCAL_READ,
-                                 &qp->mr);
-       if (err)
-               goto err_out_free;
-
-       kfree(dma_list);
-       return 0;
+static void mthca_unmap_memfree(struct mthca_dev *dev,
+                               struct mthca_qp *qp)
+{
+       mthca_table_put(dev, dev->qp_table.rdb_table,
+                       qp->qpn << dev->qp_table.rdb_shift);
+       mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
+       mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
+}
 
- err_out_free:
-       if (qp->is_direct) {
-               pci_free_consistent(dev->pdev, size,
-                                   qp->queue.direct.buf,
-                                   pci_unmap_addr(&qp->queue.direct, mapping));
-       } else
-               for (i = 0; i < npages; ++i) {
-                       if (qp->queue.page_list[i].buf)
-                               pci_free_consistent(dev->pdev, PAGE_SIZE,
-                                                   qp->queue.page_list[i].buf,
-                                                   pci_unmap_addr(&qp->queue.page_list[i],
-                                                                  mapping));
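+/*
+ * Mem-free HCAs also use doorbell records in host memory: the Arbel
+ * receive path rings the RQ purely by updating *qp->rq.db, and the
+ * send path updates *qp->sq.db in addition to writing the MMIO
+ * doorbell, so allocate one record per work queue here.
+ */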
+static int mthca_alloc_memfree(struct mthca_dev *dev,
+                              struct mthca_qp *qp)
+{
+       if (mthca_is_memfree(dev)) {
+               qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
+                                                qp->qpn, &qp->rq.db);
+               if (qp->rq.db_index < 0)
+                       return -ENOMEM;
+
+               qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
+                                                qp->qpn, &qp->sq.db);
+               if (qp->sq.db_index < 0) {
+                       mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
+                       return -ENOMEM;
+               }
+       }
 
-               }
+       return 0;
+}
 
- err_out:
-       kfree(qp->wrid);
-       kfree(dma_list);
-       return err;
+static void mthca_free_memfree(struct mthca_dev *dev,
+                              struct mthca_qp *qp)
+{
+       if (mthca_is_memfree(dev)) {
+               mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
+               mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
+       }
 }
 
 static int mthca_alloc_qp_common(struct mthca_dev *dev,
@@ -977,29 +1114,120 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
                                 struct mthca_cq *send_cq,
                                 struct mthca_cq *recv_cq,
                                 enum ib_sig_type send_policy,
-                                enum ib_sig_type recv_policy,
                                 struct mthca_qp *qp)
 {
-       int err;
+       int ret;
+       int i;
 
-       spin_lock_init(&qp->lock);
-       atomic_set(&qp->refcount, 1);
+       qp->refcount = 1;
+       init_waitqueue_head(&qp->wait);
+       mutex_init(&qp->mutex);
        qp->state        = IB_QPS_RESET;
        qp->atomic_rd_en = 0;
        qp->resp_depth   = 0;
-       qp->sq.policy    = send_policy;
-       qp->rq.policy    = recv_policy;
-       qp->rq.cur       = 0;
-       qp->sq.cur       = 0;
-       qp->rq.next      = 0;
-       qp->sq.next      = 0;
-       qp->rq.last_comp = qp->rq.max - 1;
-       qp->sq.last_comp = qp->sq.max - 1;
-       qp->rq.last      = NULL;
-       qp->sq.last      = NULL;
-
-       err = mthca_alloc_wqe_buf(dev, pd, qp);
-       return err;
+       qp->sq_policy    = send_policy;
+       mthca_wq_reset(&qp->sq);
+       mthca_wq_reset(&qp->rq);
+
+       spin_lock_init(&qp->sq.lock);
+       spin_lock_init(&qp->rq.lock);
+
+       ret = mthca_map_memfree(dev, qp);
+       if (ret)
+               return ret;
+
+       ret = mthca_alloc_wqe_buf(dev, pd, qp);
+       if (ret) {
+               mthca_unmap_memfree(dev, qp);
+               return ret;
+       }
+
+       mthca_adjust_qp_caps(dev, pd, qp);
+
+       /*
+        * If this is a userspace QP, we're done now.  The doorbells
+        * will be allocated and buffers will be initialized in
+        * userspace.
+        */
+       if (pd->ibpd.uobject)
+               return 0;
+
+       ret = mthca_alloc_memfree(dev, qp);
+       if (ret) {
+               mthca_free_wqe_buf(dev, qp);
+               mthca_unmap_memfree(dev, qp);
+               return ret;
+       }
+
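+       /*
+        * On mem-free HCAs, pre-link the WQEs of both queues into
+        * rings: each descriptor's nda_op points at its successor,
+        * and every receive scatter entry starts out with the
+        * invalid lkey so that unused entries read as list
+        * terminators.
+        */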
+       if (mthca_is_memfree(dev)) {
+               struct mthca_next_seg *next;
+               struct mthca_data_seg *scatter;
+               int size = (sizeof (struct mthca_next_seg) +
+                           qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
+
+               for (i = 0; i < qp->rq.max; ++i) {
+                       next = get_recv_wqe(qp, i);
+                       next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
+                                                  qp->rq.wqe_shift);
+                       next->ee_nds = cpu_to_be32(size);
+
+                       for (scatter = (void *) (next + 1);
+                            (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
+                            ++scatter)
+                               scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+               }
+
+               for (i = 0; i < qp->sq.max; ++i) {
+                       next = get_send_wqe(qp, i);
+                       next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
+                                                   qp->sq.wqe_shift) +
+                                                  qp->send_wqe_offset);
+               }
+       }
+
+       qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+       qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
+       return 0;
+}
+
+static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
+                            struct mthca_pd *pd, struct mthca_qp *qp)
+{
+       int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);
+
+       /* Sanity check QP size before proceeding */
+       if (cap->max_send_wr     > dev->limits.max_wqes ||
+           cap->max_recv_wr     > dev->limits.max_wqes ||
+           cap->max_send_sge    > dev->limits.max_sg   ||
+           cap->max_recv_sge    > dev->limits.max_sg   ||
+           cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
+               return -EINVAL;
+
+       /*
+        * For MLX transport we need 2 extra S/G entries:
+        * one for the header and one for the checksum at the end
+        */
+       if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
+               return -EINVAL;
+
+       if (mthca_is_memfree(dev)) {
+               qp->rq.max = cap->max_recv_wr ?
+                       roundup_pow_of_two(cap->max_recv_wr) : 0;
+               qp->sq.max = cap->max_send_wr ?
+                       roundup_pow_of_two(cap->max_send_wr) : 0;
+       } else {
+               qp->rq.max = cap->max_recv_wr;
+               qp->sq.max = cap->max_send_wr;
+       }
+
+       qp->rq.max_gs = cap->max_recv_sge;
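+       /*
+        * Inline data is written into the space the gather list
+        * would occupy, so size the gather list to cover it: the
+        * data plus the 4-byte inline header, rounded up to 16-byte
+        * chunks, divided by the 16-byte data segment size.  For
+        * example, max_inline_data == 60 needs ALIGN(60 + 4, 16) /
+        * 16 == 4 gather entries.
+        */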
+       qp->sq.max_gs = max_t(int, cap->max_send_sge,
+                             ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
+                                   MTHCA_INLINE_CHUNK_SIZE) /
+                             sizeof (struct mthca_data_seg));
+
+       return 0;
 }
 
 int mthca_alloc_qp(struct mthca_dev *dev,
@@ -1008,7 +1236,7 @@ int mthca_alloc_qp(struct mthca_dev *dev,
                   struct mthca_cq *recv_cq,
                   enum ib_qp_type type,
                   enum ib_sig_type send_policy,
-                  enum ib_sig_type recv_policy,
+                  struct ib_qp_cap *cap,
                   struct mthca_qp *qp)
 {
        int err;
@@ -1020,12 +1248,19 @@ int mthca_alloc_qp(struct mthca_dev *dev,
        default: return -EINVAL;
        }
 
+       err = mthca_set_qp_size(dev, cap, pd, qp);
+       if (err)
+               return err;
+
        qp->qpn = mthca_alloc(&dev->qp_table.alloc);
        if (qp->qpn == -1)
                return -ENOMEM;
 
+       /* Initialize port to zero for error catching. */
+       qp->port = 0;
+
        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
-                                   send_policy, recv_policy, qp);
+                                   send_policy, qp);
        if (err) {
                mthca_free(&dev->qp_table.alloc, qp->qpn);
                return err;
@@ -1039,18 +1274,49 @@ int mthca_alloc_qp(struct mthca_dev *dev,
        return 0;
 }
 
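+/*
+ * Take both completion queue locks in a globally consistent order
+ * (lower CQN first) so that QPs sharing a pair of CQs can never
+ * deadlock against each other; spin_lock_nested() tells lockdep
+ * that acquiring two locks of the same class here is intentional.
+ */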
+static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+{
+       if (send_cq == recv_cq)
+               spin_lock_irq(&send_cq->lock);
+       else if (send_cq->cqn < recv_cq->cqn) {
+               spin_lock_irq(&send_cq->lock);
+               spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+       } else {
+               spin_lock_irq(&recv_cq->lock);
+               spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+       }
+}
+
+static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+{
+       if (send_cq == recv_cq)
+               spin_unlock_irq(&send_cq->lock);
+       else if (send_cq->cqn < recv_cq->cqn) {
+               spin_unlock(&recv_cq->lock);
+               spin_unlock_irq(&send_cq->lock);
+       } else {
+               spin_unlock(&send_cq->lock);
+               spin_unlock_irq(&recv_cq->lock);
+       }
+}
+
 int mthca_alloc_sqp(struct mthca_dev *dev,
                    struct mthca_pd *pd,
                    struct mthca_cq *send_cq,
                    struct mthca_cq *recv_cq,
                    enum ib_sig_type send_policy,
-                   enum ib_sig_type recv_policy,
+                   struct ib_qp_cap *cap,
                    int qpn,
                    int port,
                    struct mthca_sqp *sqp)
 {
-       int err = 0;
        u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
+       int err;
+
+       sqp->qp.transport = MLX;
+       err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
+       if (err)
+               return err;
 
        sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
        sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1068,13 +1334,12 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
        if (err)
                goto err_out;
 
-       sqp->port = port;
+       sqp->qp.port      = port;
        sqp->qp.qpn       = mqpn;
        sqp->qp.transport = MLX;
 
        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
-                                   send_policy, recv_policy,
-                                   &sqp->qp);
+                                   send_policy, &sqp->qp);
        if (err)
                goto err_out_free;
 
@@ -1083,9 +1348,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
        return 0;
 
  err_out_free:
-       spin_lock_irq(&dev->qp_table.lock);
+       /*
+        * Lock CQs here, so that CQ polling code can do QP lookup
+        * without taking a lock.
+        */
+       mthca_lock_cqs(send_cq, recv_cq);
+
+       spin_lock(&dev->qp_table.lock);
        mthca_array_clear(&dev->qp_table.qp, mqpn);
-       spin_unlock_irq(&dev->qp_table.lock);
+       spin_unlock(&dev->qp_table.lock);
+
+       mthca_unlock_cqs(send_cq, recv_cq);
 
  err_out:
        dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1094,47 +1367,64 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
        return err;
 }
 
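+/*
+ * qp->refcount is protected by dev->qp_table.lock rather than being
+ * an atomic_t, so the wait_event() in mthca_free_qp() must sample
+ * it under that lock.
+ */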
+static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
+{
+       int c;
+
+       spin_lock_irq(&dev->qp_table.lock);
+       c = qp->refcount;
+       spin_unlock_irq(&dev->qp_table.lock);
+
+       return c;
+}
+
 void mthca_free_qp(struct mthca_dev *dev,
                   struct mthca_qp *qp)
 {
        u8 status;
-       int size;
-       int i;
+       struct mthca_cq *send_cq;
+       struct mthca_cq *recv_cq;
 
-       spin_lock_irq(&dev->qp_table.lock);
-       mthca_array_clear(&dev->qp_table.qp,
-                         qp->qpn & (dev->limits.num_qps - 1));
-       spin_unlock_irq(&dev->qp_table.lock);
+       send_cq = to_mcq(qp->ibqp.send_cq);
+       recv_cq = to_mcq(qp->ibqp.recv_cq);
 
-       atomic_dec(&qp->refcount);
-       wait_event(qp->wait, !atomic_read(&qp->refcount));
+       /*
+        * Lock CQs here, so that CQ polling code can do QP lookup
+        * without taking a lock.
+        */
+       mthca_lock_cqs(send_cq, recv_cq);
 
-       if (qp->state != IB_QPS_RESET)
-               mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);
+       spin_lock(&dev->qp_table.lock);
+       mthca_array_clear(&dev->qp_table.qp,
+                         qp->qpn & (dev->limits.num_qps - 1));
+       --qp->refcount;
+       spin_unlock(&dev->qp_table.lock);
 
-       mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
-       if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-               mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);
+       mthca_unlock_cqs(send_cq, recv_cq);
 
-       mthca_free_mr(dev, &qp->mr);
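+       /*
+        * The QP is now invisible to new lookups; wait for any CQ
+        * polling code that still holds a reference to drop it
+        * before tearing the QP down.
+        */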
+       wait_event(qp->wait, !get_qp_refcount(dev, qp));
 
-       size = PAGE_ALIGN(qp->send_wqe_offset +
-                         (qp->sq.max << qp->sq.wqe_shift));
+       if (qp->state != IB_QPS_RESET)
+               mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
+                               NULL, 0, &status);
 
-       if (qp->is_direct) {
-               pci_free_consistent(dev->pdev, size,
-                                   qp->queue.direct.buf,
-                                   pci_unmap_addr(&qp->queue.direct, mapping));
-       } else {
-               for (i = 0; i < size / PAGE_SIZE; ++i) {
-                       pci_free_consistent(dev->pdev, PAGE_SIZE,
-                                           qp->queue.page_list[i].buf,
-                                           pci_unmap_addr(&qp->queue.page_list[i],
-                                                          mapping));
-               }
+       /*
+        * If this is a userspace QP, the buffers, MR, CQs and so on
+        * will be cleaned up in userspace, so all we have to do is
+        * unref the mem-free tables and free the QPN in our table.
+        */
+       if (!qp->ibqp.uobject) {
+               mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
+                              qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+               if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
+                       mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
+                                      qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+
+               mthca_free_memfree(dev, qp);
+               mthca_free_wqe_buf(dev, qp);
        }
 
-       kfree(qp->wrid);
+       mthca_unmap_memfree(dev, qp);
 
        if (is_sqp(dev, qp)) {
                atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
@@ -1142,8 +1432,7 @@ void mthca_free_qp(struct mthca_dev *dev,
                                  to_msqp(qp)->header_buf_size,
                                  to_msqp(qp)->header_buf,
                                  to_msqp(qp)->header_dma);
-       }
-       else
+       } else
                mthca_free(&dev->qp_table.alloc, qp->qpn);
 }
 
@@ -1155,9 +1444,10 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
 {
        int header_size;
        int err;
+       u16 pkey;
 
        ib_ud_header_init(256, /* assume a MAD */
-                         sqp->ud_header.grh_present,
+                         mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
                          &sqp->ud_header);
 
        err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
@@ -1165,8 +1455,8 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
                return err;
        mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
        mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
-                                 (sqp->ud_header.lrh.destination_lid == 0xffff ?
-                                  MTHCA_MLX_SLR : 0) |
+                                 (sqp->ud_header.lrh.destination_lid ==
+                                  IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
                                  (sqp->ud_header.lrh.service_level << 8));
        mlx->rlid = sqp->ud_header.lrh.destination_lid;
        mlx->vcrc = 0;
@@ -1186,18 +1476,16 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
        }
 
        sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
-       if (sqp->ud_header.lrh.destination_lid == 0xffff)
-               sqp->ud_header.lrh.source_lid = 0xffff;
+       if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
+               sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
        sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
        if (!sqp->qp.ibqp.qp_num)
-               ib_get_cached_pkey(&dev->ib_dev, sqp->port,
-                                  sqp->pkey_index,
-                                  &sqp->ud_header.bth.pkey);
+               ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
+                                  sqp->pkey_index, &pkey);
        else
-               ib_get_cached_pkey(&dev->ib_dev, sqp->port,
-                                  wr->wr.ud.pkey_index,
-                                  &sqp->ud_header.bth.pkey);
-       cpu_to_be16s(&sqp->ud_header.bth.pkey);
+               ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
+                                  wr->wr.ud.pkey_index, &pkey);
+       sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
        sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
        sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
@@ -1216,8 +1504,26 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
        return 0;
 }
 
-int mthca_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-                   struct ib_send_wr **bad_wr)
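+/*
+ * Decide whether posting nreq more WQEs would overflow the work
+ * queue.  wq->tail is only advanced by completion processing, under
+ * the CQ lock, so the unlocked fast path can at worst see a stale
+ * (too small) tail; if the queue then looks full, re-read the tail
+ * under the CQ lock before declaring overflow.
+ */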
+static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
+                                   struct ib_cq *ib_cq)
+{
+       unsigned cur;
+       struct mthca_cq *cq;
+
+       cur = wq->head - wq->tail;
+       if (likely(cur + nreq < wq->max))
+               return 0;
+
+       cq = to_mcq(ib_cq);
+       spin_lock(&cq->lock);
+       cur = wq->head - wq->tail;
+       spin_unlock(&cq->lock);
+
+       return cur + nreq >= wq->max;
+}
+
+int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+                         struct ib_send_wr **bad_wr)
 {
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
@@ -1229,30 +1535,22 @@ int mthca_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        int i;
        int size;
        int size0 = 0;
-       u32 f0 = 0;
+       u32 f0;
        int ind;
        u8 op0 = 0;
 
-       static const u8 opcode[] = {
-               [IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
-               [IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
-               [IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
-               [IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
-               [IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
-               [IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
-               [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
-       };
-
-       spin_lock_irqsave(&qp->lock, flags);
+       spin_lock_irqsave(&qp->sq.lock, flags);
 
        /* XXX check that state is OK to post send */
 
-       ind = qp->sq.next;
+       ind = qp->sq.next_ind;
 
        for (nreq = 0; wr; ++nreq, wr = wr->next) {
-               if (qp->sq.cur + nreq >= qp->sq.max) {
-                       mthca_err(dev, "SQ full (%d posted, %d max, %d nreq)\n",
-                                 qp->sq.cur, qp->sq.max, nreq);
+               if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
+                       mthca_err(dev, "SQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", qp->qpn,
+                                       qp->sq.head, qp->sq.tail,
+                                       qp->sq.max, nreq);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
@@ -1272,7 +1570,7 @@ int mthca_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        cpu_to_be32(1);
                if (wr->opcode == IB_WR_SEND_WITH_IMM ||
                    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-                       ((struct mthca_next_seg *) wqe)->flags = wr->imm_data;
+                       ((struct mthca_next_seg *) wqe)->imm = wr->imm_data;
 
                wqe += sizeof (struct mthca_next_seg);
                size = sizeof (struct mthca_next_seg) / 16;
@@ -1302,8 +1600,8 @@ int mthca_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                }
 
                                wqe += sizeof (struct mthca_atomic_seg);
-                               size += sizeof (struct mthca_raddr_seg) / 16 +
-                                       sizeof (struct mthca_atomic_seg);
+                               size += (sizeof (struct mthca_raddr_seg) +
+                                        sizeof (struct mthca_atomic_seg)) / 16;
                                break;
 
                        case IB_WR_RDMA_WRITE:
@@ -1325,18 +1623,38 @@ int mthca_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
                        break;
 
+               case UC:
+                       switch (wr->opcode) {
+                       case IB_WR_RDMA_WRITE:
+                       case IB_WR_RDMA_WRITE_WITH_IMM:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cpu_to_be64(wr->wr.rdma.remote_addr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cpu_to_be32(wr->wr.rdma.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+                               wqe += sizeof (struct mthca_raddr_seg);
+                               size += sizeof (struct mthca_raddr_seg) / 16;
+                               break;
+
+                       default:
+                               /* No extra segments required for sends */
+                               break;
+                       }
+
+                       break;
+
                case UD:
-                       ((struct mthca_ud_seg *) wqe)->lkey =
+                       ((struct mthca_tavor_ud_seg *) wqe)->lkey =
                                cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
-                       ((struct mthca_ud_seg *) wqe)->av_addr =
+                       ((struct mthca_tavor_ud_seg *) wqe)->av_addr =
                                cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
-                       ((struct mthca_ud_seg *) wqe)->dqpn =
+                       ((struct mthca_tavor_ud_seg *) wqe)->dqpn =
                                cpu_to_be32(wr->wr.ud.remote_qpn);
-                       ((struct mthca_ud_seg *) wqe)->qkey =
+                       ((struct mthca_tavor_ud_seg *) wqe)->qkey =
                                cpu_to_be32(wr->wr.ud.remote_qkey);
 
-                       wqe += sizeof (struct mthca_ud_seg);
-                       size += sizeof (struct mthca_ud_seg) / 16;
+                       wqe += sizeof (struct mthca_tavor_ud_seg);
+                       size += sizeof (struct mthca_tavor_ud_seg) / 16;
                        break;
 
                case MLX:
@@ -1381,26 +1699,28 @@ int mthca_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
                qp->wrid[ind + qp->rq.max] = wr->wr_id;
 
-               if (wr->opcode >= ARRAY_SIZE(opcode)) {
+               if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
                        mthca_err(dev, "opcode invalid\n");
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }
 
-               if (prev_wqe) {
-                       ((struct mthca_next_seg *) prev_wqe)->nda_op =
-                               cpu_to_be32(((ind << qp->sq.wqe_shift) +
-                                            qp->send_wqe_offset) |
-                                           opcode[wr->opcode]);
-                       smp_wmb();
-                       ((struct mthca_next_seg *) prev_wqe)->ee_nds =
-                               cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
-               }
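+               /*
+                * Link the previous descriptor to this one: publish
+                * the next address and opcode first and only then,
+                * after a write barrier, the size/DBD word, so that
+                * the HCA never chases a link whose target is not
+                * completely written yet.
+                */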
+               ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                       cpu_to_be32(((ind << qp->sq.wqe_shift) +
+                                    qp->send_wqe_offset) |
+                                   mthca_opcode[wr->opcode]);
+               wmb();
+               ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                       cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
+                                   ((wr->send_flags & IB_SEND_FENCE) ?
+                                   MTHCA_NEXT_FENCE : 0));
 
                if (!size0) {
                        size0 = size;
-                       op0   = opcode[wr->opcode];
+                       op0   = mthca_opcode[wr->opcode];
+                       f0    = wr->send_flags & IB_SEND_FENCE ?
+                               MTHCA_SEND_DOORBELL_FENCE : 0;
                }
 
                ++ind;
@@ -1409,10 +1729,10 @@ int mthca_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        }
 
 out:
-       if (nreq) {
-               u32 doorbell[2];
+       if (likely(nreq)) {
+               __be32 doorbell[2];
 
-               doorbell[0] = cpu_to_be32(((qp->sq.next << qp->sq.wqe_shift) +
+               doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
                                           qp->send_wqe_offset) | f0 | op0);
                doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
 
@@ -1421,20 +1741,26 @@ out:
                mthca_write64(doorbell,
                              dev->kar + MTHCA_SEND_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+               /*
+                * Make sure doorbells don't leak out of SQ spinlock
+                * and reach the HCA out of order:
+                */
+               mmiowb();
        }
 
-       qp->sq.cur += nreq;
-       qp->sq.next = ind;
+       qp->sq.next_ind = ind;
+       qp->sq.head    += nreq;
 
-       spin_unlock_irqrestore(&qp->lock, flags);
+       spin_unlock_irqrestore(&qp->sq.lock, flags);
        return err;
 }
 
-int mthca_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-                      struct ib_recv_wr **bad_wr)
+int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+                            struct ib_recv_wr **bad_wr)
 {
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
+       __be32 doorbell[2];
        unsigned long flags;
        int err = 0;
        int nreq;
@@ -1445,15 +1771,18 @@ int mthca_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
        void *wqe;
        void *prev_wqe;
 
-       spin_lock_irqsave(&qp->lock, flags);
+       spin_lock_irqsave(&qp->rq.lock, flags);
 
        /* XXX check that state is OK to post receive */
 
-       ind = qp->rq.next;
+       ind = qp->rq.next_ind;
 
-       for (nreq = 0; wr; ++nreq, wr = wr->next) {
-               if (qp->rq.cur + nreq >= qp->rq.max) {
-                       mthca_err(dev, "RQ %06x full\n", qp->qpn);
+       for (nreq = 0; wr; wr = wr->next) {
+               if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
+                       mthca_err(dev, "RQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", qp->qpn,
+                                       qp->rq.head, qp->rq.tail,
+                                       qp->rq.max, nreq);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
@@ -1466,14 +1795,12 @@ int mthca_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                ((struct mthca_next_seg *) wqe)->nda_op = 0;
                ((struct mthca_next_seg *) wqe)->ee_nds =
                        cpu_to_be32(MTHCA_NEXT_DBD);
-               ((struct mthca_next_seg *) wqe)->flags =
-                       (wr->recv_flags & IB_RECV_SIGNALED) ?
-                       cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0;
+               ((struct mthca_next_seg *) wqe)->flags = 0;
 
                wqe += sizeof (struct mthca_next_seg);
                size = sizeof (struct mthca_next_seg) / 16;
 
-               if (wr->num_sge > qp->rq.max_gs) {
+               if (unlikely(wr->num_sge > qp->rq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
@@ -1492,13 +1819,11 @@ int mthca_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
                qp->wrid[ind] = wr->wr_id;
 
-               if (prev_wqe) {
-                       ((struct mthca_next_seg *) prev_wqe)->nda_op =
-                               cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
-                       smp_wmb();
-                       ((struct mthca_next_seg *) prev_wqe)->ee_nds =
-                               cpu_to_be32(MTHCA_NEXT_DBD | size);
-               }
+               ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                       cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
+               wmb();
+               ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                       cpu_to_be32(MTHCA_NEXT_DBD | size);
 
                if (!size0)
                        size0 = size;
@@ -1506,13 +1831,28 @@ int mthca_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                ++ind;
                if (unlikely(ind >= qp->rq.max))
                        ind -= qp->rq.max;
+
+               ++nreq;
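+               /*
+                * The count field of a Tavor receive doorbell is too
+                * narrow to describe an arbitrarily long chain of
+                * WQEs, so ring an intermediate doorbell after each
+                * full batch; the count is left as zero here, which
+                * appears to denote a maximal batch to the HCA.
+                */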
+               if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+                       nreq = 0;
+
+                       doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+                       doorbell[1] = cpu_to_be32(qp->qpn << 8);
+
+                       wmb();
+
+                       mthca_write64(doorbell,
+                                     dev->kar + MTHCA_RECEIVE_DOORBELL,
+                                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+                       qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+                       size0 = 0;
+               }
        }
 
 out:
-       if (nreq) {
-               u32 doorbell[2];
-
-               doorbell[0] = cpu_to_be32((qp->rq.next << qp->rq.wqe_shift) | size0);
+       if (likely(nreq)) {
+               doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
                doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);
 
                wmb();
@@ -1522,18 +1862,379 @@ out:
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }
 
-       qp->rq.cur += nreq;
-       qp->rq.next = ind;
+       qp->rq.next_ind = ind;
+       qp->rq.head    += nreq;
+
+       /*
+        * Make sure doorbells don't leak out of RQ spinlock and reach
+        * the HCA out of order:
+        */
+       mmiowb();
+
+       spin_unlock_irqrestore(&qp->rq.lock, flags);
+       return err;
+}
+
+int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+                         struct ib_send_wr **bad_wr)
+{
+       struct mthca_dev *dev = to_mdev(ibqp->device);
+       struct mthca_qp *qp = to_mqp(ibqp);
+       __be32 doorbell[2];
+       void *wqe;
+       void *prev_wqe;
+       unsigned long flags;
+       int err = 0;
+       int nreq;
+       int i;
+       int size;
+       int size0 = 0;
+       u32 f0;
+       int ind;
+       u8 op0 = 0;
+
+       spin_lock_irqsave(&qp->sq.lock, flags);
+
+       /* XXX check that state is OK to post send */
+
+       ind = qp->sq.head & (qp->sq.max - 1);
+
+       for (nreq = 0; wr; ++nreq, wr = wr->next) {
+               if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
+                       nreq = 0;
+
+                       doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
+                                                 ((qp->sq.head & 0xffff) << 8) |
+                                                 f0 | op0);
+                       doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
+
+                       qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
+                       size0 = 0;
+
+                       /*
+                        * Make sure that descriptors are written before
+                        * doorbell record.
+                        */
+                       wmb();
+                       *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
+
+                       /*
+                        * Make sure doorbell record is written before we
+                        * write MMIO send doorbell.
+                        */
+                       wmb();
+                       mthca_write64(doorbell,
+                                     dev->kar + MTHCA_SEND_DOORBELL,
+                                     MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+               }
+
+               if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
+                       mthca_err(dev, "SQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", qp->qpn,
+                                       qp->sq.head, qp->sq.tail,
+                                       qp->sq.max, nreq);
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe = get_send_wqe(qp, ind);
+               prev_wqe = qp->sq.last;
+               qp->sq.last = wqe;
+
+               ((struct mthca_next_seg *) wqe)->flags =
+                       ((wr->send_flags & IB_SEND_SIGNALED) ?
+                        cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
+                       ((wr->send_flags & IB_SEND_SOLICITED) ?
+                        cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
+                       cpu_to_be32(1);
+               if (wr->opcode == IB_WR_SEND_WITH_IMM ||
+                   wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+                       ((struct mthca_next_seg *) wqe)->imm = wr->imm_data;
+
+               wqe += sizeof (struct mthca_next_seg);
+               size = sizeof (struct mthca_next_seg) / 16;
+
+               switch (qp->transport) {
+               case RC:
+                       switch (wr->opcode) {
+                       case IB_WR_ATOMIC_CMP_AND_SWP:
+                       case IB_WR_ATOMIC_FETCH_AND_ADD:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cpu_to_be64(wr->wr.atomic.remote_addr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cpu_to_be32(wr->wr.atomic.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+
+                               wqe += sizeof (struct mthca_raddr_seg);
+
+                               if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+                                       ((struct mthca_atomic_seg *) wqe)->swap_add =
+                                               cpu_to_be64(wr->wr.atomic.swap);
+                                       ((struct mthca_atomic_seg *) wqe)->compare =
+                                               cpu_to_be64(wr->wr.atomic.compare_add);
+                               } else {
+                                       ((struct mthca_atomic_seg *) wqe)->swap_add =
+                                               cpu_to_be64(wr->wr.atomic.compare_add);
+                                       ((struct mthca_atomic_seg *) wqe)->compare = 0;
+                               }
+
+                               wqe += sizeof (struct mthca_atomic_seg);
+                               size += (sizeof (struct mthca_raddr_seg) +
+                                        sizeof (struct mthca_atomic_seg)) / 16;
+                               break;
+
+                       case IB_WR_RDMA_READ:
+                       case IB_WR_RDMA_WRITE:
+                       case IB_WR_RDMA_WRITE_WITH_IMM:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cpu_to_be64(wr->wr.rdma.remote_addr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cpu_to_be32(wr->wr.rdma.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+                               wqe += sizeof (struct mthca_raddr_seg);
+                               size += sizeof (struct mthca_raddr_seg) / 16;
+                               break;
+
+                       default:
+                               /* No extra segments required for sends */
+                               break;
+                       }
+
+                       break;
+
+               case UC:
+                       switch (wr->opcode) {
+                       case IB_WR_RDMA_WRITE:
+                       case IB_WR_RDMA_WRITE_WITH_IMM:
+                               ((struct mthca_raddr_seg *) wqe)->raddr =
+                                       cpu_to_be64(wr->wr.rdma.remote_addr);
+                               ((struct mthca_raddr_seg *) wqe)->rkey =
+                                       cpu_to_be32(wr->wr.rdma.rkey);
+                               ((struct mthca_raddr_seg *) wqe)->reserved = 0;
+                               wqe += sizeof (struct mthca_raddr_seg);
+                               size += sizeof (struct mthca_raddr_seg) / 16;
+                               break;
+
+                       default:
+                               /* No extra segments required for sends */
+                               break;
+                       }
+
+                       break;
+
+               case UD:
+                       memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
+                              to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
+                       ((struct mthca_arbel_ud_seg *) wqe)->dqpn =
+                               cpu_to_be32(wr->wr.ud.remote_qpn);
+                       ((struct mthca_arbel_ud_seg *) wqe)->qkey =
+                               cpu_to_be32(wr->wr.ud.remote_qkey);
+
+                       wqe += sizeof (struct mthca_arbel_ud_seg);
+                       size += sizeof (struct mthca_arbel_ud_seg) / 16;
+                       break;
+
+               case MLX:
+                       err = build_mlx_header(dev, to_msqp(qp), ind, wr,
+                                              wqe - sizeof (struct mthca_next_seg),
+                                              wqe);
+                       if (err) {
+                               *bad_wr = wr;
+                               goto out;
+                       }
+                       wqe += sizeof (struct mthca_data_seg);
+                       size += sizeof (struct mthca_data_seg) / 16;
+                       break;
+               }
+
+               if (wr->num_sge > qp->sq.max_gs) {
+                       mthca_err(dev, "too many gathers\n");
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               for (i = 0; i < wr->num_sge; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cpu_to_be32(wr->sg_list[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cpu_to_be32(wr->sg_list[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               cpu_to_be64(wr->sg_list[i].addr);
+                       wqe += sizeof (struct mthca_data_seg);
+                       size += sizeof (struct mthca_data_seg) / 16;
+               }
+
+               /* Add one more inline data segment for ICRC */
+               if (qp->transport == MLX) {
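+                       /*
+                        * The high bit of byte_count flags the
+                        * segment as inline data; the four zeroed
+                        * bytes reserve room for the ICRC, which the
+                        * HCA appears to compute and insert itself
+                        * for MLX sends.
+                        */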
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cpu_to_be32((1 << 31) | 4);
+                       ((u32 *) wqe)[1] = 0;
+                       wqe += sizeof (struct mthca_data_seg);
+                       size += sizeof (struct mthca_data_seg) / 16;
+               }
+
+               qp->wrid[ind + qp->rq.max] = wr->wr_id;
+
+               if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
+                       mthca_err(dev, "opcode invalid\n");
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               ((struct mthca_next_seg *) prev_wqe)->nda_op =
+                       cpu_to_be32(((ind << qp->sq.wqe_shift) +
+                                    qp->send_wqe_offset) |
+                                   mthca_opcode[wr->opcode]);
+               wmb();
+               ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+                       cpu_to_be32(MTHCA_NEXT_DBD | size |
+                                   ((wr->send_flags & IB_SEND_FENCE) ?
+                                    MTHCA_NEXT_FENCE : 0));
+
+               if (!size0) {
+                       size0 = size;
+                       op0   = mthca_opcode[wr->opcode];
+                       f0    = wr->send_flags & IB_SEND_FENCE ?
+                               MTHCA_SEND_DOORBELL_FENCE : 0;
+               }
+
+               ++ind;
+               if (unlikely(ind >= qp->sq.max))
+                       ind -= qp->sq.max;
+       }
+
+out:
+       if (likely(nreq)) {
+               doorbell[0] = cpu_to_be32((nreq << 24)                  |
+                                         ((qp->sq.head & 0xffff) << 8) |
+                                         f0 | op0);
+               doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
+
+               qp->sq.head += nreq;
+
+               /*
+                * Make sure that descriptors are written before
+                * doorbell record.
+                */
+               wmb();
+               *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
+
+               /*
+                * Make sure doorbell record is written before we
+                * write MMIO send doorbell.
+                */
+               wmb();
+               mthca_write64(doorbell,
+                             dev->kar + MTHCA_SEND_DOORBELL,
+                             MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+       }
+
+       /*
+        * Make sure doorbells don't leak out of SQ spinlock and reach
+        * the HCA out of order:
+        */
+       mmiowb();
+
+       spin_unlock_irqrestore(&qp->sq.lock, flags);
+       return err;
+}
+
+int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+                            struct ib_recv_wr **bad_wr)
+{
+       struct mthca_dev *dev = to_mdev(ibqp->device);
+       struct mthca_qp *qp = to_mqp(ibqp);
+       unsigned long flags;
+       int err = 0;
+       int nreq;
+       int ind;
+       int i;
+       void *wqe;
+
+       spin_lock_irqsave(&qp->rq.lock, flags);
+
+       /* XXX check that state is OK to post receive */
+
+       ind = qp->rq.head & (qp->rq.max - 1);
+
+       for (nreq = 0; wr; ++nreq, wr = wr->next) {
+               if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
+                       mthca_err(dev, "RQ %06x full (%u head, %u tail,"
+                                       " %d max, %d nreq)\n", qp->qpn,
+                                       qp->rq.head, qp->rq.tail,
+                                       qp->rq.max, nreq);
+                       err = -ENOMEM;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               wqe = get_recv_wqe(qp, ind);
+
+               ((struct mthca_next_seg *) wqe)->flags = 0;
+
+               wqe += sizeof (struct mthca_next_seg);
+
+               if (unlikely(wr->num_sge > qp->rq.max_gs)) {
+                       err = -EINVAL;
+                       *bad_wr = wr;
+                       goto out;
+               }
+
+               for (i = 0; i < wr->num_sge; ++i) {
+                       ((struct mthca_data_seg *) wqe)->byte_count =
+                               cpu_to_be32(wr->sg_list[i].length);
+                       ((struct mthca_data_seg *) wqe)->lkey =
+                               cpu_to_be32(wr->sg_list[i].lkey);
+                       ((struct mthca_data_seg *) wqe)->addr =
+                               cpu_to_be64(wr->sg_list[i].addr);
+                       wqe += sizeof (struct mthca_data_seg);
+               }
+
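+               /*
+                * Terminate a partially used scatter list with an
+                * invalid-lkey sentinel so the HCA knows where the
+                * real entries end.
+                */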
+               if (i < qp->rq.max_gs) {
+                       ((struct mthca_data_seg *) wqe)->byte_count = 0;
+                       ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+                       ((struct mthca_data_seg *) wqe)->addr = 0;
+               }
+
+               qp->wrid[ind] = wr->wr_id;
+
+               ++ind;
+               if (unlikely(ind >= qp->rq.max))
+                       ind -= qp->rq.max;
+       }
+out:
+       if (likely(nreq)) {
+               qp->rq.head += nreq;
+
+               /*
+                * Make sure that descriptors are written before
+                * doorbell record.
+                */
+               wmb();
+               *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
+       }
 
-       spin_unlock_irqrestore(&qp->lock, flags);
+       spin_unlock_irqrestore(&qp->rq.lock, flags);
        return err;
 }
 
-int mthca_free_err_wqe(struct mthca_qp *qp, int is_send,
-                      int index, int *dbd, u32 *new_wqe)
+void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
+                       int index, int *dbd, __be32 *new_wqe)
 {
        struct mthca_next_seg *next;
 
+       /*
+        * For SRQs, all WQEs generate a CQE, so we're always at the
+        * end of the doorbell chain.
+        */
+       if (qp->ibqp.srq) {
+               *new_wqe = 0;
+               return;
+       }
+
        if (is_send)
                next = get_send_wqe(qp, index);
        else
@@ -1545,11 +2246,9 @@ int mthca_free_err_wqe(struct mthca_qp *qp, int is_send,
                        (next->ee_nds & cpu_to_be32(0x3f));
        else
                *new_wqe = 0;
-
-       return 0;
 }
 
-int __devinit mthca_init_qp_table(struct mthca_dev *dev)
+int mthca_init_qp_table(struct mthca_dev *dev)
 {
        int err;
        u8 status;
@@ -1603,7 +2302,7 @@ int __devinit mthca_init_qp_table(struct mthca_dev *dev)
        return err;
 }
 
-void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
+void mthca_cleanup_qp_table(struct mthca_dev *dev)
 {
        int i;
        u8 status;
@@ -1611,5 +2310,6 @@ void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
        for (i = 0; i < 2; ++i)
                mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
 
+       mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
        mthca_alloc_cleanup(&dev->qp_table.alloc);
 }