/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* We have one global lock that protects dev->cq/qp_table. Each
* struct mthca_cq/qp also has its own lock. An individual qp lock
* may be taken inside of an individual cq lock. Both cqs attached to
- * a qp may be locked, with the cq with the lower cqn locked first.
- * No other nesting should be done.
+ * a qp may be locked, with the send cq locked first. No other
+ * nesting should be done.
*
- * Each struct mthca_cq/qp also has an ref count, protected by the
- * corresponding table lock. The pointer from the cq/qp_table to the
- * struct counts as one reference. This reference also is good for
- * access through the consumer API, so modifying the CQ/QP etc doesn't
- * need to take another reference. Access to a QP because of a
- * completion being polled does not need a reference either.
+ * Each struct mthca_cq/qp also has an atomic_t ref count. The
+ * pointer from the cq/qp_table to the struct counts as one reference.
+ * This reference also is good for access through the consumer API, so
+ * modifying the CQ/QP etc doesn't need to take another reference.
+ * Access because of a completion being polled does need a reference.
*
* Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
* destroy function to sleep on.
* - decrement ref count; if zero, wake up waiters
*
* To destroy a CQ/QP, we can do the following:
- * - lock cq/qp_table
- * - remove pointer and decrement ref count
- * - unlock cq/qp_table lock
+ * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
+ * - decrement ref count
* - wait_event until ref count is zero
*
* It is the consumer's responsibility to make sure that no QP
- * operations (WQE posting or state modification) are pending when a
+ * operations (WQE posting or state modification) are pending when the
* QP is destroyed. Also, the consumer must make sure that calls to
- * qp_modify are serialized. Similarly, the consumer is responsible
- * for ensuring that no CQ resize operations are pending when a CQ
- * is destroyed.
+ * qp_modify are serialized.
*
* Possible optimizations (wait for profile data to see if/where we
* have locks bouncing between CPUs):
* send queue and one for the receive queue)
*/
-struct mthca_cq_buf {
- union mthca_buf queue;
- struct mthca_mr mr;
- int is_direct;
-};
-
-struct mthca_cq_resize {
- struct mthca_cq_buf buf;
- int cqe;
- enum {
- CQ_RESIZE_ALLOC,
- CQ_RESIZE_READY,
- CQ_RESIZE_SWAPPED
- } state;
-};
-
struct mthca_cq {
- struct ib_cq ibcq;
- spinlock_t lock;
- int refcount;
- int cqn;
- u32 cons_index;
- struct mthca_cq_buf buf;
- struct mthca_cq_resize *resize_buf;
- int is_kernel;
+ struct ib_cq ibcq;
+ spinlock_t lock;
+ atomic_t refcount;
+ int cqn;
+ u32 cons_index;
+ int is_direct;
+ int is_kernel;
/* Next fields are Arbel only */
- int set_ci_db_index;
- __be32 *set_ci_db;
- int arm_db_index;
- __be32 *arm_db;
- int arm_sn;
+ int set_ci_db_index;
+ __be32 *set_ci_db;
+ int arm_db_index;
+ __be32 *arm_db;
+ int arm_sn;
- wait_queue_head_t wait;
- struct mutex mutex;
+ union mthca_buf queue;
+ struct mthca_mr mr;
+ wait_queue_head_t wait;
};
struct mthca_srq {
struct ib_srq ibsrq;
spinlock_t lock;
- int refcount;
+ atomic_t refcount;
int srqn;
int max;
int max_gs;
struct mthca_mr mr;
wait_queue_head_t wait;
- struct mutex mutex;
};
struct mthca_wq {
struct mthca_qp {
struct ib_qp ibqp;
- int refcount;
+ atomic_t refcount;
u32 qpn;
int is_direct;
- u8 port; /* for SQP and memfree use only */
- u8 alt_port; /* for memfree use only */
u8 transport;
u8 state;
u8 atomic_rd_en;
union mthca_buf queue;
wait_queue_head_t wait;
- struct mutex mutex;
};
struct mthca_sqp {
struct mthca_qp qp;
+ int port;
int pkey_index;
u32 qkey;
u32 send_psn;