/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <ib_cache.h>
+#include <rdma/ib_cache.h>
#include "ipoib.h"
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
-int data_debug_level;
+static int data_debug_level;
module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
#define IPOIB_OP_RECV (1ul << 31)
-static DECLARE_MUTEX(pkey_sem);
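+/* protects scheduling and teardown of the P_Key polling task */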
+static DEFINE_MUTEX(pkey_mutex);
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr)
unsigned long flags;
- if (ah->last_send <= priv->tx_tail) {
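+ /* signed difference makes this ordering test safe across counter wraparound */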
+ if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
ipoib_dbg(priv, "Freeing ah %p\n", ah->ah);
ib_destroy_ah(ah->ah);
kfree(ah);
}
}
-static inline int ipoib_ib_receive(struct ipoib_dev_priv *priv,
- unsigned int wr_id,
- dma_addr_t addr)
+static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
- struct ib_sge list = {
- .addr = addr,
- .length = IPOIB_BUF_SIZE,
- .lkey = priv->mr->lkey,
- };
- struct ib_recv_wr param = {
- .wr_id = wr_id | IPOIB_OP_RECV,
- .sg_list = &list,
- .num_sge = 1,
- .recv_flags = IB_RECV_SIGNALED
- };
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ib_sge list;
+ struct ib_recv_wr param;
struct ib_recv_wr *bad_wr;
+ int ret;
+
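+ /* single scatter/gather entry covering the premapped receive buffer */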
+ list.addr = priv->rx_ring[id].mapping;
+ list.length = IPOIB_BUF_SIZE;
+ list.lkey = priv->mr->lkey;
+
+ param.next = NULL;
+ param.wr_id = id | IPOIB_OP_RECV;
+ param.sg_list = &list;
+ param.num_sge = 1;
+
+ ret = ib_post_recv(priv->qp, &param, &bad_wr);
+ if (unlikely(ret)) {
+ ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
+ dma_unmap_single(priv->ca->dma_device,
+ priv->rx_ring[id].mapping,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_ring[id].skb);
+ priv->rx_ring[id].skb = NULL;
+ }
- return ib_post_recv(priv->qp, &param, &bad_wr);
+ return ret;
}
-static int ipoib_ib_post_receive(struct net_device *dev, int id)
+static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
dma_addr_t addr;
- int ret;
skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
- if (!skb) {
- ipoib_warn(priv, "failed to allocate receive buffer\n");
-
- priv->rx_ring[id].skb = NULL;
+ if (!skb)
return -ENOMEM;
- }
- skb_reserve(skb, 4); /* 16 byte align IP header */
- priv->rx_ring[id].skb = skb;
+
+ /*
+ * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
+ * header. So we need 4 more bytes to get to 48 and align the
+ * IP header to a multiple of 16.
+ */
+ skb_reserve(skb, 4);
+
addr = dma_map_single(priv->ca->dma_device,
skb->data, IPOIB_BUF_SIZE,
DMA_FROM_DEVICE);
- pci_unmap_addr_set(&priv->rx_ring[id], mapping, addr);
-
- ret = ipoib_ib_receive(priv, id, addr);
- if (ret) {
- ipoib_warn(priv, "ipoib_ib_receive failed for buf %d (%d)\n",
- id, ret);
- priv->rx_ring[id].skb = NULL;
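+ /* if the DMA mapping fails, free the skb and leave the ring slot empty */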
+ if (unlikely(dma_mapping_error(addr))) {
+ dev_kfree_skb_any(skb);
+ return -EIO;
}
- return ret;
+ priv->rx_ring[id].skb = skb;
+ priv->rx_ring[id].mapping = addr;
+
+ return 0;
}
static int ipoib_ib_post_receives(struct net_device *dev)
int i;
for (i = 0; i < IPOIB_RX_RING_SIZE; ++i) {
+ if (ipoib_alloc_rx_skb(dev, i)) {
+ ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
+ return -ENOMEM;
+ }
if (ipoib_ib_post_receive(dev, i)) {
ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
return -EIO;
wr_id &= ~IPOIB_OP_RECV;
if (wr_id < IPOIB_RX_RING_SIZE) {
- struct sk_buff *skb = priv->rx_ring[wr_id].skb;
-
- priv->rx_ring[wr_id].skb = NULL;
-
- dma_unmap_single(priv->ca->dma_device,
- pci_unmap_addr(&priv->rx_ring[wr_id],
- mapping),
- IPOIB_BUF_SIZE,
- DMA_FROM_DEVICE);
+ struct sk_buff *skb = priv->rx_ring[wr_id].skb;
+ dma_addr_t addr = priv->rx_ring[wr_id].mapping;
- if (wc->status != IB_WC_SUCCESS) {
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
ipoib_warn(priv, "failed recv event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
+ dma_unmap_single(priv->ca->dma_device, addr,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
+ priv->rx_ring[wr_id].skb = NULL;
return;
}
+ /*
+ * If we can't allocate a new RX buffer, dump
+ * this packet and reuse the old buffer.
+ */
+ if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+ ++priv->stats.rx_dropped;
+ goto repost;
+ }
+
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
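+ /* the new skb now owns the ring slot, so the old mapping can be released */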
+ dma_unmap_single(priv->ca->dma_device, addr,
+ IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+
skb_put(skb, wc->byte_len);
skb_pull(skb, IB_GRH_BYTES);
if (wc->slid != priv->local_lid ||
wc->src_qp != priv->qp->qp_num) {
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-
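+ /* point mac.raw at the IPoIB header before it is pulled off */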
+ skb->mac.raw = skb->data;
skb_pull(skb, IPOIB_ENCAP_LEN);
dev->last_rx = jiffies;
dev_kfree_skb_any(skb);
}
- /* repost receive */
- if (ipoib_ib_post_receive(dev, wr_id))
+ repost:
+ if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
ipoib_warn(priv, "ipoib_ib_post_receive failed "
"for buf %d\n", wr_id);
} else
wr_id);
} else {
- struct ipoib_buf *tx_req;
+ struct ipoib_tx_buf *tx_req;
unsigned long flags;
if (wr_id >= IPOIB_TX_RING_SIZE) {
struct ipoib_ah *address, u32 qpn)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ipoib_buf *tx_req;
+ struct ipoib_tx_buf *tx_req;
dma_addr_t addr;
if (skb->len > dev->mtu + INFINIBAND_ALEN) {
spin_lock_irq(&priv->lock);
list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
- if (ah->last_send <= priv->tx_tail) {
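+ /* same wraparound-safe comparison as above */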
+ if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
list_del(&ah->list);
list_add_tail(&ah->list, &remove_list);
}
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
- ret = ipoib_qp_create(dev);
+ ret = ipoib_init_qp(dev);
if (ret) {
- ipoib_warn(priv, "ipoib_qp_create returned %d\n", ret);
+ ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
return -1;
}
/* Shutdown the P_Key thread if still active */
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
- down(&pkey_sem);
+ mutex_lock(&pkey_mutex);
set_bit(IPOIB_PKEY_STOP, &priv->flags);
cancel_delayed_work(&priv->pkey_task);
- up(&pkey_sem);
+ mutex_unlock(&pkey_mutex);
flush_workqueue(ipoib_workqueue);
}
- ipoib_mcast_stop_thread(dev);
-
- /*
- * Flush the multicast groups first so we stop any multicast joins. The
- * completion thread may have already died and we may deadlock waiting
- * for the completion thread to finish some multicast joins.
- */
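+ /* the second argument requests a workqueue flush during the stop */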
+ ipoib_mcast_stop_thread(dev, 1);
ipoib_mcast_dev_flush(dev);
- /* Delete broadcast and local addresses since they will be recreated */
- ipoib_mcast_dev_down(dev);
-
ipoib_flush_paths(dev);
return 0;
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
- int attr_mask;
unsigned long begin;
- struct ipoib_buf *tx_req;
+ struct ipoib_tx_buf *tx_req;
int i;
- /* Kill the existing QP and allocate a new one */
+ /*
+ * Move our QP to the error state and then reinitialize it once
+ * all work requests have completed or have been flushed.
+ */
qp_attr.qp_state = IB_QPS_ERR;
- attr_mask = IB_QP_STATE;
- if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
/* Wait for all sends and receives to complete */
* assume the HW is wedged and just free up
* all our pending work requests.
*/
- while (priv->tx_tail < priv->tx_head) {
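+ /* wraparound-safe test for outstanding sends */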
+ while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
tx_req = &priv->tx_ring[priv->tx_tail &
(IPOIB_TX_RING_SIZE - 1)];
dma_unmap_single(priv->ca->dma_device,
timeout:
qp_attr.qp_state = IB_QPS_RESET;
- attr_mask = IB_QP_STATE;
- if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
+ if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
ipoib_warn(priv, "Failed to modify QP to RESET state\n");
/* Wait for all AHs to be reaped */
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
ipoib_ib_dev_up(dev);
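+ /* vlan_mutex keeps the child interface list stable during the walk */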
+ mutex_lock(&priv->vlan_mutex);
+
/* Flush any child interfaces too */
list_for_each_entry(cpriv, &priv->child_intfs, list)
ipoib_ib_dev_flush(&cpriv->dev);
+
+ mutex_unlock(&priv->vlan_mutex);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
ipoib_dbg(priv, "cleaning up ib_dev\n");
- ipoib_mcast_stop_thread(dev);
-
- /* Delete the broadcast address and the local address */
- ipoib_mcast_dev_down(dev);
+ ipoib_mcast_stop_thread(dev, 1);
+ ipoib_mcast_dev_flush(dev);
ipoib_transport_dev_cleanup(dev);
}
* Bug #2507. This implementation will probably be removed when the P_Key
* change async notification is available.
*/
-int ipoib_open(struct net_device *dev);
static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
ipoib_open(dev);
else {
- down(&pkey_sem);
+ mutex_lock(&pkey_mutex);
if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
queue_delayed_work(ipoib_workqueue,
&priv->pkey_task,
HZ);
- up(&pkey_sem);
+ mutex_unlock(&pkey_mutex);
}
}
/* P_Key value not assigned yet - start polling */
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
- down(&pkey_sem);
+ mutex_lock(&pkey_mutex);
clear_bit(IPOIB_PKEY_STOP, &priv->flags);
queue_delayed_work(ipoib_workqueue,
&priv->pkey_task,
HZ);
- up(&pkey_sem);
+ mutex_unlock(&pkey_mutex);
return 1;
}