X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Finfiniband%2Fulp%2Fipoib%2Fipoib_ib.c;h=59d9594ed6d97e0fc243066f42f94c8f8f3c0ca6;hb=refs%2Fheads%2Fvserver;hp=86bcdd72a10706260bf2585fe5d3b5e8103e65e5;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 86bcdd72a..59d9594ed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -84,15 +84,9 @@ void ipoib_free_ah(struct kref *kref)
 
 	unsigned long flags;
 
-	if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
-		ipoib_dbg(priv, "Freeing ah %p\n", ah->ah);
-		ib_destroy_ah(ah->ah);
-		kfree(ah);
-	} else {
-		spin_lock_irqsave(&priv->lock, flags);
-		list_add_tail(&ah->list, &priv->dead_ahs);
-		spin_unlock_irqrestore(&priv->lock, flags);
-	}
+	spin_lock_irqsave(&priv->lock, flags);
+	list_add_tail(&ah->list, &priv->dead_ahs);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static int ipoib_ib_post_receive(struct net_device *dev, int id)
@@ -115,9 +109,8 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
 	ret = ib_post_recv(priv->qp, &param, &bad_wr);
 	if (unlikely(ret)) {
 		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
-		dma_unmap_single(priv->ca->dma_device,
-				 priv->rx_ring[id].mapping,
-				 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+		ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
+				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(priv->rx_ring[id].skb);
 		priv->rx_ring[id].skb = NULL;
 	}
@@ -129,7 +122,7 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct sk_buff *skb;
-	dma_addr_t addr;
+	u64 addr;
 
 	skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
 	if (!skb)
@@ -142,10 +135,9 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
 	 */
 	skb_reserve(skb, 4);
 
-	addr = dma_map_single(priv->ca->dma_device,
-			      skb->data, IPOIB_BUF_SIZE,
-			      DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(addr))) {
+	addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
+				 DMA_FROM_DEVICE);
+	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
 		dev_kfree_skb_any(skb);
 		return -EIO;
 	}
@@ -161,7 +153,7 @@ static int ipoib_ib_post_receives(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int i;
 
-	for (i = 0; i < IPOIB_RX_RING_SIZE; ++i) {
+	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
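The hunks above convert the driver from calling dma_map_single()/dma_unmap_single() directly on priv->ca->dma_device to the ib_dma_* wrappers, which take the struct ib_device itself and allow a device to override the mapping operations; the mapping handle also widens from dma_addr_t to u64 to match the verbs API. A minimal sketch of the resulting receive-buffer pattern follows; struct rx_buf_sketch and map_rx_skb_sketch() are illustrative names, not part of the patch.

/*
 * Sketch only: the ib_dma_map_single()/ib_dma_mapping_error() pairing
 * adopted above, with a simplified ring entry.
 */
struct rx_buf_sketch {
	struct sk_buff *skb;
	u64 mapping;	/* u64, not dma_addr_t: ib_dma_map_single() returns u64 */
};

static int map_rx_skb_sketch(struct ib_device *ca, struct rx_buf_sketch *buf,
			     unsigned int size)
{
	struct sk_buff *skb = dev_alloc_skb(size + 4);

	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, 4);	/* 4-byte reserve aligns the IP header, as above */

	buf->mapping = ib_dma_map_single(ca, skb->data, size, DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(ca, buf->mapping))) {
		dev_kfree_skb_any(skb);	/* mapping failed: drop the buffer */
		return -EIO;
	}
	buf->skb = skb;
	return 0;
}

The unmap side mirrors it: ib_dma_unmap_single(ca, buf->mapping, size, DMA_FROM_DEVICE) before the skb is freed or handed to the stack.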
@@ -175,116 +167,126 @@ static int ipoib_ib_post_receives(struct net_device *dev)
 	return 0;
 }
 
-static void ipoib_ib_handle_wc(struct net_device *dev,
-			       struct ib_wc *wc)
+static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	unsigned int wr_id = wc->wr_id;
+	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
+	struct sk_buff *skb;
+	u64 addr;
 
-	ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n",
+	ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
 		       wr_id, wc->opcode, wc->status);
 
-	if (wr_id & IPOIB_OP_RECV) {
-		wr_id &= ~IPOIB_OP_RECV;
-
-		if (wr_id < IPOIB_RX_RING_SIZE) {
-			struct sk_buff *skb  = priv->rx_ring[wr_id].skb;
-			dma_addr_t      addr = priv->rx_ring[wr_id].mapping;
-
-			if (unlikely(wc->status != IB_WC_SUCCESS)) {
-				if (wc->status != IB_WC_WR_FLUSH_ERR)
-					ipoib_warn(priv, "failed recv event "
-						   "(status=%d, wrid=%d vend_err %x)\n",
-						   wc->status, wr_id, wc->vendor_err);
-				dma_unmap_single(priv->ca->dma_device, addr,
-						 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
-				dev_kfree_skb_any(skb);
-				priv->rx_ring[wr_id].skb = NULL;
-				return;
-			}
+	if (unlikely(wr_id >= ipoib_recvq_size)) {
+		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
+			   wr_id, ipoib_recvq_size);
+		return;
+	}
 
-			/*
-			 * If we can't allocate a new RX buffer, dump
-			 * this packet and reuse the old buffer.
-			 */
-			if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
-				++priv->stats.rx_dropped;
-				goto repost;
-			}
+	skb  = priv->rx_ring[wr_id].skb;
+	addr = priv->rx_ring[wr_id].mapping;
 
-			ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
-				       wc->byte_len, wc->slid);
+	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			ipoib_warn(priv, "failed recv event "
+				   "(status=%d, wrid=%d vend_err %x)\n",
+				   wc->status, wr_id, wc->vendor_err);
+		ib_dma_unmap_single(priv->ca, addr,
+				    IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		priv->rx_ring[wr_id].skb = NULL;
+		return;
+	}
 
-			dma_unmap_single(priv->ca->dma_device, addr,
-					 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+	/*
+	 * If we can't allocate a new RX buffer, dump
+	 * this packet and reuse the old buffer.
+	 */
+	if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+		++priv->stats.rx_dropped;
+		goto repost;
+	}
 
-			skb_put(skb, wc->byte_len);
-			skb_pull(skb, IB_GRH_BYTES);
+	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
+		       wc->byte_len, wc->slid);
 
-			if (wc->slid != priv->local_lid ||
-			    wc->src_qp != priv->qp->qp_num) {
-				skb->protocol = ((struct ipoib_header *) skb->data)->proto;
-				skb->mac.raw = skb->data;
-				skb_pull(skb, IPOIB_ENCAP_LEN);
+	ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
 
-				dev->last_rx = jiffies;
-				++priv->stats.rx_packets;
-				priv->stats.rx_bytes += skb->len;
+	skb_put(skb, wc->byte_len);
+	skb_pull(skb, IB_GRH_BYTES);
 
-				skb->dev = dev;
-				/* XXX get correct PACKET_ type here */
-				skb->pkt_type = PACKET_HOST;
-				netif_rx_ni(skb);
-			} else {
-				ipoib_dbg_data(priv, "dropping loopback packet\n");
-				dev_kfree_skb_any(skb);
-			}
+	if (wc->slid != priv->local_lid ||
+	    wc->src_qp != priv->qp->qp_num) {
+		skb->protocol = ((struct ipoib_header *) skb->data)->proto;
+		skb->mac.raw = skb->data;
+		skb_pull(skb, IPOIB_ENCAP_LEN);
 
-		repost:
-			if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
-				ipoib_warn(priv, "ipoib_ib_post_receive failed "
-					   "for buf %d\n", wr_id);
-		} else
-			ipoib_warn(priv, "completion event with wrid %d\n",
-				   wr_id);
+		dev->last_rx = jiffies;
+		++priv->stats.rx_packets;
+		priv->stats.rx_bytes += skb->len;
+
+		skb->dev = dev;
+		/* XXX get correct PACKET_ type here */
+		skb->pkt_type = PACKET_HOST;
+		netif_rx_ni(skb);
 	} else {
-		struct ipoib_tx_buf *tx_req;
-		unsigned long flags;
+		ipoib_dbg_data(priv, "dropping loopback packet\n");
+		dev_kfree_skb_any(skb);
+	}
 
-		if (wr_id >= IPOIB_TX_RING_SIZE) {
-			ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
-				   wr_id, IPOIB_TX_RING_SIZE);
-			return;
-		}
+repost:
+	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
+		ipoib_warn(priv, "ipoib_ib_post_receive failed "
+			   "for buf %d\n", wr_id);
+}
 
-		ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id);
+static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	unsigned int wr_id = wc->wr_id;
+	struct ipoib_tx_buf *tx_req;
+	unsigned long flags;
 
-		tx_req = &priv->tx_ring[wr_id];
+	ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n",
+		       wr_id, wc->opcode, wc->status);
 
-		dma_unmap_single(priv->ca->dma_device,
-				 pci_unmap_addr(tx_req, mapping),
-				 tx_req->skb->len,
-				 DMA_TO_DEVICE);
+	if (unlikely(wr_id >= ipoib_sendq_size)) {
+		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
+			   wr_id, ipoib_sendq_size);
+		return;
+	}
 
-		++priv->stats.tx_packets;
-		priv->stats.tx_bytes += tx_req->skb->len;
+	tx_req = &priv->tx_ring[wr_id];
 
-		dev_kfree_skb_any(tx_req->skb);
+	ib_dma_unmap_single(priv->ca, tx_req->mapping,
+			    tx_req->skb->len, DMA_TO_DEVICE);
 
-		spin_lock_irqsave(&priv->tx_lock, flags);
-		++priv->tx_tail;
-		if (netif_queue_stopped(dev) &&
-		    priv->tx_head - priv->tx_tail <= IPOIB_TX_RING_SIZE / 2)
-			netif_wake_queue(dev);
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
+	++priv->stats.tx_packets;
+	priv->stats.tx_bytes += tx_req->skb->len;
 
-		if (wc->status != IB_WC_SUCCESS &&
-		    wc->status != IB_WC_WR_FLUSH_ERR)
-			ipoib_warn(priv, "failed send event "
-				   "(status=%d, wrid=%d vend_err %x)\n",
-				   wc->status, wr_id, wc->vendor_err);
-	}
+	dev_kfree_skb_any(tx_req->skb);
+
+	spin_lock_irqsave(&priv->tx_lock, flags);
+	++priv->tx_tail;
+	if (netif_queue_stopped(dev) &&
+	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
+	    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
+		netif_wake_queue(dev);
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+	if (wc->status != IB_WC_SUCCESS &&
+	    wc->status != IB_WC_WR_FLUSH_ERR)
+		ipoib_warn(priv, "failed send event "
+			   "(status=%d, wrid=%d vend_err %x)\n",
+			   wc->status, wr_id, wc->vendor_err);
+}
+
+static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc)
+{
+	if (wc->wr_id & IPOIB_OP_RECV)
+		ipoib_ib_handle_rx_wc(dev, wc);
+	else
+		ipoib_ib_handle_tx_wc(dev, wc);
 }
 
 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
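The hunk above splits the old combined completion handler into ipoib_ib_handle_rx_wc() and ipoib_ib_handle_tx_wc(), demultiplexed by a flag bit carried in the 64-bit work-request ID rather than by range-checking the index. The convention, sketched below with hypothetical helper names, is to OR the flag into wr_id when posting a receive and mask it off again in the handler; in the driver's header IPOIB_OP_RECV is a high bit, (1ul << 31), so it can never collide with a ring index.

/* Sketch of the wr_id tagging convention; the *_sketch names are stubs. */
#define OP_RECV_SKETCH (1ul << 31)	/* assumed value, mirroring IPOIB_OP_RECV */

static void handle_rx_sketch(struct net_device *dev, unsigned int id,
			     struct ib_wc *wc)
{
	/* ... receive path: bound-check id, unmap, repost, pass skb up ... */
}

static void handle_tx_sketch(struct net_device *dev, unsigned int id,
			     struct ib_wc *wc)
{
	/* ... send path: bound-check id, unmap, free skb, advance tail ... */
}

static void post_recv_sketch(struct ib_qp *qp, struct ib_recv_wr *wr,
			     unsigned int ring_index)
{
	struct ib_recv_wr *bad_wr;

	wr->wr_id = ring_index | OP_RECV_SKETCH;	/* tag as RX when posting */
	ib_post_recv(qp, wr, &bad_wr);
}

static void demux_wc_sketch(struct net_device *dev, struct ib_wc *wc)
{
	if (wc->wr_id & OP_RECV_SKETCH)		/* tagged when the recv was posted */
		handle_rx_sketch(dev, wc->wr_id & ~OP_RECV_SKETCH, wc);
	else					/* sends carry the bare ring index */
		handle_tx_sketch(dev, wc->wr_id, wc);
}

Keeping the tag out of the index range is also what lets both handlers bound-check wr_id against the ring size before touching the ring.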
@@ -304,7 +306,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 static inline int post_send(struct ipoib_dev_priv *priv,
 			    unsigned int wr_id,
 			    struct ib_ah *address, u32 qpn,
-			    dma_addr_t addr, int len)
+			    u64 addr, int len)
 {
 	struct ib_send_wr *bad_wr;
 
@@ -323,9 +325,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_tx_buf *tx_req;
-	dma_addr_t addr;
+	u64 addr;
 
-	if (skb->len > dev->mtu + INFINIBAND_ALEN) {
+	if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
 			   skb->len, dev->mtu + INFINIBAND_ALEN);
 		++priv->stats.tx_dropped;
@@ -344,18 +346,22 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	 * means we have to make sure everything is properly recorded and
 	 * our state is consistent before we call post_send().
 	 */
-	tx_req = &priv->tx_ring[priv->tx_head & (IPOIB_TX_RING_SIZE - 1)];
+	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
 	tx_req->skb = skb;
-	addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
-			      DMA_TO_DEVICE);
-	pci_unmap_addr_set(tx_req, mapping, addr);
+	addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
+				 DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+		++priv->stats.tx_errors;
+		dev_kfree_skb_any(skb);
+		return;
+	}
+	tx_req->mapping = addr;
 
-	if (unlikely(post_send(priv, priv->tx_head & (IPOIB_TX_RING_SIZE - 1),
+	if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
 			       address->ah, qpn, addr, skb->len))) {
 		ipoib_warn(priv, "post_send failed\n");
 		++priv->stats.tx_errors;
-		dma_unmap_single(priv->ca->dma_device, addr, skb->len,
-				 DMA_TO_DEVICE);
+		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(skb);
 	} else {
 		dev->trans_start = jiffies;
@@ -363,7 +369,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		address->last_send = priv->tx_head;
 		++priv->tx_head;
 
-		if (priv->tx_head - priv->tx_tail == IPOIB_TX_RING_SIZE) {
+		if (priv->tx_head - priv->tx_tail == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
			netif_stop_queue(dev);
		}
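ipoib_send() above indexes the TX ring with priv->tx_head & (ipoib_sendq_size - 1) and declares the ring full when tx_head - tx_tail == ipoib_sendq_size. This works only because head and tail are free-running counters and the queue sizes are powers of two (the sendq_size/recvq_size module parameters are rounded to a power of two elsewhere in the module). A generic sketch of the technique:

/* Free-running-counter ring sketch: the counters increment without bound
 * and are reduced modulo the power-of-two size only when indexing, so
 * head - tail gives the occupancy even across 32-bit wraparound. */
struct ring_sketch {
	unsigned int head;	/* next slot a producer will fill */
	unsigned int tail;	/* next slot a consumer will drain */
	unsigned int size;	/* must be a power of two */
};

static inline unsigned int ring_slot_sketch(const struct ring_sketch *r,
					    unsigned int counter)
{
	return counter & (r->size - 1);	/* cheap modulo for 2^n sizes */
}

static inline int ring_full_sketch(const struct ring_sketch *r)
{
	return r->head - r->tail == r->size;
}

The wake-up threshold in the TX completion handler (tx_head - tx_tail <= ipoib_sendq_size >> 1) restarts the queue once the ring has drained to half full.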
@@ -376,25 +382,23 @@ static void __ipoib_reap_ah(struct net_device *dev)
 	struct ipoib_ah *ah, *tah;
 	LIST_HEAD(remove_list);
 
-	spin_lock_irq(&priv->lock);
+	spin_lock_irq(&priv->tx_lock);
+	spin_lock(&priv->lock);
 	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
 		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
 			list_del(&ah->list);
-			list_add_tail(&ah->list, &remove_list);
+			ib_destroy_ah(ah->ah);
+			kfree(ah);
 		}
-	spin_unlock_irq(&priv->lock);
-
-	list_for_each_entry_safe(ah, tah, &remove_list, list) {
-		ipoib_dbg(priv, "Reaping ah %p\n", ah->ah);
-		ib_destroy_ah(ah->ah);
-		kfree(ah);
-	}
+	spin_unlock(&priv->lock);
+	spin_unlock_irq(&priv->tx_lock);
 }
 
-void ipoib_reap_ah(void *dev_ptr)
+void ipoib_reap_ah(struct work_struct *work)
 {
-	struct net_device *dev = dev_ptr;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
+	struct net_device *dev = priv->dev;
 
 	__ipoib_reap_ah(dev);
 
@@ -416,25 +420,46 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	ret = ipoib_ib_post_receives(dev);
 	if (ret) {
 		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
+		ipoib_ib_dev_stop(dev);
 		return -1;
 	}
 
 	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
 	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);
 
+	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+
 	return 0;
 }
 
+static void ipoib_pkey_dev_check_presence(struct net_device *dev)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	u16 pkey_index = 0;
+
+	if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
+		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+	else
+		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+}
+
 int ipoib_ib_dev_up(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+	ipoib_pkey_dev_check_presence(dev);
+
+	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
+		ipoib_dbg(priv, "PKEY is not assigned.\n");
+		return 0;
+	}
+
 	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
 
 	return ipoib_mcast_start_thread(dev);
 }
 
-int ipoib_ib_dev_down(struct net_device *dev)
+int ipoib_ib_dev_down(struct net_device *dev, int flush)
 {
	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
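ipoib_reap_ah() above changes signature from void (*)(void *) to void (*)(struct work_struct *): this tree spans the 2.6.20 workqueue rework, in which handlers receive the work item itself and recover their containing object with container_of(). For a delayed work item such as ah_reap_task, the handler maps back through the embedded .work member. A sketch of the pattern, with illustrative names:

/* Sketch of the container_of() workqueue pattern used above. */
struct priv_sketch {
	struct net_device *dev;
	struct workqueue_struct *wq;
	struct delayed_work reap_task;	/* embeds a work_struct as .work */
};

static void reap_handler_sketch(struct work_struct *work)
{
	struct priv_sketch *priv =
		container_of(work, struct priv_sketch, reap_task.work);

	/* ... periodic cleanup against priv->dev goes here ... */

	queue_delayed_work(priv->wq, &priv->reap_task, HZ);	/* re-arm */
}

static void reap_start_sketch(struct priv_sketch *priv)
{
	INIT_DELAYED_WORK(&priv->reap_task, reap_handler_sketch);
	queue_delayed_work(priv->wq, &priv->reap_task, HZ);
}

The same conversion explains ipoib_ib_dev_flush() and ipoib_pkey_poll() below, and why the child-interface loop now passes &cpriv->flush_task rather than the net_device pointer.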
@@ -449,10 +474,11 @@ int ipoib_ib_dev_down(struct net_device *dev)
 		set_bit(IPOIB_PKEY_STOP, &priv->flags);
 		cancel_delayed_work(&priv->pkey_task);
 		mutex_unlock(&pkey_mutex);
-		flush_workqueue(ipoib_workqueue);
+		if (flush)
+			flush_workqueue(ipoib_workqueue);
 	}
 
-	ipoib_mcast_stop_thread(dev, 1);
+	ipoib_mcast_stop_thread(dev, flush);
 	ipoib_mcast_dev_flush(dev);
 
 	ipoib_flush_paths(dev);
@@ -466,7 +492,7 @@ static int recvs_pending(struct net_device *dev)
 	int pending = 0;
 	int i;
 
-	for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
+	for (i = 0; i < ipoib_recvq_size; ++i)
 		if (priv->rx_ring[i].skb)
 			++pending;
 
@@ -481,6 +507,8 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 	struct ipoib_tx_buf *tx_req;
 	int i;
 
+	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+
 	/*
 	 * Move our QP to the error state and then reinitialize in
 	 * when all work requests have completed or have been flushed.
@@ -503,25 +531,28 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 			 */
 			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
 				tx_req = &priv->tx_ring[priv->tx_tail &
-							(IPOIB_TX_RING_SIZE - 1)];
-				dma_unmap_single(priv->ca->dma_device,
-						 pci_unmap_addr(tx_req, mapping),
-						 tx_req->skb->len,
-						 DMA_TO_DEVICE);
+							(ipoib_sendq_size - 1)];
+				ib_dma_unmap_single(priv->ca,
+						    tx_req->mapping,
+						    tx_req->skb->len,
+						    DMA_TO_DEVICE);
 				dev_kfree_skb_any(tx_req->skb);
 				++priv->tx_tail;
 			}
 
-			for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
-				if (priv->rx_ring[i].skb) {
-					dma_unmap_single(priv->ca->dma_device,
-							 pci_unmap_addr(&priv->rx_ring[i],
-									mapping),
-							 IPOIB_BUF_SIZE,
-							 DMA_FROM_DEVICE);
-					dev_kfree_skb_any(priv->rx_ring[i].skb);
-					priv->rx_ring[i].skb = NULL;
-				}
+			for (i = 0; i < ipoib_recvq_size; ++i) {
+				struct ipoib_rx_buf *rx_req;
+
+				rx_req = &priv->rx_ring[i];
+				if (!rx_req->skb)
+					continue;
+				ib_dma_unmap_single(priv->ca,
+						    rx_req->mapping,
+						    IPOIB_BUF_SIZE,
+						    DMA_FROM_DEVICE);
+				dev_kfree_skb_any(rx_req->skb);
+				rx_req->skb = NULL;
+			}
 
 			goto timeout;
 		}
@@ -580,30 +611,40 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 	return 0;
 }
 
-void ipoib_ib_dev_flush(void *_dev)
+void ipoib_ib_dev_flush(struct work_struct *work)
 {
-	struct net_device *dev = (struct net_device *)_dev;
-	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
+	struct ipoib_dev_priv *cpriv, *priv =
+		container_of(work, struct ipoib_dev_priv, flush_task);
+	struct net_device *dev = priv->dev;
 
-	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
+		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
 		return;
+	}
+
+	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
+		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
+		return;
+	}
 
 	ipoib_dbg(priv, "flushing\n");
 
-	ipoib_ib_dev_down(dev);
+	ipoib_ib_dev_down(dev, 0);
 
 	/*
 	 * The device could have been brought down between the start and when
 	 * we get here, don't bring it back up if it's not configured up
 	 */
-	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
 		ipoib_ib_dev_up(dev);
+		ipoib_mcast_restart_task(&priv->restart_task);
+	}
 
 	mutex_lock(&priv->vlan_mutex);
 
 	/* Flush any child interfaces too */
 	list_for_each_entry(cpriv, &priv->child_intfs, list)
-		ipoib_ib_dev_flush(&cpriv->dev);
+		ipoib_ib_dev_flush(&cpriv->flush_task);
 
 	mutex_unlock(&priv->vlan_mutex);
 }
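Two places above — the address-handle reaper and the drain loop in ipoib_ib_dev_stop() — order free-running counters with (int) a - (int) b rather than comparing a and b directly. Casting to int and subtracting stays correct when a 32-bit counter wraps, as long as the two values are less than 2^31 apart. A tiny illustration, with hypothetical values:

/* Wraparound-safe ordering sketch for free-running 32-bit counters. */
static inline int counter_reached_sketch(unsigned int tail, unsigned int mark)
{
	return (int) tail - (int) mark >= 0;	/* has tail caught up to mark? */
}

/* Example: tail just wrapped past UINT_MAX, mark has not.
 *   mark = 0xfffffff0, tail = 0x10
 *   tail >= mark is false as an unsigned compare, but
 *   (int) 0x10 - (int) 0xfffffff0 = 16 - (-16) = 32 >= 0,
 *   so tail is correctly seen as having passed mark. */

This is the test that lets __ipoib_reap_ah() free an address handle only once the send-completion counter has passed the last send that referenced it.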
@@ -630,21 +671,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
  * change async notification is available.
  */
 
-static void ipoib_pkey_dev_check_presence(struct net_device *dev)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	u16 pkey_index = 0;
-
-	if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
-		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
-	else
-		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
-}
-
-void ipoib_pkey_poll(void *dev_ptr)
+void ipoib_pkey_poll(struct work_struct *work)
 {
-	struct net_device *dev = dev_ptr;
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
+	struct ipoib_dev_priv *priv =
+		container_of(work, struct ipoib_dev_priv, pkey_task.work);
+	struct net_device *dev = priv->dev;
 
 	ipoib_pkey_dev_check_presence(dev);