/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */
#include "ipoib.h"

#include <linux/version.h>
#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int debug_level;

module_param(debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
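/*
 * The table below is the 20-byte IPoIB hardware address of the IPv4
 * broadcast group: a 4-byte QPN (0x00ffffff, the broadcast QPN)
 * followed by the 16-byte multicast GID ff12:401b:<P_Key>::ffff:ffff.
 * Bytes 8 and 9 (the P_Key) are left zero here and filled in at
 * initialization time (see ipoib_add_port()).
 */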
static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};
struct workqueue_struct *ipoib_workqueue;
static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev))
		return -EINVAL;

	if (ipoib_ib_dev_up(dev))
		return -EINVAL;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;
}
static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up(&priv->vlan_mutex);
	}

	return 0;
}
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}
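/*
 * Path bookkeeping: one ipoib_path exists per destination GID, kept
 * both in an rb-tree keyed by GID (for lookup on the xmit path) and
 * on priv->path_list (so ipoib_flush_paths() can walk them all).
 * Both structures are protected by priv->lock.
 */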
static struct ipoib_path *__path_find(struct net_device *dev,
				      union ib_gid *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid->raw, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}
static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}
static void __path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		*to_ipoib_neigh(neigh->neighbour) = NULL;
		neigh->neighbour->ops->destructor = NULL;
		kfree(neigh);
	}

	if (path->ah)
		ipoib_put_ah(path->ah);

	rb_erase(&path->rb_node, &priv->path_tree);
	list_del(&path->list);
	kfree(path);
}
void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_splice(&priv->path_list, &remove_list);
	INIT_LIST_HEAD(&priv->path_list);
	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		wait_for_completion(&path->done);
		__path_free(dev, path);
	}
}
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_neigh *neigh;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (pathrec)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
			  be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
	else
		ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
			  status, IPOIB_GID_ARG(path->pathrec.dgid));

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av = {
			.dlid     = be16_to_cpu(pathrec->dlid),
			.sl       = pathrec->sl,
			.port_num = priv->port
		};

		if (ib_sa_rate_enum_to_int(pathrec->rate) > 0)
			av.static_rate = (2 * priv->local_rate -
					  ib_sa_rate_enum_to_int(pathrec->rate) - 1) /
				(priv->local_rate ? priv->local_rate : 1);

		ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
			  av.static_rate, priv->local_rate,
			  ib_sa_rate_enum_to_int(pathrec->rate));
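		/*
		 * Worked example of the static_rate arithmetic above:
		 * the rates are multiples of 2.5 Gb/s, so a 4X local
		 * port (local_rate == 4) reaching a 1X path (rate == 1)
		 * gives (2*4 - 1 - 1) / 4 == 1, i.e. one inter-packet
		 * delay unit; a path at the full local rate gives
		 * (local_rate - 1) / local_rate == 0, no throttling.
		 */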

		ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	path->ah = ah;

	if (ah) {
		path->pathrec = *pathrec;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry(neigh, &path->neigh_list, list) {
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}
static struct ipoib_path *path_rec_create(struct net_device *dev,
					  union ib_gid *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	path = kmalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev          = dev;
	path->pathrec.dlid = 0;
	path->ah           = NULL;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);
	path->query = NULL;
	init_completion(&path->done);

	memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
	path->pathrec.sgid      = priv->local_gid;
	path->pathrec.pkey      = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;

	__path_add(dev, path);

	return path;
}
static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
		  IPOIB_GID_ARG(path->pathrec.dgid));

	path->query_id =
		ib_sa_path_rec_get(priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID	    |
				   IB_SA_PATH_REC_SGID	    |
				   IB_SA_PATH_REC_NUMB_PATH |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);

	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
		path->query = NULL;
		return path->query_id;
	}

	return 0;
}
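/*
 * The SA query issued above completes asynchronously in
 * path_rec_completion(), which builds the address handle and then
 * requeues (via dev_queue_xmit()) any packets that were parked on
 * path->queue or on a neighbour's queue while the lookup was pending.
 */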
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	skb_queue_head_init(&neigh->queue);
	neigh->neighbour = skb->dst->neighbour;
	*to_ipoib_neigh(skb->dst->neighbour) = neigh;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4));
	if (!path) {
		path = path_rec_create(dev,
				       (union ib_gid *) (skb->dst->neighbour->ha + 4));
		if (!path)
			goto err;
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->pathrec.dlid) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
	} else {
		neigh->ah = NULL;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			__skb_queue_tail(&neigh->queue, skb);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (!path->query && path_rec_start(dev, path))
			goto err;
	}

	spin_unlock(&priv->lock);
	return;

err:
	*to_ipoib_neigh(skb->dst->neighbour) = NULL;
	list_del(&neigh->list);
	neigh->neighbour->ops->destructor = NULL;
	kfree(neigh);

	++priv->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock(&priv->lock);
}
static void path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb->dst->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb);
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4));
	if (!path) {
		path = path_rec_create(dev,
				       (union ib_gid *) (phdr->hwaddr + 4));
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (path_rec_start(dev, path))
				__path_free(dev, path);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock(&priv->lock);
		return;
	}

	if (path->pathrec.dlid) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) phdr->hwaddr));
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock(&priv->lock);
}
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	local_irq_save(flags);
	if (!spin_trylock(&priv->tx_lock)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	/*
	 * Check if our queue is stopped.  Since we have the LLTX bit
	 * set, we can't rely on netif_stop_queue() preventing our
	 * xmit function from being called with a full queue.
	 */
	if (unlikely(netif_queue_stopped(dev))) {
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (skb->dst && skb->dst->neighbour) {
		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
			path_lookup(skb, dev);
			goto out;
		}

		neigh = *to_ipoib_neigh(skb->dst->neighbour);

		if (likely(neigh->ah)) {
			ipoib_send(dev, skb, neigh->ah,
				   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
			goto out;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock(&priv->lock);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock(&priv->lock);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast */
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb);
		} else {
			/* unicast GID -- should be ARP reply */

			if (be16_to_cpup((u16 *) skb->data) != ETH_P_ARP) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
					   IPOIB_GID_FMT "\n",
					   skb->dst ? "neigh" : "dst",
					   be16_to_cpup((u16 *) skb->data),
					   be32_to_cpup((u32 *) phdr->hwaddr),
					   IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
				dev_kfree_skb_any(skb);
				++priv->stats.tx_dropped;
				goto out;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

out:
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}
static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return &priv->stats;
}
static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %ld\n",
		   jiffies - dev->trans_start);
	/* XXX reset QP, etc. */
}
static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     void *daddr, void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if (!skb->dst || !skb->dst->neighbour) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}
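/*
 * Resulting skb layouts from ipoib_hard_header(): with a neighbour,
 * [4-byte IPoIB header][payload]; without one,
 * [20-byte pseudoheader (destination hw address)][4-byte IPoIB
 * header][payload].  ipoib_start_xmit() pulls the pseudoheader back
 * off before deciding where to send the packet.
 */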
static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	schedule_work(&priv->restart_task);
}
static void ipoib_neigh_destructor(struct neighbour *n)
{
	struct ipoib_neigh *neigh = *to_ipoib_neigh(n);
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;

	ipoib_dbg(priv,
		  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
		  be32_to_cpup((__be32 *) n->ha),
		  IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4))));

	spin_lock_irqsave(&priv->lock, flags);

	if (neigh) {
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		list_del(&neigh->list);
		*to_ipoib_neigh(n) = NULL;
		kfree(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
static int ipoib_neigh_setup(struct neighbour *neigh)
{
	/*
	 * Is this kosher? I can't find anybody in the kernel that
	 * sets neigh->destructor, so we should be able to set it here
	 * without trouble.
	 */
	neigh->ops->destructor = ipoib_neigh_destructor;

	return 0;
}
static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_setup = ipoib_neigh_setup;

	return 0;
}
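/*
 * Note: to_ipoib_neigh() (defined in ipoib.h) stashes the
 * struct ipoib_neigh pointer in the unused, pointer-aligned tail of
 * neigh->ha beyond the 20-byte IPoIB hardware address, so no extra
 * per-neighbour allocation hook is needed.
 */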
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */

	priv->rx_ring = kmalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_buf),
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, IPOIB_RX_RING_SIZE);
		goto out;
	}
	memset(priv->rx_ring, 0,
	       IPOIB_RX_RING_SIZE * sizeof (struct ipoib_buf));

	priv->tx_ring = kmalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_buf),
				GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, IPOIB_TX_RING_SIZE);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0,
	       IPOIB_TX_RING_SIZE * sizeof (struct ipoib_buf));

	/* priv->tx_head & tx_tail are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}
void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_file(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	if (priv->rx_ring) {
		kfree(priv->rx_ring);
		priv->rx_ring = NULL;
	}

	if (priv->tx_ring) {
		kfree(priv->tx_ring);
		priv->tx_ring = NULL;
	}
}
static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->open                = ipoib_open;
	dev->stop                = ipoib_stop;
	dev->change_mtu          = ipoib_change_mtu;
	dev->hard_start_xmit     = ipoib_start_xmit;
	dev->get_stats           = ipoib_get_stats;
	dev->tx_timeout          = ipoib_timeout;
	dev->hard_header         = ipoib_hard_header;
	dev->set_multicast_list  = ipoib_set_mcast_list;
	dev->neigh_setup         = ipoib_neigh_setup_dev;

	dev->watchdog_timeo      = HZ;

	dev->rebuild_header      = NULL;
	dev->set_mac_address     = NULL;
	dev->header_cache_update = NULL;

	dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len     = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len            = INFINIBAND_ALEN;
	dev->type                = ARPHRD_INFINIBAND;
	dev->tx_queue_len        = IPOIB_TX_RING_SIZE * 2;
	dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

	/* MTU will be reset when mcast join happens */
	dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
	priv->mcast_mtu          = priv->admin_mtu = dev->mtu;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	SET_MODULE_OWNER(dev);

	priv->dev = dev;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	init_MUTEX(&priv->mcast_mutex);
	init_MUTEX(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
	INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
	INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
	INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
}
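/*
 * The three-argument INIT_WORK() above is the pre-2.6.20 workqueue
 * API: each handler is invoked with the third argument (priv->dev
 * here) as its void * parameter.
 */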
struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}
static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct ipoib_dev_priv *priv =
		netdev_priv(container_of(cdev, struct net_device, class_dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static ssize_t create_child(struct class_device *cdev,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
			     pkey);

	return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
static ssize_t delete_child(struct class_device *cdev,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev),
				pkey);

	return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
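/*
 * Usage sketch (the P_Key value is only an example): writing a P_Key
 * to these attributes creates or removes a child interface on the
 * parent device, e.g.
 *
 *	echo 0x8001 > /sys/class/net/ib0/create_child
 *	echo 0x8001 > /sys/class/net/ib0/delete_child
 */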
int ipoib_add_pkey_attr(struct net_device *dev)
{
	return class_device_create_file(&dev->class_dev,
					&class_device_attr_pkey);
}
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	}

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	}
	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	if (ipoib_create_debug_file(priv->dev))
		goto debug_failed;

	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (class_device_create_file(&priv->dev->class_dev,
				     &class_device_attr_create_child))
		goto sysfs_failed;
	if (class_device_create_file(&priv->dev->class_dev,
				     &class_device_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_file(priv->dev);

debug_failed:
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}
static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}
static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}
static int __init ipoib_init_module(void)
{
	int ret;

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_wq;

	return 0;

err_wq:
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}
static void __exit ipoib_cleanup_module(void)
{
	ipoib_unregister_debugfs();
	ib_unregister_client(&ipoib_client);
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);