Fedora Core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1dae4b2..b04b72c 100644
@@ -114,7 +114,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);
-               ipoib_neigh_free(neigh);
+               ipoib_neigh_free(dev, neigh);
        }
 
        spin_unlock_irqrestore(&priv->lock, flags);
@@ -154,7 +154,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
        return mcast;
 }
 
-static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, union ib_gid *mgid)
+static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->multicast_tree.rb_node;
@@ -165,7 +165,7 @@ static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, union ib_g
 
                mcast = rb_entry(n, struct ipoib_mcast, rb_node);
 
-               ret = memcmp(mgid->raw, mcast->mcmember.mgid.raw,
+               ret = memcmp(mgid, mcast->mcmember.mgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
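
Note: taking the MGID as a plain void * lets callers pass the 16 raw GID bytes directly (for example a pointer into a received hardware address) instead of first building a union ib_gid; the lookup then compares raw bytes with memcmp(). A minimal sketch of such a caller, with a hypothetical function name; the 4-byte offset assumes the usual 20-byte IPoIB hardware address whose GID follows the flags/QPN prefix:

    /* Hypothetical caller: hwaddr points at a 20-byte IPoIB hardware
     * address; the multicast GID starts 4 bytes in, after flags/QPN. */
    static void example_mcast_xmit(struct net_device *dev, u8 *hwaddr,
                                   struct sk_buff *skb)
    {
            ipoib_mcast_send(dev, hwaddr + 4, skb);
    }
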
@@ -264,6 +264,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                if (!ah) {
                        ipoib_warn(priv, "ib_address_create failed\n");
                } else {
+                       spin_lock_irq(&priv->lock);
+                       mcast->ah = ah;
+                       spin_unlock_irq(&priv->lock);
+
                        ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
                                        " AV %p, LID 0x%04x, SL %d\n",
                                        IPOIB_GID_ARG(mcast->mcmember.mgid),
@@ -271,10 +275,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                                        be16_to_cpu(mcast->mcmember.mlid),
                                        mcast->mcmember.sl);
                }
-
-               spin_lock_irq(&priv->lock);
-               mcast->ah = ah;
-               spin_unlock_irq(&priv->lock);
        }
 
        /* actually send any queued packets */
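
Note: the two hunks above move the mcast->ah store so the address handle is published, under priv->lock, only when ib_address_create() actually returned one, and before the debug print that refers to it; previously the store happened unconditionally after the if/else. The resulting success branch looks roughly like this:

    } else {
            spin_lock_irq(&priv->lock);
            mcast->ah = ah;                 /* publish the AH only on success */
            spin_unlock_irq(&priv->lock);

            /* ... ipoib_dbg_mcast() print of MGID, AV, LID and SL ... */
    }
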
@@ -326,6 +326,7 @@ ipoib_mcast_sendonly_join_complete(int status,
 
                /* Clear the busy flag so we try again */
                clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+               mcast->query = NULL;
        }
 
        complete(&mcast->done);
@@ -360,7 +361,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
 
        init_completion(&mcast->done);
 
-       ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
+       ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port, &rec,
                                     IB_SA_MCMEMBER_REC_MGID            |
                                     IB_SA_MCMEMBER_REC_PORT_GID        |
                                     IB_SA_MCMEMBER_REC_PKEY            |
@@ -398,7 +399,8 @@ static void ipoib_mcast_join_complete(int status,
                mcast->backoff = 1;
                mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_work(ipoib_workqueue, &priv->mcast_task);
+                       queue_delayed_work(ipoib_workqueue,
+                                          &priv->mcast_task, 0);
                mutex_unlock(&mcast_mutex);
                complete(&mcast->done);
                return;
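
Note: mcast_task is a struct delayed_work from here on, so an immediate re-queue has to go through queue_delayed_work(); a zero delay keeps the old queue_work() behaviour, only the call matches the new type:

    /* Immediate kick of a delayed_work item, equivalent to the old queue_work(): */
    queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
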
@@ -434,7 +436,8 @@ static void ipoib_mcast_join_complete(int status,
 
        if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
                if (status == -ETIMEDOUT)
-                       queue_work(ipoib_workqueue, &priv->mcast_task);
+                       queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
+                                          0);
                else
                        queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
                                           mcast->backoff * HZ);
@@ -471,22 +474,32 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 
        if (create) {
                comp_mask |=
-                       IB_SA_MCMEMBER_REC_QKEY         |
-                       IB_SA_MCMEMBER_REC_SL           |
-                       IB_SA_MCMEMBER_REC_FLOW_LABEL   |
-                       IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
+                       IB_SA_MCMEMBER_REC_QKEY                 |
+                       IB_SA_MCMEMBER_REC_MTU_SELECTOR         |
+                       IB_SA_MCMEMBER_REC_MTU                  |
+                       IB_SA_MCMEMBER_REC_TRAFFIC_CLASS        |
+                       IB_SA_MCMEMBER_REC_RATE_SELECTOR        |
+                       IB_SA_MCMEMBER_REC_RATE                 |
+                       IB_SA_MCMEMBER_REC_SL                   |
+                       IB_SA_MCMEMBER_REC_FLOW_LABEL           |
+                       IB_SA_MCMEMBER_REC_HOP_LIMIT;
 
                rec.qkey          = priv->broadcast->mcmember.qkey;
+               rec.mtu_selector  = IB_SA_EQ;
+               rec.mtu           = priv->broadcast->mcmember.mtu;
+               rec.traffic_class = priv->broadcast->mcmember.traffic_class;
+               rec.rate_selector = IB_SA_EQ;
+               rec.rate          = priv->broadcast->mcmember.rate;
                rec.sl            = priv->broadcast->mcmember.sl;
                rec.flow_label    = priv->broadcast->mcmember.flow_label;
-               rec.traffic_class = priv->broadcast->mcmember.traffic_class;
+               rec.hop_limit     = priv->broadcast->mcmember.hop_limit;
        }
 
        init_completion(&mcast->done);
 
-       ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask,
-                                    mcast->backoff * 1000, GFP_ATOMIC,
-                                    ipoib_mcast_join_complete,
+       ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port,
+                                    &rec, comp_mask, mcast->backoff * 1000,
+                                    GFP_ATOMIC, ipoib_mcast_join_complete,
                                     mcast, &mcast->query);
 
        if (ret < 0) {
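
Note: two things change in this call. First, ib_sa_mcmember_rec_set() (like the other ib_sa_* entry points touched in this diff) now takes a struct ib_sa_client, presumably registered once elsewhere in the driver (mainline does this in ipoib_main.c). Second, when creating a group the record now pins MTU, rate, traffic class and hop limit to the broadcast group's values, using IB_SA_EQ selectors for MTU and rate. A hedged sketch of the client registration this depends on, with hypothetical init/exit names:

    #include <rdma/ib_sa.h>

    struct ib_sa_client ipoib_sa_client;    /* shared by all SA queries */

    static int __init example_init(void)
    {
            ib_sa_register_client(&ipoib_sa_client);
            /* ... remaining module init ... */
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* ... */
            ib_sa_unregister_client(&ipoib_sa_client);
    }
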
@@ -506,10 +519,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                mcast->query_id = ret;
 }
 
-void ipoib_mcast_join_task(void *dev_ptr)
+void ipoib_mcast_join_task(struct work_struct *work)
 {
-       struct net_device *dev = dev_ptr;
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_dev_priv *priv =
+               container_of(work, struct ipoib_dev_priv, mcast_task.work);
+       struct net_device *dev = priv->dev;
 
        if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
                return;
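
Note: this is the 2.6.20-era workqueue conversion. Work handlers now receive the struct work_struct * itself rather than an opaque void *, so the private data is recovered with container_of(); for a delayed item such as mcast_task the embedded work member is the field to go through. The same pattern, minus the .work hop, applies to plain work_struct members like restart_task further down. A minimal sketch, assuming a priv layout along the lines of ipoib.h:

    struct ipoib_dev_priv {
            struct net_device  *dev;
            struct delayed_work mcast_task;         /* delayed item */
            struct work_struct  restart_task;       /* plain item */
            /* ... */
    };

    static void example_join_task(struct work_struct *work)
    {
            struct ipoib_dev_priv *priv =
                    container_of(work, struct ipoib_dev_priv, mcast_task.work);
            struct net_device *dev = priv->dev;

            /* ... body uses dev/priv exactly as the old void * version did ... */
    }
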
@@ -527,7 +541,7 @@ void ipoib_mcast_join_task(void *dev_ptr)
                        priv->local_rate = attr.active_speed *
                                ib_width_enum_to_int(attr.active_width);
                } else
-                       ipoib_warn(priv, "ib_query_port failed\n");
+               ipoib_warn(priv, "ib_query_port failed\n");
        }
 
        if (!priv->broadcast) {
@@ -599,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
        mutex_lock(&mcast_mutex);
        if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-               queue_work(ipoib_workqueue, &priv->mcast_task);
+               queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
        mutex_unlock(&mcast_mutex);
 
        spin_lock_irq(&priv->lock);
@@ -680,7 +694,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
         * Just make one shot at leaving and don't wait for a reply;
         * if we fail, too bad.
         */
-       ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec,
+       ret = ib_sa_mcmember_rec_delete(&ipoib_sa_client, priv->ca, priv->port, &rec,
                                        IB_SA_MCMEMBER_REC_MGID         |
                                        IB_SA_MCMEMBER_REC_PORT_GID     |
                                        IB_SA_MCMEMBER_REC_PKEY         |
@@ -694,8 +708,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
        return 0;
 }
 
-void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
-                     struct sk_buff *skb)
+void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_mcast *mcast;
@@ -718,7 +731,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
        if (!mcast) {
                /* Let's create a new send only group now */
                ipoib_dbg_mcast(priv, "setting up send only multicast group for "
-                               IPOIB_GID_FMT "\n", IPOIB_GID_ARG(*mgid));
+                               IPOIB_GID_FMT "\n", IPOIB_GID_RAW_ARG(mgid));
 
                mcast = ipoib_mcast_alloc(dev, 0);
                if (!mcast) {
@@ -730,7 +743,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
                }
 
                set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
-               mcast->mcmember.mgid = *mgid;
+               memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
                __ipoib_mcast_add(dev, mcast);
                list_add_tail(&mcast->list, &priv->multicast_list);
        }
@@ -795,7 +808,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
        }
 
        if (priv->broadcast) {
-               rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+               rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
                list_add_tail(&priv->broadcast->list, &remove_list);
                priv->broadcast = NULL;
        }
@@ -808,10 +821,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
        }
 }
 
-void ipoib_mcast_restart_task(void *dev_ptr)
+void ipoib_mcast_restart_task(struct work_struct *work)
 {
-       struct net_device *dev = dev_ptr;
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       struct ipoib_dev_priv *priv =
+               container_of(work, struct ipoib_dev_priv, restart_task);
+       struct net_device *dev = priv->dev;
        struct dev_mc_list *mclist;
        struct ipoib_mcast *mcast, *tmcast;
        LIST_HEAD(remove_list);
@@ -821,7 +835,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 
        ipoib_mcast_stop_thread(dev, 0);
 
-       spin_lock_irqsave(&dev->xmit_lock, flags);
+       local_irq_save(flags);
+       netif_tx_lock(dev);
        spin_lock(&priv->lock);
 
        /*
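
Note: the per-device xmit lock is no longer taken directly; the netif_tx_lock()/netif_tx_unlock() helpers are used instead. They do not touch interrupt state, so the patch brackets them with local_irq_save()/local_irq_restore() to keep the semantics of the old spin_lock_irqsave() on dev->xmit_lock. The pattern as it now stands:

    unsigned long flags;

    local_irq_save(flags);          /* netif_tx_lock() leaves IRQs alone */
    netif_tx_lock(dev);
    spin_lock(&priv->lock);

    /* ... rebuild the multicast list from dev->mc_list ... */

    spin_unlock(&priv->lock);
    netif_tx_unlock(dev);
    local_irq_restore(flags);
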
@@ -864,8 +879,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 
                        if (mcast) {
                                /* Destroy the send only entry */
-                               list_del(&mcast->list);
-                               list_add_tail(&mcast->list, &remove_list);
+                               list_move_tail(&mcast->list, &remove_list);
 
                                rb_replace_node(&mcast->rb_node,
                                                &nmcast->rb_node,
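
Note: list_move_tail() from <linux/list.h> is exactly the delete-then-append pair it replaces here and in the next hunk; this is a tidy-up, not a behavioural change:

    /* before */
    list_del(&mcast->list);
    list_add_tail(&mcast->list, &remove_list);

    /* after: same effect in one call */
    list_move_tail(&mcast->list, &remove_list);
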
@@ -890,13 +904,13 @@ void ipoib_mcast_restart_task(void *dev_ptr)
                        rb_erase(&mcast->rb_node, &priv->multicast_tree);
 
                        /* Move to the remove list */
-                       list_del(&mcast->list);
-                       list_add_tail(&mcast->list, &remove_list);
+                       list_move_tail(&mcast->list, &remove_list);
                }
        }
 
        spin_unlock(&priv->lock);
-       spin_unlock_irqrestore(&dev->xmit_lock, flags);
+       netif_tx_unlock(dev);
+       local_irq_restore(flags);
 
        /* We have to cancel outside of the spinlock */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {