Fix a problem with not allocating enough room for netlink messages.
[sliver-openvswitch.git] / datapath / datapath.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

/* Functions for managing the dp interface/device. */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <net/genetlink.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <asm/system.h>
#include <linux/netfilter_bridge.h>
#include <linux/inetdevice.h>
#include <linux/list.h>

#include "openflow-netlink.h"
#include "datapath.h"
#include "table.h"
#include "chain.h"
#include "forward.h"
#include "flow.h"
#include "datapath_t.h"

#include "compat.h"


/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

#define BRIDGE_PORT_NO_FLOOD    0x00000001

#define UINT32_MAX 4294967295U
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))

struct net_bridge_port {
        u16 port_no;
        u32 flags;
        struct datapath *dp;
        struct net_device *dev;
        struct list_head node; /* Element in datapath.port_list. */
};

static struct genl_family dp_genl_family;
static struct genl_multicast_group mc_group;

int dp_dev_setup(struct net_device *dev);

/* It's hard to imagine wanting more than one datapath, but... */
#define DP_MAX 32

/* datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * the dp_mutex, but to access the chain you need to take the rcu_read_lock
 * also (because dp_mutex doesn't prevent flows from being destroyed).
 */
static struct datapath *dps[DP_MAX];
static DEFINE_MUTEX(dp_mutex);
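
/* Illustrative sketch (not part of the datapath): the reader side of the
 * locking rules above.  A hypothetical helper that walks a datapath's chain
 * would hold rcu_read_lock() around both the rcu_dereference() and every use
 * of the chain, since dp_mutex alone does not keep flows alive:
 *
 *      rcu_read_lock();
 *      dp = rcu_dereference(dps[dp_idx]);
 *      if (dp)
 *              flow = chain_lookup(dp->chain, &key);
 *      ...use 'flow' here, before unlocking...
 *      rcu_read_unlock();
 */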

static int dp_maint_func(void *data);
static int send_port_status(struct net_bridge_port *p, uint8_t status);


/* nla_unreserve - reduce amount of space reserved by nla_reserve
 * @skb: socket buffer from which to recover room
 * @nla: netlink attribute to adjust
 * @len: amount by which to reduce attribute payload
 *
 * Reduces the amount of space reserved by a call to nla_reserve.
 *
 * No other attributes may be added between calling nla_reserve and this
 * function, since that would create a hole in the message.
 */
void nla_unreserve(struct sk_buff *skb, struct nlattr *nla, int len)
{
        skb->tail -= len;
        skb->len  -= len;

        nla->nla_len -= len;
}
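
/* Example (sketch): the reserve/unreserve pattern used below.  Space for a
 * worst-case attribute payload is reserved up front, and the unused tail is
 * returned to the skb once the real length is known.  fill_payload() and the
 * length variables are stand-ins, not functions in this file:
 *
 *      attr = nla_reserve(skb, DP_GENL_A_OPENFLOW, max_len);
 *      actual_len = fill_payload(nla_data(attr));
 *      nla_unreserve(skb, attr, max_len - actual_len);
 */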

static void *
alloc_openflow_skb(struct datapath *dp, size_t openflow_len, uint8_t type,
                   const struct sender *sender, struct sk_buff **pskb)
{
        size_t genl_len;
        struct sk_buff *skb;
        struct nlattr *attr;
        struct ofp_header *oh;

        genl_len = nlmsg_total_size(GENL_HDRLEN + dp_genl_family.hdrsize);
        genl_len += nla_total_size(sizeof(uint32_t)); /* DP_GENL_A_DP_IDX */
        genl_len += nla_total_size(openflow_len);     /* DP_GENL_A_OPENFLOW */
        skb = *pskb = genlmsg_new(genl_len, GFP_ATOMIC);
        if (!skb) {
                if (net_ratelimit())
                        printk(KERN_ERR "alloc_openflow_skb: genlmsg_new failed\n");
                return NULL;
        }

        /* Assemble the Generic Netlink wrapper. */
        if (!genlmsg_put(skb,
                         sender ? sender->pid : 0,
                         sender ? sender->seq : 0,
                         &dp_genl_family, 0, DP_GENL_C_OPENFLOW))
                BUG();
        if (nla_put_u32(skb, DP_GENL_A_DP_IDX, dp->dp_idx) < 0)
                BUG();
        attr = nla_reserve(skb, DP_GENL_A_OPENFLOW, openflow_len);
        BUG_ON(!attr);
        nlmsg_end(skb, (struct nlmsghdr *) skb->data);

        /* Fill in the OpenFlow header. */
        oh = nla_data(attr);
        oh->version = OFP_VERSION;
        oh->type = type;
        oh->length = htons(openflow_len);
        oh->xid = sender ? sender->xid : 0;

        return oh;
}

static void
resize_openflow_skb(struct sk_buff *skb,
                    struct ofp_header *oh, size_t new_length)
{
        struct nlattr *attr;

        BUG_ON(new_length > ntohs(oh->length));
        attr = ((void *) oh) - NLA_HDRLEN;
        nla_unreserve(skb, attr, ntohs(oh->length) - new_length);
        oh->length = htons(new_length);
        nlmsg_end(skb, (struct nlmsghdr *) skb->data);
}

static int
send_openflow_skb(struct sk_buff *skb, const struct sender *sender)
{
        int err = (sender
                   ? genlmsg_unicast(skb, sender->pid)
                   : genlmsg_multicast(skb, 0, mc_group.id, GFP_ATOMIC));
        if (err && net_ratelimit())
                printk(KERN_WARNING "send_openflow_skb: send failed: %d\n",
                       err);
        return err;
}
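
/* Sketch of how the three helpers above compose.  This is the pattern the
 * dp_send_*() functions below follow; 'max_body_len', 'body_len', and the
 * fill step are placeholders:
 *
 *      struct sk_buff *skb;
 *      struct ofp_header *oh;
 *
 *      oh = alloc_openflow_skb(dp, max_body_len, OFPT_..., sender, &skb);
 *      if (!oh)
 *              return -ENOMEM;
 *      body_len = ...fill in the message, up to max_body_len bytes...;
 *      resize_openflow_skb(skb, oh, body_len);
 *      return send_openflow_skb(skb, sender);
 */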

/* Generates a unique datapath id.  It incorporates the datapath index
 * and a hardware address, if available.  If not, it generates a random
 * one.
 */
static uint64_t gen_datapath_id(uint16_t dp_idx)
{
        uint64_t id;
        int i;
        struct net_device *dev;

        /* The top 16 bits are used to identify the datapath.  The lower 48
         * bits use an interface address.  */
        id = (uint64_t)dp_idx << 48;
        if ((dev = dev_get_by_name(&init_net, "ctl0"))
                        || (dev = dev_get_by_name(&init_net, "eth0"))) {
                for (i = 0; i < ETH_ALEN; i++) {
                        id |= (uint64_t)dev->dev_addr[i] << (8 * (ETH_ALEN - 1 - i));
                }
                dev_put(dev);
        } else {
                /* Randomly choose the lower 48 bits if we cannot find an
                 * address and mark the most significant bit to indicate that
                 * this was randomly generated. */
                uint8_t rand[ETH_ALEN];
                get_random_bytes(rand, ETH_ALEN);
                id |= (uint64_t)1 << 63;
                for (i = 0; i < ETH_ALEN; i++) {
                        id |= (uint64_t)rand[i] << (8 * (ETH_ALEN - 1 - i));
                }
        }

        return id;
}
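
/* Worked example: with dp_idx 1 and an interface address of
 * 00:11:22:33:44:55, the id above comes out as 0x0001001122334455 --
 * dp_idx in bits 48-63, the MAC in bits 0-47. */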

/* Creates a new datapath numbered 'dp_idx'.  Returns 0 for success or a
 * negative error code.
 *
 * Not called with any locks. */
static int new_dp(int dp_idx)
{
        struct datapath *dp;
        int err;

        if (dp_idx < 0 || dp_idx >= DP_MAX)
                return -EINVAL;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        mutex_lock(&dp_mutex);
        dp = rcu_dereference(dps[dp_idx]);
        if (dp != NULL) {
                err = -EEXIST;
                goto err_unlock;
        }

        err = -ENOMEM;
        dp = kzalloc(sizeof *dp, GFP_KERNEL);
        if (dp == NULL)
                goto err_unlock;

        dp->dp_idx = dp_idx;
        dp->id = gen_datapath_id(dp_idx);
        dp->chain = chain_create(dp);
        if (dp->chain == NULL)
                goto err_free_dp;
        INIT_LIST_HEAD(&dp->port_list);

#if 0
        /* Setup our "of" device */
        dp->dev.priv = dp;
        rtnl_lock();
        err = dp_dev_setup(&dp->dev);
        rtnl_unlock();
        if (err != 0)
                printk("datapath: problem setting up 'of' device\n");
#endif

        dp->flags = 0;
        dp->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;

        dp->dp_task = kthread_run(dp_maint_func, dp, "dp%d", dp_idx);
        if (IS_ERR(dp->dp_task)) {
                err = PTR_ERR(dp->dp_task);
                goto err_destroy_chain;
        }

        rcu_assign_pointer(dps[dp_idx], dp);
        mutex_unlock(&dp_mutex);

        return 0;

err_destroy_chain:
        chain_destroy(dp->chain);
err_free_dp:
        kfree(dp);
err_unlock:
        mutex_unlock(&dp_mutex);
        module_put(THIS_MODULE);
        return err;
}

/* Find and return a free port number under 'dp'.  Called under dp_mutex. */
static int find_portno(struct datapath *dp)
{
        int i;
        for (i = 0; i < OFPP_MAX; i++)
                if (dp->ports[i] == NULL)
                        return i;
        return -EXFULL;
}

static struct net_bridge_port *new_nbp(struct datapath *dp,
                                       struct net_device *dev)
{
        struct net_bridge_port *p;
        int port_no;

        port_no = find_portno(dp);
        if (port_no < 0)
                return ERR_PTR(port_no);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL)
                return ERR_PTR(-ENOMEM);

        p->dp = dp;
        dev_hold(dev);
        p->dev = dev;
        p->port_no = port_no;

        return p;
}

/* Called with dp_mutex. */
int add_switch_port(struct datapath *dp, struct net_device *dev)
{
        struct net_bridge_port *p;

        if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER)
                return -EINVAL;

        if (dev->br_port != NULL)
                return -EBUSY;

        p = new_nbp(dp, dev);
        if (IS_ERR(p))
                return PTR_ERR(p);

        /* new_nbp() already took a reference to 'dev' on our behalf; taking a
         * second one here would leak it, since del_switch_port() does a
         * single dev_put(). */
        rcu_assign_pointer(dev->br_port, p);
        rtnl_lock();
        dev_set_promiscuity(dev, 1);
        rtnl_unlock();

        rcu_assign_pointer(dp->ports[p->port_no], p);
        list_add_rcu(&p->node, &dp->port_list);

        /* Notify the ctlpath that this port has been added */
        send_port_status(p, OFPPR_ADD);

        return 0;
}

/* Delete 'p' from switch.
 * Called with dp_mutex. */
static int del_switch_port(struct net_bridge_port *p)
{
        /* First drop references to device. */
        rtnl_lock();
        dev_set_promiscuity(p->dev, -1);
        rtnl_unlock();
        list_del_rcu(&p->node);
        rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
        rcu_assign_pointer(p->dev->br_port, NULL);

        /* Then wait until no one is still using it, and destroy it. */
        synchronize_rcu();

        /* Notify the ctlpath that this port no longer exists */
        send_port_status(p, OFPPR_DELETE);

        dev_put(p->dev);
        kfree(p);

        return 0;
}

/* Called with dp_mutex. */
static void del_dp(struct datapath *dp)
{
        struct net_bridge_port *p, *n;

#if 0
        /* Unregister the "of" device of this dp */
        rtnl_lock();
        unregister_netdevice(&dp->dev);
        rtnl_unlock();
#endif

        kthread_stop(dp->dp_task);

        /* Drop references to DP. */
        list_for_each_entry_safe (p, n, &dp->port_list, node)
                del_switch_port(p);
        rcu_assign_pointer(dps[dp->dp_idx], NULL);

        /* Wait until no longer in use, then destroy it. */
        synchronize_rcu();
        chain_destroy(dp->chain);
        kfree(dp);
        module_put(THIS_MODULE);
}

static int dp_maint_func(void *data)
{
        struct datapath *dp = (struct datapath *) data;

        while (!kthread_should_stop()) {
#if 1
                chain_timeout(dp->chain);
#else
                int count = chain_timeout(dp->chain);
                chain_print_stats(dp->chain);
                if (count)
                        printk("%d flows timed out\n", count);
#endif
                msleep_interruptible(MAINT_SLEEP_MSECS);
        }

        return 0;
}

/*
 * Used as br_handle_frame_hook.  (Cannot run bridge at the same time, even on
 * different set of devices!)  Returns 0 if *pskb should be processed further,
 * 1 if *pskb is handled. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
                                     struct sk_buff *skb)
{
        struct ethhdr *eh = eth_hdr(skb);
        struct sk_buff *skb_local = NULL;

        if (compare_ether_addr(eh->h_dest, skb->dev->dev_addr) == 0)
                return skb;

        if (is_broadcast_ether_addr(eh->h_dest)
                        || is_multicast_ether_addr(eh->h_dest)
                        || is_local_ether_addr(eh->h_dest))
                skb_local = skb_clone(skb, GFP_ATOMIC);

        /* Push the Ethernet header back on. */
        if (skb->protocol == htons(ETH_P_8021Q))
                skb_push(skb, VLAN_ETH_HLEN);
        else
                skb_push(skb, ETH_HLEN);

        fwd_port_input(p->dp->chain, skb, p->port_no);

        return skb_local;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
        /* Push the Ethernet header back on. */
        if ((*pskb)->protocol == htons(ETH_P_8021Q))
                skb_push(*pskb, VLAN_ETH_HLEN);
        else
                skb_push(*pskb, ETH_HLEN);

        fwd_port_input(p->dp->chain, *pskb, p->port_no);
        return 1;
}
#else
/* NB: This has only been tested on 2.4.35 */

/* Called without any locks (?) */
static void dp_frame_hook(struct sk_buff *skb)
{
        struct net_bridge_port *p = skb->dev->br_port;

        /* Push the Ethernet header back on. */
        if (skb->protocol == htons(ETH_P_8021Q))
                skb_push(skb, VLAN_ETH_HLEN);
        else
                skb_push(skb, ETH_HLEN);

        if (p) {
                rcu_read_lock();
                fwd_port_input(p->dp->chain, skb, p->port_no);
                rcu_read_unlock();
        } else
                kfree_skb(skb);
}
#endif

/* Forwarding output path.
 * Based on net/bridge/br_forward.c. */

/* Don't forward packets to originating port or with flooding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
                                 const struct sk_buff *skb)
{
        if ((skb->dev == p->dev) || (p->flags & BRIDGE_PORT_NO_FLOOD)) {
                return 0;
        }

        return 1;
}

static inline unsigned packet_length(const struct sk_buff *skb)
{
        int length = skb->len - ETH_HLEN;
        if (skb->protocol == htons(ETH_P_8021Q))
                length -= VLAN_HLEN;
        return length;
}

static int
flood(struct datapath *dp, struct sk_buff *skb)
{
        struct net_bridge_port *p;
        int prev_port;

        prev_port = -1;
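        /* Deliver a clone to every eligible port except the last one found,
         * then send the original skb to that last port.  Delaying each send
         * by one iteration lets the final port consume the original buffer
         * instead of yet another clone. */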
        list_for_each_entry_rcu (p, &dp->port_list, node) {
                if (!should_deliver(p, skb))
                        continue;
                if (prev_port != -1) {
                        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
                        if (!clone) {
                                kfree_skb(skb);
                                return -ENOMEM;
                        }
                        dp_output_port(dp, clone, prev_port);
                }
                prev_port = p->port_no;
        }
        if (prev_port != -1)
                dp_output_port(dp, skb, prev_port);
        else
                kfree_skb(skb);

        return 0;
}

/* Marks 'skb' as having originated from 'in_port' in 'dp'.
   FIXME: how are devices reference counted? */
int dp_set_origin(struct datapath *dp, uint16_t in_port,
                  struct sk_buff *skb)
{
        if (in_port < OFPP_MAX && dp->ports[in_port]) {
                skb->dev = dp->ports[in_port]->dev;
                return 0;
        }
        return -ENOENT;
}

/* Takes ownership of 'skb' and transmits it to 'out_port' on 'dp'.
 */
int dp_output_port(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct net_bridge_port *p;
        int len = skb->len;

        BUG_ON(!skb);
        if (out_port == OFPP_FLOOD)
                return flood(dp, skb);
        else if (out_port == OFPP_CONTROLLER)
                return dp_output_control(dp, skb, fwd_save_skb(skb), 0,
                                         OFPR_ACTION);
        else if (out_port == OFPP_TABLE) {
                struct sw_flow_key key;
                struct sw_flow *flow;

                flow_extract(skb, skb->dev->br_port->port_no, &key);
                flow = chain_lookup(dp->chain, &key);
                if (likely(flow != NULL)) {
                        flow_used(flow, skb);
                        execute_actions(dp, skb, &key, flow->actions,
                                        flow->n_actions);
                        return 0;
                }
                return -ESRCH;
        } else if (out_port >= OFPP_MAX)
                goto bad_port;

        p = dp->ports[out_port];
        if (p == NULL)
                goto bad_port;

        skb->dev = p->dev;
        if (packet_length(skb) > skb->dev->mtu) {
                if (net_ratelimit())
                        printk(KERN_WARNING "dropped over-mtu packet: %d > %d\n",
                               packet_length(skb), skb->dev->mtu);
                kfree_skb(skb);
                return -E2BIG;
        }

        dev_queue_xmit(skb);

        return len;

bad_port:
        kfree_skb(skb);
        if (net_ratelimit())
                printk(KERN_WARNING "can't forward to bad port %d\n", out_port);
        return -ENOENT;
}

/* Takes ownership of 'skb' and transmits it to 'dp''s control path.
 * 'buffer_id' is the id under which the packet has been buffered, or
 * (uint32_t) -1 if it has not.  'reason' indicates why 'skb' is being sent.
 * 'max_len' sets the maximum number of bytes that the caller wants to be
 * sent for a buffered packet; a value of 0 indicates the entire packet
 * should be sent. */
int
dp_output_control(struct datapath *dp, struct sk_buff *skb,
                  uint32_t buffer_id, size_t max_len, int reason)
{
        /* FIXME?  Can we avoid creating a new skbuff in the case where we
         * forward the whole packet? */
        struct sk_buff *f_skb;
        struct ofp_packet_in *opi;
        size_t fwd_len, opi_len;
        int err;

        fwd_len = skb->len;
        if ((buffer_id != (uint32_t) -1) && max_len)
                fwd_len = min(fwd_len, max_len);

        opi_len = offsetof(struct ofp_packet_in, data) + fwd_len;
        opi = alloc_openflow_skb(dp, opi_len, OFPT_PACKET_IN, NULL, &f_skb);
        if (!opi) {
                err = -ENOMEM;
                goto out;
        }
        opi->buffer_id      = htonl(buffer_id);
        opi->total_len      = htons(skb->len);
        opi->in_port        = htons(skb->dev->br_port->port_no);
        opi->reason         = reason;
        opi->pad            = 0;
        memcpy(opi->data, skb_mac_header(skb), fwd_len);
        err = send_openflow_skb(f_skb, NULL);

out:
        kfree_skb(skb);

        return err;
}
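
/* Example (sketch, not a call made in this file): a table-miss handler could
 * hand a packet to the controller along these lines, assuming OFPR_NO_MATCH
 * is the miss reason defined in the OpenFlow header:
 *
 *      dp_output_control(dp, skb, fwd_save_skb(skb),
 *                        dp->miss_send_len, OFPR_NO_MATCH);
 *
 * With a buffer id from fwd_save_skb() and max_len = dp->miss_send_len, only
 * the leading bytes travel to the controller; the rest stays buffered in the
 * kernel for a later OFPT_PACKET_OUT. */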

static void fill_port_desc(struct net_bridge_port *p, struct ofp_phy_port *desc)
{
        desc->port_no = htons(p->port_no);
        strncpy(desc->name, p->dev->name, OFP_MAX_PORT_NAME_LEN);
        desc->name[OFP_MAX_PORT_NAME_LEN-1] = '\0';
        memcpy(desc->hw_addr, p->dev->dev_addr, ETH_ALEN);
        desc->flags = htonl(p->flags);
        desc->features = 0;
        desc->speed = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,24)
        if (p->dev->ethtool_ops && p->dev->ethtool_ops->get_settings) {
                struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

                if (!p->dev->ethtool_ops->get_settings(p->dev, &ecmd)) {
                        if (ecmd.supported & SUPPORTED_10baseT_Half)
                                desc->features |= OFPPF_10MB_HD;
                        if (ecmd.supported & SUPPORTED_10baseT_Full)
                                desc->features |= OFPPF_10MB_FD;
                        if (ecmd.supported & SUPPORTED_100baseT_Half)
                                desc->features |= OFPPF_100MB_HD;
                        if (ecmd.supported & SUPPORTED_100baseT_Full)
                                desc->features |= OFPPF_100MB_FD;
                        if (ecmd.supported & SUPPORTED_1000baseT_Half)
                                desc->features |= OFPPF_1GB_HD;
                        if (ecmd.supported & SUPPORTED_1000baseT_Full)
                                desc->features |= OFPPF_1GB_FD;
                        /* 10Gbps half-duplex doesn't exist... */
                        if (ecmd.supported & SUPPORTED_10000baseT_Full)
                                desc->features |= OFPPF_10GB_FD;

                        desc->features = htonl(desc->features);
                        desc->speed = htonl(ecmd.speed);
                }
        }
#endif
}

static int
fill_features_reply(struct datapath *dp, struct ofp_switch_features *ofr)
{
        struct net_bridge_port *p;
        int port_count = 0;

        ofr->datapath_id    = cpu_to_be64(dp->id);

        ofr->n_exact        = htonl(2 * TABLE_HASH_MAX_FLOWS);
        ofr->n_compression  = 0;                        /* Not supported */
        ofr->n_general      = htonl(TABLE_LINEAR_MAX_FLOWS);
        ofr->buffer_mb      = htonl(UINT32_MAX);
        ofr->n_buffers      = htonl(N_PKT_BUFFERS);
        ofr->capabilities   = htonl(OFP_SUPPORTED_CAPABILITIES);
        ofr->actions        = htonl(OFP_SUPPORTED_ACTIONS);

        list_for_each_entry_rcu (p, &dp->port_list, node) {
                fill_port_desc(p, &ofr->ports[port_count]);
                port_count++;
        }

        return port_count;
}

int
dp_send_features_reply(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_switch_features *ofr;
        size_t ofr_len, port_max_len;
        int port_count;

        /* Overallocate. */
        port_max_len = sizeof(struct ofp_phy_port) * OFPP_MAX;
        ofr = alloc_openflow_skb(dp, sizeof(*ofr) + port_max_len,
                                 OFPT_FEATURES_REPLY, sender, &skb);
        if (!ofr)
                return -ENOMEM;

        /* Fill. */
        port_count = fill_features_reply(dp, ofr);

        /* Shrink to fit. */
        ofr_len = sizeof(*ofr) + (sizeof(struct ofp_phy_port) * port_count);
        resize_openflow_skb(skb, &ofr->header, ofr_len);
        return send_openflow_skb(skb, sender);
}

int
dp_send_config_reply(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_switch_config *osc;

        osc = alloc_openflow_skb(dp, sizeof *osc, OFPT_GET_CONFIG_REPLY, sender,
                                 &skb);
        if (!osc)
                return -ENOMEM;

        osc->flags = htons(dp->flags);
        osc->miss_send_len = htons(dp->miss_send_len);

        return send_openflow_skb(skb, sender);
}

int
dp_update_port_flags(struct datapath *dp, const struct ofp_phy_port *opp)
{
        struct net_bridge_port *p;
        int port_no = ntohs(opp->port_no);

        /* Reject out-of-range port numbers before indexing the port table. */
        if (port_no >= OFPP_MAX)
                return -1;
        p = dp->ports[port_no];

        /* Make sure the port id hasn't changed since this was sent */
        if (!p || memcmp(opp->hw_addr, p->dev->dev_addr, ETH_ALEN) != 0)
                return -1;

        p->flags = ntohl(opp->flags);

        return 0;
}

static int
send_port_status(struct net_bridge_port *p, uint8_t status)
{
        struct sk_buff *skb;
        struct ofp_port_status *ops;

        ops = alloc_openflow_skb(p->dp, sizeof *ops, OFPT_PORT_STATUS, NULL,
                                 &skb);
        if (!ops)
                return -ENOMEM;
        ops->reason = status;
        fill_port_desc(p, &ops->desc);

        return send_openflow_skb(skb, NULL);
}

int
dp_send_flow_expired(struct datapath *dp, struct sw_flow *flow)
{
        struct sk_buff *skb;
        struct ofp_flow_expired *ofe;
        unsigned long duration_j;

        ofe = alloc_openflow_skb(dp, sizeof *ofe, OFPT_FLOW_EXPIRED, NULL,
                                 &skb);
        if (!ofe)
                return -ENOMEM;

        flow_fill_match(&ofe->match, &flow->key);
        duration_j = (flow->timeout - HZ * flow->max_idle) - flow->init_time;
        ofe->duration       = htonl(duration_j / HZ);
        ofe->packet_count   = cpu_to_be64(flow->packet_count);
        ofe->byte_count     = cpu_to_be64(flow->byte_count);
        return send_openflow_skb(skb, NULL);
}

static void
fill_flow_stats(struct ofp_flow_stats *ofs, struct sw_flow *flow,
                int table_idx)
{
        ofs->match.wildcards = htons(flow->key.wildcards);
        ofs->match.in_port   = flow->key.in_port;
        memcpy(ofs->match.dl_src, flow->key.dl_src, ETH_ALEN);
        memcpy(ofs->match.dl_dst, flow->key.dl_dst, ETH_ALEN);
        ofs->match.dl_vlan   = flow->key.dl_vlan;
        ofs->match.dl_type   = flow->key.dl_type;
        ofs->match.nw_src    = flow->key.nw_src;
        ofs->match.nw_dst    = flow->key.nw_dst;
        ofs->match.nw_proto  = flow->key.nw_proto;
        memset(ofs->match.pad, 0, sizeof ofs->match.pad);
        ofs->match.tp_src    = flow->key.tp_src;
        ofs->match.tp_dst    = flow->key.tp_dst;
        ofs->duration        = htonl((jiffies - flow->init_time) / HZ);
        ofs->table_id        = table_idx;
        ofs->packet_count    = cpu_to_be64(flow->packet_count);
        ofs->byte_count      = cpu_to_be64(flow->byte_count);
}

int
dp_send_flow_stats(struct datapath *dp, const struct sender *sender,
                   const struct ofp_match *match)
{
        struct sk_buff *skb;
        struct ofp_flow_stats_reply *fsr;
        size_t header_size, fudge, flow_size;
        struct sw_flow_key match_key;
        int table_idx, n_flows, max_flows;

        header_size = offsetof(struct ofp_flow_stats_reply, flows);
        fudge = 128;
        flow_size = sizeof fsr->flows[0];
        max_flows = (NLMSG_GOODSIZE - header_size - fudge) / flow_size;
        fsr = alloc_openflow_skb(dp, header_size + max_flows * flow_size,
                                 OFPT_FLOW_STATS_REPLY, sender, &skb);
        if (!fsr)
                return -ENOMEM;

        n_flows = 0;
        flow_extract_match(&match_key, match);
        for (table_idx = 0; table_idx < dp->chain->n_tables; table_idx++) {
                struct sw_table *table = dp->chain->tables[table_idx];
                struct swt_iterator iter;

                if (n_flows >= max_flows) {
                        break;
                }

                if (!table->iterator(table, &iter)) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "iterator failed for table %d\n",
                                       table_idx);
                        continue;
                }

                for (; iter.flow; table->iterator_next(&iter)) {
                        if (flow_matches(&match_key, &iter.flow->key)) {
                                fill_flow_stats(&fsr->flows[n_flows],
                                                iter.flow, table_idx);
                                if (++n_flows >= max_flows) {
                                        break;
                                }
                        }
                }
                table->iterator_destroy(&iter);
        }
        resize_openflow_skb(skb, &fsr->header,
                            header_size + flow_size * n_flows);
        return send_openflow_skb(skb, sender);
}

static int
fill_port_stats_reply(struct datapath *dp, struct ofp_port_stats_reply *psr)
{
        struct net_bridge_port *p;
        int port_count = 0;

        list_for_each_entry_rcu (p, &dp->port_list, node) {
                struct ofp_port_stats *ps = &psr->ports[port_count++];
                struct net_device_stats *stats = p->dev->get_stats(p->dev);
                ps->port_no = htons(p->port_no);
                memset(ps->pad, 0, sizeof ps->pad);
                ps->rx_count = cpu_to_be64(stats->rx_packets);
                ps->tx_count = cpu_to_be64(stats->tx_packets);
                ps->drop_count = cpu_to_be64(stats->rx_dropped
                                             + stats->tx_dropped);
        }

        return port_count;
}

int
dp_send_port_stats(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_port_stats_reply *psr;
        size_t psr_len, port_max_len;
        int port_count;

        /* Overallocate. */
        port_max_len = sizeof(struct ofp_port_stats) * OFPP_MAX;
        psr = alloc_openflow_skb(dp, sizeof *psr + port_max_len,
                                 OFPT_PORT_STATS_REPLY, sender, &skb);
        if (!psr)
                return -ENOMEM;

        /* Fill. */
        port_count = fill_port_stats_reply(dp, psr);

        /* Shrink to fit. */
        psr_len = sizeof *psr + sizeof(struct ofp_port_stats) * port_count;
        resize_openflow_skb(skb, &psr->header, psr_len);
        return send_openflow_skb(skb, sender);
}

int
dp_send_table_stats(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_table_stats_reply *tsr;
        int i, n_tables;

        n_tables = dp->chain->n_tables;
        tsr = alloc_openflow_skb(dp, (offsetof(struct ofp_table_stats_reply,
                                               tables)
                                      + sizeof tsr->tables[0] * n_tables),
                                 OFPT_TABLE_STATS_REPLY, sender, &skb);
        if (!tsr)
                return -ENOMEM;
        for (i = 0; i < n_tables; i++) {
                struct ofp_table_stats *ots = &tsr->tables[i];
                struct sw_table_stats stats;
                dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
                strncpy(ots->name, stats.name, sizeof ots->name);
                ots->table_id = i;
                ots->pad[0] = ots->pad[1] = 0;
                ots->max_entries = htonl(stats.max_flows);
                ots->active_count = htonl(stats.n_flows);
                ots->matched_count = cpu_to_be64(0); /* FIXME */
        }
        return send_openflow_skb(skb, sender);
}

/* Generic Netlink interface.
 *
 * See netlink(7) for an introduction to netlink.  See
 * http://linux-net.osdl.org/index.php/Netlink for more information and
 * pointers on how to work with netlink and Generic Netlink in the kernel and
 * in userspace. */

static struct genl_family dp_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = 0,
        .name = DP_GENL_FAMILY_NAME,
        .version = 1,
        .maxattr = DP_GENL_A_MAX,
};

/* Attribute policy: what each attribute may contain. */
static struct nla_policy dp_genl_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
        [DP_GENL_A_MC_GROUP] = { .type = NLA_U32 },
        [DP_GENL_A_PORTNAME] = { .type = NLA_STRING }
};

static int dp_genl_add(struct sk_buff *skb, struct genl_info *info)
{
        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        return new_dp(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
}

static struct genl_ops dp_genl_ops_add_dp = {
        .cmd = DP_GENL_C_ADD_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add,
        .dumpit = NULL,
};
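
/* Userspace sketch (illustrative, not part of this module): with libnl, a
 * management tool could create datapath 0 roughly as follows.  These are the
 * standard libnl genl helpers; exact names vary slightly between libnl
 * versions (e.g. NL_AUTO_PID vs. NL_AUTO_PORT):
 *
 *      struct nl_sock *sk = nl_socket_alloc();
 *      genl_connect(sk);
 *      int family = genl_ctrl_resolve(sk, DP_GENL_FAMILY_NAME);
 *      struct nl_msg *msg = nlmsg_alloc();
 *      genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *                  DP_GENL_C_ADD_DP, 1);
 *      nla_put_u32(msg, DP_GENL_A_DP_IDX, 0);
 *      nl_send_auto(sk, msg);
 */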

struct datapath *dp_get(int dp_idx)
{
        if (dp_idx < 0 || dp_idx >= DP_MAX)
                return NULL;
        return rcu_dereference(dps[dp_idx]);
}

static int dp_genl_del(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;
        int err;

        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        mutex_lock(&dp_mutex);
        dp = dp_get(nla_get_u32((info->attrs[DP_GENL_A_DP_IDX])));
        if (!dp)
                err = -ENOENT;
        else {
                del_dp(dp);
                err = 0;
        }
        mutex_unlock(&dp_mutex);
        return err;
}

static struct genl_ops dp_genl_ops_del_dp = {
        .cmd = DP_GENL_C_DEL_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_del,
        .dumpit = NULL,
};

/* Queries a datapath for related information.  Currently the only relevant
 * information is the datapath's multicast group ID.  Really we want one
 * multicast group per datapath, but because of locking issues[*] we can't
 * easily get one.  Thus, every datapath will currently return the same
 * global multicast group ID, but in the future it would be nice to fix that.
 *
 * [*] dp_genl_add, to add a new datapath, is called under the genl_lock
 *     mutex, and genl_register_mc_group, called to acquire a new multicast
 *     group ID, also acquires genl_lock, thus deadlock.
 */
static int dp_genl_query(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;
        struct sk_buff *ans_skb = NULL;
        int dp_idx;
        int err = -ENOMEM;

        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        rcu_read_lock();
        dp_idx = nla_get_u32((info->attrs[DP_GENL_A_DP_IDX]));
        dp = dp_get(dp_idx);
        if (!dp)
                err = -ENOENT;
        else {
                void *data;
                /* Cannot sleep inside rcu_read_lock(), so no GFP_KERNEL. */
                ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
                if (!ans_skb) {
                        err = -ENOMEM;
                        goto err;
                }
                data = genlmsg_put_reply(ans_skb, info, &dp_genl_family,
                                         0, DP_GENL_C_QUERY_DP);
                if (data == NULL) {
                        err = -ENOMEM;
                        goto err;
                }
                NLA_PUT_U32(ans_skb, DP_GENL_A_DP_IDX, dp_idx);
                NLA_PUT_U32(ans_skb, DP_GENL_A_MC_GROUP, mc_group.id);

                genlmsg_end(ans_skb, data);
                err = genlmsg_reply(ans_skb, info);
                if (!err)
                        ans_skb = NULL;
        }
err:
nla_put_failure:
        if (ans_skb)
                kfree_skb(ans_skb);
        rcu_read_unlock();
        return err;
}

static struct genl_ops dp_genl_ops_query_dp = {
        .cmd = DP_GENL_C_QUERY_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_query,
        .dumpit = NULL,
};

static int dp_genl_add_del_port(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;
        struct net_device *port;
        int err;

        if (!info->attrs[DP_GENL_A_DP_IDX] || !info->attrs[DP_GENL_A_PORTNAME])
                return -EINVAL;

        /* Get datapath. */
        mutex_lock(&dp_mutex);
        dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
        if (!dp) {
                err = -ENOENT;
                goto out;
        }

        /* Get interface to add/remove. */
        port = dev_get_by_name(&init_net,
                               nla_data(info->attrs[DP_GENL_A_PORTNAME]));
        if (!port) {
                err = -ENOENT;
                goto out;
        }

        /* Execute operation. */
        if (info->genlhdr->cmd == DP_GENL_C_ADD_PORT)
                err = add_switch_port(dp, port);
        else {
                if (port->br_port == NULL || port->br_port->dp != dp) {
                        err = -ENOENT;
                        goto out_put;
                }
                err = del_switch_port(port->br_port);
        }

out_put:
        dev_put(port);
out:
        mutex_unlock(&dp_mutex);
        return err;
}

static struct genl_ops dp_genl_ops_add_port = {
        .cmd = DP_GENL_C_ADD_PORT,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add_del_port,
        .dumpit = NULL,
};

static struct genl_ops dp_genl_ops_del_port = {
        .cmd = DP_GENL_C_DEL_PORT,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add_del_port,
        .dumpit = NULL,
};

static int dp_genl_openflow(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *va = info->attrs[DP_GENL_A_OPENFLOW];
        struct datapath *dp;
        struct ofp_header *oh;
        struct sender sender;
        int err;

        if (!info->attrs[DP_GENL_A_DP_IDX] || !va)
                return -EINVAL;

        rcu_read_lock();
        dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
        if (!dp) {
                err = -ENOENT;
                goto out;
        }

        if (nla_len(va) < sizeof(struct ofp_header)) {
                err = -EINVAL;
                goto out;
        }
        oh = nla_data(va);

        sender.xid = oh->xid;
        sender.pid = info->snd_pid;
        sender.seq = info->snd_seq;
        err = fwd_control_input(dp->chain, &sender, nla_data(va), nla_len(va));

out:
        rcu_read_unlock();
        return err;
}

static struct nla_policy dp_genl_openflow_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
};

static struct genl_ops dp_genl_ops_openflow = {
        .cmd = DP_GENL_C_OPENFLOW,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_openflow_policy,
        .doit = dp_genl_openflow,
        .dumpit = NULL,
};

static struct nla_policy dp_genl_benchmark_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
        [DP_GENL_A_NPACKETS] = { .type = NLA_U32 },
        [DP_GENL_A_PSIZE] = { .type = NLA_U32 },
};

static struct genl_ops dp_genl_ops_benchmark_nl = {
        .cmd = DP_GENL_C_BENCHMARK_NL,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_benchmark_policy,
        .doit = dp_genl_benchmark_nl,
        .dumpit = NULL,
};

static struct genl_ops *dp_genl_all_ops[] = {
        /* Keep this operation first.  Generic Netlink dispatching
         * looks up operations with linear search, so we want it at the
         * front. */
        &dp_genl_ops_openflow,

        &dp_genl_ops_add_dp,
        &dp_genl_ops_del_dp,
        &dp_genl_ops_query_dp,
        &dp_genl_ops_add_port,
        &dp_genl_ops_del_port,
        &dp_genl_ops_benchmark_nl,
};

static int dp_init_netlink(void)
{
        int err;
        int i;

        err = genl_register_family(&dp_genl_family);
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(dp_genl_all_ops); i++) {
                err = genl_register_ops(&dp_genl_family, dp_genl_all_ops[i]);
                if (err)
                        goto err_unregister;
        }

        strcpy(mc_group.name, "openflow");
        err = genl_register_mc_group(&dp_genl_family, &mc_group);
        if (err < 0)
                goto err_unregister;

        return 0;

err_unregister:
        genl_unregister_family(&dp_genl_family);
        return err;
}

static void dp_uninit_netlink(void)
{
        genl_unregister_family(&dp_genl_family);
}

#define DRV_NAME        "openflow"
#define DRV_VERSION     VERSION
#define DRV_DESCRIPTION "OpenFlow switching datapath implementation"
#define DRV_COPYRIGHT   "Copyright (c) 2007, 2008 The Board of Trustees of The Leland Stanford Junior University"


static int __init dp_init(void)
{
        int err;

        printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION "\n");
        printk(KERN_INFO DRV_NAME ": " VERSION " built on " __DATE__ " " __TIME__ "\n");
        printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

        err = flow_init();
        if (err)
                goto error;

        err = dp_init_netlink();
        if (err)
                goto error_flow_exit;

        /* Hook into the callback used by the bridge to intercept packets.
         * Parasites we are. */
        if (br_handle_frame_hook)
                printk(KERN_INFO "openflow: hijacking bridge hook\n");
        br_handle_frame_hook = dp_frame_hook;

        return 0;

error_flow_exit:
        flow_exit();
error:
        printk(KERN_EMERG "openflow: failed to install!\n");
        return err;
}

static void dp_cleanup(void)
{
        fwd_exit();
        dp_uninit_netlink();
        flow_exit();
        br_handle_frame_hook = NULL;
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");