/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

/* Functions for managing the dp interface/device. */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <net/genetlink.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <asm/system.h>
#include <linux/netfilter_bridge.h>
#include <linux/inetdevice.h>
#include <linux/list.h>

#include "openflow-netlink.h"
#include "datapath.h"
#include "table.h"
#include "chain.h"
#include "forward.h"
#include "flow.h"
#include "datapath_t.h"

#include "compat.h"


/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

#define BRIDGE_PORT_NO_FLOOD    0x00000001

#define UINT32_MAX      4294967295U
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))

struct net_bridge_port {
        u16     port_no;
        u32     flags;
        struct datapath *dp;
        struct net_device *dev;
        struct list_head node; /* Element in datapath.ports. */
};

static struct genl_family dp_genl_family;
static struct genl_multicast_group mc_group;

int dp_dev_setup(struct net_device *dev);

/* It's hard to imagine wanting more than one datapath, but... */
#define DP_MAX 32

/* datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * the dp_mutex, but to access the chain you need to take the rcu_read_lock
 * also (because dp_mutex doesn't prevent flows from being destroyed).
 */
static struct datapath *dps[DP_MAX];
static DEFINE_MUTEX(dp_mutex);

static int dp_maint_func(void *data);
static int send_port_status(struct net_bridge_port *p, uint8_t status);


/* nla_unreserve - reduce amount of space reserved by nla_reserve
 * @skb: socket buffer from which to recover room
 * @nla: netlink attribute to adjust
 * @len: amount by which to reduce attribute payload
 *
 * Reduces amount of space reserved by a call to nla_reserve.
 *
 * No other attributes may be added between calling nla_reserve and this
 * function, since it will create a hole in the message.
 */
void nla_unreserve(struct sk_buff *skb, struct nlattr *nla, int len)
{
        skb->tail -= len;
        skb->len  -= len;

        nla->nla_len -= len;
}
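
/* Illustrative sketch (not called anywhere): the reserve-then-shrink
 * pattern that alloc_openflow_skb() and resize_openflow_skb() build on
 * top of nla_reserve()/nla_unreserve().  'MAX_SIZE' is a stand-in for
 * whatever worst-case bound the caller computes:
 *
 *      struct sk_buff *skb;
 *      struct ofp_switch_features *ofr
 *              = alloc_openflow_skb(dp, MAX_SIZE, OFPT_FEATURES_REPLY,
 *                                   sender, &skb);
 *      ...fill in 'n' bytes of reply, n <= MAX_SIZE...
 *      resize_openflow_skb(skb, &ofr->header, n);
 *      send_openflow_skb(skb, sender);
 */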

static void *
alloc_openflow_skb(struct datapath *dp, size_t openflow_len, uint8_t type,
                   const struct sender *sender, struct sk_buff **pskb)
{
        size_t genl_len;
        struct sk_buff *skb;
        struct nlattr *attr;
        struct ofp_header *oh;

        genl_len = nla_total_size(sizeof(uint32_t)); /* DP_GENL_A_DP_IDX */
        genl_len += nla_total_size(openflow_len);    /* DP_GENL_A_OPENFLOW */
        skb = *pskb = genlmsg_new(genl_len, GFP_ATOMIC);
        if (!skb) {
                if (net_ratelimit())
                        printk(KERN_WARNING "alloc_openflow_skb: genlmsg_new failed\n");
                return NULL;
        }

        /* Assemble the Generic Netlink wrapper. */
        if (!genlmsg_put(skb,
                         sender ? sender->pid : 0,
                         sender ? sender->seq : 0,
                         &dp_genl_family, 0, DP_GENL_C_OPENFLOW))
                BUG();
        if (nla_put_u32(skb, DP_GENL_A_DP_IDX, dp->dp_idx) < 0)
                BUG();
        attr = nla_reserve(skb, DP_GENL_A_OPENFLOW, openflow_len);
        BUG_ON(!attr);
        nlmsg_end(skb, (struct nlmsghdr *) skb->data);

        /* Fill in the header. */
        oh = nla_data(attr);
        oh->version = OFP_VERSION;
        oh->type = type;
        oh->length = htons(openflow_len);
        oh->xid = sender ? sender->xid : 0;

        return oh;
}
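
/* For reference, the message assembled above has this layout (derived
 * from the code):
 *
 *      nlmsghdr / genlmsghdr  (command DP_GENL_C_OPENFLOW)
 *      nlattr DP_GENL_A_DP_IDX: u32 datapath number
 *      nlattr DP_GENL_A_OPENFLOW: struct ofp_header followed by the
 *              rest of the 'openflow_len'-byte OpenFlow message
 */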

static void
resize_openflow_skb(struct sk_buff *skb,
                    struct ofp_header *oh, size_t new_length)
{
        struct nlattr *attr;

        BUG_ON(new_length > ntohs(oh->length));
        attr = ((void *) oh) - NLA_HDRLEN;
        nla_unreserve(skb, attr, ntohs(oh->length) - new_length);
        oh->length = htons(new_length);
        nlmsg_end(skb, (struct nlmsghdr *) skb->data);
}

static int
send_openflow_skb(struct sk_buff *skb, const struct sender *sender)
{
        int err = (sender
                   ? genlmsg_unicast(skb, sender->pid)
                   : genlmsg_multicast(skb, 0, mc_group.id, GFP_ATOMIC));
        if (err && net_ratelimit())
                printk(KERN_WARNING "send_openflow_skb: send failed: %d\n",
                       err);
        return err;
}

/* Generates a unique datapath id.  It incorporates the datapath index
 * and a hardware address, if available.  If not, it generates a random
 * one.
 */
static uint64_t gen_datapath_id(uint16_t dp_idx)
{
        uint64_t id;
        int i;
        struct net_device *dev;

        /* The top 16 bits are used to identify the datapath.  The lower 48
         * bits use an interface address.  */
        id = (uint64_t)dp_idx << 48;
        if ((dev = dev_get_by_name(&init_net, "ctl0"))
                        || (dev = dev_get_by_name(&init_net, "eth0"))) {
                for (i = 0; i < ETH_ALEN; i++) {
                        id |= (uint64_t)dev->dev_addr[i] << (8*(ETH_ALEN-1 - i));
                }
                dev_put(dev);
        } else {
                /* Randomly choose the lower 48 bits if we cannot find an
                 * address and mark the most significant bit to indicate that
                 * this was randomly generated.  (FIXME: bit 63 is also the
                 * top bit of the dp_idx field set above, so the marker can
                 * collide with large datapath indexes.) */
                uint8_t rand[ETH_ALEN];
                get_random_bytes(rand, ETH_ALEN);
                id |= (uint64_t)1 << 63;
                for (i = 0; i < ETH_ALEN; i++) {
                        id |= (uint64_t)rand[i] << (8*(ETH_ALEN-1 - i));
                }
        }

        return id;
}
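
/* Worked example (illustrative): with dp_idx 2 and hardware address
 * 00:11:22:33:44:55, gen_datapath_id() returns
 *
 *      (0x0002ULL << 48) | 0x001122334455 == 0x0002001122334455
 */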

/* Creates a new datapath numbered 'dp_idx'.  Returns 0 for success or a
 * negative error code.
 *
 * Not called with any locks. */
static int new_dp(int dp_idx)
{
        struct datapath *dp;
        int err;

        if (dp_idx < 0 || dp_idx >= DP_MAX)
                return -EINVAL;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        mutex_lock(&dp_mutex);
        dp = rcu_dereference(dps[dp_idx]);
        if (dp != NULL) {
                err = -EEXIST;
                goto err_unlock;
        }

        err = -ENOMEM;
        dp = kzalloc(sizeof *dp, GFP_KERNEL);
        if (dp == NULL)
                goto err_unlock;

        dp->dp_idx = dp_idx;
        dp->id = gen_datapath_id(dp_idx);
        dp->chain = chain_create(dp);
        if (dp->chain == NULL)
                goto err_free_dp;
        INIT_LIST_HEAD(&dp->port_list);

#if 0
        /* Setup our "of" device */
        dp->dev.priv = dp;
        rtnl_lock();
        err = dp_dev_setup(&dp->dev);
        rtnl_unlock();
        if (err != 0)
                printk("datapath: problem setting up 'of' device\n");
#endif

        dp->config.flags = 0;
        dp->config.miss_send_len = htons(OFP_DEFAULT_MISS_SEND_LEN);

        dp->dp_task = kthread_run(dp_maint_func, dp, "dp%d", dp_idx);
        if (IS_ERR(dp->dp_task)) {
                err = PTR_ERR(dp->dp_task);
                goto err_destroy_chain;
        }

        rcu_assign_pointer(dps[dp_idx], dp);
        mutex_unlock(&dp_mutex);

        return 0;

err_destroy_chain:
        chain_destroy(dp->chain);
err_free_dp:
        kfree(dp);
err_unlock:
        mutex_unlock(&dp_mutex);
        module_put(THIS_MODULE);
        return err;
}

/* Find and return a free port number under 'dp'.  Called under dp_mutex. */
static int find_portno(struct datapath *dp)
{
        int i;
        for (i = 0; i < OFPP_MAX; i++)
                if (dp->ports[i] == NULL)
                        return i;
        return -EXFULL;
}

static struct net_bridge_port *new_nbp(struct datapath *dp,
                                       struct net_device *dev)
{
        struct net_bridge_port *p;
        int port_no;

        port_no = find_portno(dp);
        if (port_no < 0)
                return ERR_PTR(port_no);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL)
                return ERR_PTR(-ENOMEM);

        p->dp = dp;
        dev_hold(dev);
        p->dev = dev;
        p->port_no = port_no;

        return p;
}

/* Called with dp_mutex. */
int add_switch_port(struct datapath *dp, struct net_device *dev)
{
        struct net_bridge_port *p;

        if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER)
                return -EINVAL;

        if (dev->br_port != NULL)
                return -EBUSY;

        p = new_nbp(dp, dev);
        if (IS_ERR(p))
                return PTR_ERR(p);

        /* new_nbp() already took a reference to 'dev', matching the single
         * dev_put() in del_switch_port(), so don't take another here. */
        rcu_assign_pointer(dev->br_port, p);
        rtnl_lock();
        dev_set_promiscuity(dev, 1);
        rtnl_unlock();

        rcu_assign_pointer(dp->ports[p->port_no], p);
        list_add_rcu(&p->node, &dp->port_list);

        /* Notify the ctlpath that this port has been added */
        send_port_status(p, OFPPR_ADD);

        return 0;
}

/* Delete 'p' from switch.
 * Called with dp_mutex. */
static int del_switch_port(struct net_bridge_port *p)
{
        /* First drop references to device. */
        rtnl_lock();
        dev_set_promiscuity(p->dev, -1);
        rtnl_unlock();
        list_del_rcu(&p->node);
        rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
        rcu_assign_pointer(p->dev->br_port, NULL);

        /* Then wait until no one is still using it, and destroy it. */
        synchronize_rcu();

        /* Notify the ctlpath that this port no longer exists */
        send_port_status(p, OFPPR_DELETE);

        dev_put(p->dev);
        kfree(p);

        return 0;
}

/* Called with dp_mutex. */
static void del_dp(struct datapath *dp)
{
        struct net_bridge_port *p, *n;

#if 0
        /* Unregister the "of" device of this dp */
        rtnl_lock();
        unregister_netdevice(&dp->dev);
        rtnl_unlock();
#endif

        kthread_stop(dp->dp_task);

        /* Drop references to DP. */
        list_for_each_entry_safe (p, n, &dp->port_list, node)
                del_switch_port(p);
        rcu_assign_pointer(dps[dp->dp_idx], NULL);

        /* Wait until no longer in use, then destroy it. */
        synchronize_rcu();
        chain_destroy(dp->chain);
        kfree(dp);
        module_put(THIS_MODULE);
}

static int dp_maint_func(void *data)
{
        struct datapath *dp = (struct datapath *) data;

        while (!kthread_should_stop()) {
#if 1
                chain_timeout(dp->chain);
#else
                int count = chain_timeout(dp->chain);
                chain_print_stats(dp->chain);
                if (count)
                        printk("%d flows timed out\n", count);
#endif
                msleep_interruptible(MAINT_SLEEP_MSECS);
        }

        return 0;
}

/*
 * Used as br_handle_frame_hook.  (Cannot run the bridge at the same time,
 * even on a different set of devices!)  Returns 0 if *pskb should be
 * processed further, 1 if *pskb is handled. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
                                     struct sk_buff *skb)
{
        struct ethhdr *eh = eth_hdr(skb);
        struct sk_buff *skb_local = NULL;

        if (compare_ether_addr(eh->h_dest, skb->dev->dev_addr) == 0)
                return skb;

        if (is_broadcast_ether_addr(eh->h_dest)
                        || is_multicast_ether_addr(eh->h_dest)
                        || is_local_ether_addr(eh->h_dest))
                skb_local = skb_clone(skb, GFP_ATOMIC);

        /* Push the Ethernet header back on. */
        if (skb->protocol == htons(ETH_P_8021Q))
                skb_push(skb, VLAN_ETH_HLEN);
        else
                skb_push(skb, ETH_HLEN);

        fwd_port_input(p->dp->chain, skb, p->port_no);

        return skb_local;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
        /* Push the Ethernet header back on. */
        if ((*pskb)->protocol == htons(ETH_P_8021Q))
                skb_push(*pskb, VLAN_ETH_HLEN);
        else
                skb_push(*pskb, ETH_HLEN);

        fwd_port_input(p->dp->chain, *pskb, p->port_no);
        return 1;
}
#else
/* NB: This has only been tested on 2.4.35 */

/* Called without any locks (?) */
static void dp_frame_hook(struct sk_buff *skb)
{
        struct net_bridge_port *p = skb->dev->br_port;

        /* Push the Ethernet header back on. */
        if (skb->protocol == htons(ETH_P_8021Q))
                skb_push(skb, VLAN_ETH_HLEN);
        else
                skb_push(skb, ETH_HLEN);

        if (p) {
                rcu_read_lock();
                fwd_port_input(p->dp->chain, skb, p->port_no);
                rcu_read_unlock();
        } else
                kfree_skb(skb);
}
#endif

/* Forwarding output path.
 * Based on net/bridge/br_forward.c. */

/* Don't forward packets to the originating port, or to a port on which
 * flooding is disabled. */
static inline int should_deliver(const struct net_bridge_port *p,
                        const struct sk_buff *skb)
{
        if ((skb->dev == p->dev) || (p->flags & BRIDGE_PORT_NO_FLOOD)) {
                return 0;
        }

        return 1;
}

static inline unsigned packet_length(const struct sk_buff *skb)
{
        int length = skb->len - ETH_HLEN;
        if (skb->protocol == htons(ETH_P_8021Q))
                length -= VLAN_HLEN;
        return length;
}
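
/* Worked example (illustrative): a maximum-size untagged Ethernet frame
 * of 1514 bytes (1500 payload + 14 header) gives packet_length() == 1500,
 * which dp_output_port() compares against the device MTU.  A 1518-byte
 * 802.1Q-tagged frame likewise yields 1500 after the VLAN_HLEN
 * adjustment. */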

static int
flood(struct datapath *dp, struct sk_buff *skb)
{
        struct net_bridge_port *p;
        int prev_port;

        /* Deferred-send trick: every eligible port except the last gets a
         * clone of 'skb'; the final port consumes 'skb' itself, so no clone
         * is made when only one port qualifies. */
        prev_port = -1;
        list_for_each_entry_rcu (p, &dp->port_list, node) {
                if (!should_deliver(p, skb))
                        continue;
                if (prev_port != -1) {
                        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
                        if (!clone) {
                                kfree_skb(skb);
                                return -ENOMEM;
                        }
                        dp_output_port(dp, clone, prev_port);
                }
                prev_port = p->port_no;
        }
        if (prev_port != -1)
                dp_output_port(dp, skb, prev_port);
        else
                kfree_skb(skb);

        return 0;
}

/* Marks 'skb' as having originated from 'in_port' in 'dp'.
   FIXME: how are devices reference counted? */
int dp_set_origin(struct datapath *dp, uint16_t in_port,
                  struct sk_buff *skb)
{
        if (in_port < OFPP_MAX && dp->ports[in_port]) {
                skb->dev = dp->ports[in_port]->dev;
                return 0;
        }
        return -ENOENT;
}

/* Takes ownership of 'skb' and transmits it to 'out_port' on 'dp'.
 */
int dp_output_port(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct net_bridge_port *p;
        int len;

        BUG_ON(!skb);
        len = skb->len;
        if (out_port == OFPP_FLOOD)
                return flood(dp, skb);
        else if (out_port == OFPP_CONTROLLER)
                return dp_output_control(dp, skb, fwd_save_skb(skb), 0,
                                         OFPR_ACTION);
        else if (out_port == OFPP_TABLE) {
                struct sw_flow_key key;
                struct sw_flow *flow;

                flow_extract(skb, skb->dev->br_port->port_no, &key);
                flow = chain_lookup(dp->chain, &key);
                if (likely(flow != NULL)) {
                        flow_used(flow, skb);
                        execute_actions(dp, skb, &key, flow->actions, flow->n_actions);
                        return 0;
                }
                return -ESRCH;
        } else if (out_port >= OFPP_MAX)
                goto bad_port;

        p = dp->ports[out_port];
        if (p == NULL)
                goto bad_port;

        skb->dev = p->dev;
        if (packet_length(skb) > skb->dev->mtu) {
                if (net_ratelimit())
                        printk("dropped over-mtu packet: %d > %d\n",
                               packet_length(skb), skb->dev->mtu);
                kfree_skb(skb);
                return -E2BIG;
        }

        dev_queue_xmit(skb);

        return len;

bad_port:
        kfree_skb(skb);
        if (net_ratelimit())
                printk("can't forward to bad port %d\n", out_port);
        return -ENOENT;
}

/* Takes ownership of 'skb' and transmits it to 'dp''s control path.  If
 * 'buffer_id' != -1, then only a prefix of 'skb' is sent; otherwise, all
 * of 'skb' is sent.  'reason' indicates why 'skb' is being sent.
 * 'max_len' sets the maximum number of bytes that the caller wants to be
 * sent; a value of 0 indicates the entire packet should be sent. */
int
dp_output_control(struct datapath *dp, struct sk_buff *skb,
                  uint32_t buffer_id, size_t max_len, int reason)
{
        /* FIXME?  Can we avoid creating a new skbuff in the case where we
         * forward the whole packet? */
        struct sk_buff *f_skb;
        struct ofp_packet_in *opi;
        size_t fwd_len, opi_len;
        int err;

        fwd_len = skb->len;
        if ((buffer_id != (uint32_t) -1) && max_len)
                fwd_len = min(fwd_len, max_len);

        opi_len = offsetof(struct ofp_packet_in, data) + fwd_len;
        opi = alloc_openflow_skb(dp, opi_len, OFPT_PACKET_IN, NULL, &f_skb);
        if (!opi) {
                err = -ENOMEM;
                goto out;
        }
        opi->buffer_id      = htonl(buffer_id);
        opi->total_len      = htons(skb->len);
        opi->in_port        = htons(skb->dev->br_port->port_no);
        opi->reason         = reason;
        opi->pad            = 0;
        memcpy(opi->data, skb_mac_header(skb), fwd_len);
        err = send_openflow_skb(f_skb, NULL);

out:
        kfree_skb(skb);

        return err;
}
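
/* Worked example (illustrative): for a 1514-byte packet with a valid
 * buffer_id and max_len of 128, only the first 128 bytes follow the
 * ofp_packet_in header, while opi->total_len still reports 1514 so the
 * controller knows the original size. */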

static void fill_port_desc(struct net_bridge_port *p, struct ofp_phy_port *desc)
{
        desc->port_no = htons(p->port_no);
        strncpy(desc->name, p->dev->name, OFP_MAX_PORT_NAME_LEN);
        desc->name[OFP_MAX_PORT_NAME_LEN-1] = '\0';
        memcpy(desc->hw_addr, p->dev->dev_addr, ETH_ALEN);
        desc->flags = htonl(p->flags);
        desc->features = 0;
        desc->speed = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,24)
        if (p->dev->ethtool_ops && p->dev->ethtool_ops->get_settings) {
                struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

                if (!p->dev->ethtool_ops->get_settings(p->dev, &ecmd)) {
                        if (ecmd.supported & SUPPORTED_10baseT_Half)
                                desc->features |= OFPPF_10MB_HD;
                        if (ecmd.supported & SUPPORTED_10baseT_Full)
                                desc->features |= OFPPF_10MB_FD;
                        if (ecmd.supported & SUPPORTED_100baseT_Half)
                                desc->features |= OFPPF_100MB_HD;
                        if (ecmd.supported & SUPPORTED_100baseT_Full)
                                desc->features |= OFPPF_100MB_FD;
                        if (ecmd.supported & SUPPORTED_1000baseT_Half)
                                desc->features |= OFPPF_1GB_HD;
                        if (ecmd.supported & SUPPORTED_1000baseT_Full)
                                desc->features |= OFPPF_1GB_FD;
                        /* 10Gbps half-duplex doesn't exist... */
                        if (ecmd.supported & SUPPORTED_10000baseT_Full)
                                desc->features |= OFPPF_10GB_FD;

                        desc->features = htonl(desc->features);
                        desc->speed = htonl(ecmd.speed);
                }
        }
#endif
}

static int
fill_features_reply(struct datapath *dp, struct ofp_switch_features *ofr)
{
        struct net_bridge_port *p;
        int port_count = 0;

        ofr->datapath_id    = cpu_to_be64(dp->id);

        ofr->n_exact        = htonl(2 * TABLE_HASH_MAX_FLOWS);
        ofr->n_mac_only     = htonl(TABLE_MAC_MAX_FLOWS);
        ofr->n_compression  = 0;        /* Not supported */
        ofr->n_general      = htonl(TABLE_LINEAR_MAX_FLOWS);
        ofr->buffer_mb      = htonl(UINT32_MAX);
        ofr->n_buffers      = htonl(N_PKT_BUFFERS);
        ofr->capabilities   = htonl(OFP_SUPPORTED_CAPABILITIES);
        ofr->actions        = htonl(OFP_SUPPORTED_ACTIONS);

        list_for_each_entry_rcu (p, &dp->port_list, node) {
                fill_port_desc(p, &ofr->ports[port_count]);
                port_count++;
        }

        return port_count;
}

int
dp_send_features_reply(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_switch_features *ofr;
        size_t ofr_len, port_max_len;
        int port_count;

        /* Overallocate. */
        port_max_len = sizeof(struct ofp_phy_port) * OFPP_MAX;
        ofr = alloc_openflow_skb(dp, sizeof(*ofr) + port_max_len,
                                 OFPT_FEATURES_REPLY, sender, &skb);
        if (!ofr)
                return -ENOMEM;

        /* Fill. */
        port_count = fill_features_reply(dp, ofr);

        /* Shrink to fit. */
        ofr_len = sizeof(*ofr) + (sizeof(struct ofp_phy_port) * port_count);
        resize_openflow_skb(skb, &ofr->header, ofr_len);
        return send_openflow_skb(skb, sender);
}

int
dp_send_config_reply(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_switch_config *osc;

        /* This is a config reply, not a port-status message, so use the
         * OFPT_GET_CONFIG_REPLY type (the original OFPT_PORT_STATUS here
         * looks like a copy-paste slip from send_port_status()). */
        osc = alloc_openflow_skb(dp, sizeof *osc, OFPT_GET_CONFIG_REPLY,
                                 sender, &skb);
        if (!osc)
                return -ENOMEM;
        memcpy(((char *)osc) + sizeof osc->header,
               ((char *)&dp->config) + sizeof dp->config.header,
               sizeof dp->config - sizeof dp->config.header);
        return send_openflow_skb(skb, sender);
}

int
dp_update_port_flags(struct datapath *dp, const struct ofp_phy_port *opp)
{
        struct net_bridge_port *p;

        /* 'opp' fields arrive in network byte order, so convert with
         * ntohs()/ntohl() (the reverse of fill_port_desc()). */
        p = dp->ports[ntohs(opp->port_no)];

        /* Make sure the port id hasn't changed since this was sent */
        if (!p || memcmp(opp->hw_addr, p->dev->dev_addr, ETH_ALEN) != 0)
                return -1;

        p->flags = ntohl(opp->flags);

        return 0;
}


static int
send_port_status(struct net_bridge_port *p, uint8_t status)
{
        struct sk_buff *skb;
        struct ofp_port_status *ops;

        ops = alloc_openflow_skb(p->dp, sizeof *ops, OFPT_PORT_STATUS, NULL,
                                 &skb);
        if (!ops)
                return -ENOMEM;
        ops->reason = status;
        fill_port_desc(p, &ops->desc);

        return send_openflow_skb(skb, NULL);
}

int
dp_send_flow_expired(struct datapath *dp, struct sw_flow *flow)
{
        struct sk_buff *skb;
        struct ofp_flow_expired *ofe;
        unsigned long duration_j;

        ofe = alloc_openflow_skb(dp, sizeof *ofe, OFPT_FLOW_EXPIRED, NULL,
                                 &skb);
        if (!ofe)
                return -ENOMEM;

        flow_fill_match(&ofe->match, &flow->key);
        duration_j = (flow->timeout - HZ * flow->max_idle) - flow->init_time;
        ofe->duration       = htonl(duration_j / HZ);
        ofe->packet_count   = cpu_to_be64(flow->packet_count);
        ofe->byte_count     = cpu_to_be64(flow->byte_count);
        return send_openflow_skb(skb, NULL);
}

static void
fill_flow_stats(struct ofp_flow_stats *ofs, struct sw_flow *flow,
                int table_idx)
{
        ofs->match.wildcards = htons(flow->key.wildcards);
        ofs->match.in_port   = flow->key.in_port;
        memcpy(ofs->match.dl_src, flow->key.dl_src, ETH_ALEN);
        memcpy(ofs->match.dl_dst, flow->key.dl_dst, ETH_ALEN);
        ofs->match.dl_vlan   = flow->key.dl_vlan;
        ofs->match.dl_type   = flow->key.dl_type;
        ofs->match.nw_src    = flow->key.nw_src;
        ofs->match.nw_dst    = flow->key.nw_dst;
        ofs->match.nw_proto  = flow->key.nw_proto;
        memset(ofs->match.pad, 0, sizeof ofs->match.pad);
        ofs->match.tp_src    = flow->key.tp_src;
        ofs->match.tp_dst    = flow->key.tp_dst;
        ofs->duration        = htonl((jiffies - flow->init_time) / HZ);
        ofs->table_id        = table_idx;
        ofs->packet_count    = cpu_to_be64(flow->packet_count);
        ofs->byte_count      = cpu_to_be64(flow->byte_count);
}

int
dp_send_flow_stats(struct datapath *dp, const struct sender *sender,
                   const struct ofp_match *match)
{
        struct sk_buff *skb;
        struct ofp_flow_stat_reply *fsr;
        size_t header_size, fudge, flow_size;
        struct sw_flow_key match_key;
        int table_idx, n_flows, max_flows;

        header_size = offsetof(struct ofp_flow_stat_reply, flows);
        fudge = 128;
        flow_size = sizeof fsr->flows[0];
        max_flows = (NLMSG_GOODSIZE - header_size - fudge) / flow_size;
        fsr = alloc_openflow_skb(dp, header_size + max_flows * flow_size,
                                 OFPT_FLOW_STAT_REPLY, sender, &skb);
        if (!fsr)
                return -ENOMEM;

        n_flows = 0;
        flow_extract_match(&match_key, match);
        for (table_idx = 0; table_idx < dp->chain->n_tables; table_idx++) {
                struct sw_table *table = dp->chain->tables[table_idx];
                struct swt_iterator iter;

                if (n_flows >= max_flows) {
                        break;
                }

                if (!table->iterator(table, &iter)) {
                        if (net_ratelimit())
                                printk("iterator failed for table %d\n",
                                       table_idx);
                        continue;
                }

                for (; iter.flow; table->iterator_next(&iter)) {
                        if (flow_matches(&match_key, &iter.flow->key)) {
                                fill_flow_stats(&fsr->flows[n_flows],
                                                iter.flow, table_idx);
                                if (++n_flows >= max_flows) {
                                        break;
                                }
                        }
                }
                table->iterator_destroy(&iter);
        }
        resize_openflow_skb(skb, &fsr->header,
                            header_size + flow_size * n_flows);
        return send_openflow_skb(skb, sender);
}

static int
fill_port_stat_reply(struct datapath *dp, struct ofp_port_stat_reply *psr)
{
        struct net_bridge_port *p;
        int port_count = 0;

        list_for_each_entry_rcu (p, &dp->port_list, node) {
                struct ofp_port_stats *ps = &psr->ports[port_count++];
                struct net_device_stats *stats = p->dev->get_stats(p->dev);
                ps->port_no = htons(p->port_no);
                memset(ps->pad, 0, sizeof ps->pad);
                ps->rx_count = cpu_to_be64(stats->rx_packets);
                ps->tx_count = cpu_to_be64(stats->tx_packets);
                ps->drop_count = cpu_to_be64(stats->rx_dropped
                                             + stats->tx_dropped);
        }

        return port_count;
}

int
dp_send_port_stats(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_port_stat_reply *psr;
        size_t psr_len, port_max_len;
        int port_count;

        /* Overallocate. */
        port_max_len = sizeof(struct ofp_port_stats) * OFPP_MAX;
        psr = alloc_openflow_skb(dp, sizeof *psr + port_max_len,
                                 OFPT_PORT_STAT_REPLY, sender, &skb);
        if (!psr)
                return -ENOMEM;

        /* Fill. */
        port_count = fill_port_stat_reply(dp, psr);

        /* Shrink to fit. */
        psr_len = sizeof *psr + sizeof(struct ofp_port_stats) * port_count;
        resize_openflow_skb(skb, &psr->header, psr_len);
        return send_openflow_skb(skb, sender);
}

int
dp_send_table_stats(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_table_stat_reply *tsr;
        int i, n_tables;

        n_tables = dp->chain->n_tables;
        tsr = alloc_openflow_skb(dp, (offsetof(struct ofp_table_stat_reply,
                                               tables)
                                      + sizeof tsr->tables[0] * n_tables),
                                 OFPT_TABLE_STAT_REPLY, sender, &skb);
        if (!tsr)
                return -ENOMEM;
        for (i = 0; i < n_tables; i++) {
                struct ofp_table_stats *ots = &tsr->tables[i];
                struct sw_table_stats stats;
                dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
                strncpy(ots->name, stats.name, sizeof ots->name);
                ots->table_id = i;
                ots->pad[0] = ots->pad[1] = 0;
                ots->max_entries = htonl(stats.max_flows);
                ots->active_count = htonl(stats.n_flows);
                ots->matched_count = cpu_to_be64(0); /* FIXME */
        }
        return send_openflow_skb(skb, sender);
}

/* Generic Netlink interface.
 *
 * See netlink(7) for an introduction to netlink.  See
 * http://linux-net.osdl.org/index.php/Netlink for more information and
 * pointers on how to work with netlink and Generic Netlink in the kernel and
 * in userspace. */

static struct genl_family dp_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = 0,
        .name = DP_GENL_FAMILY_NAME,
        .version = 1,
        .maxattr = DP_GENL_A_MAX,
};

/* Attribute policy: what each attribute may contain.  */
static struct nla_policy dp_genl_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
        [DP_GENL_A_MC_GROUP] = { .type = NLA_U32 },
        [DP_GENL_A_PORTNAME] = { .type = NLA_STRING }
};

static int dp_genl_add(struct sk_buff *skb, struct genl_info *info)
{
        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        return new_dp(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
}

static struct genl_ops dp_genl_ops_add_dp = {
        .cmd = DP_GENL_C_ADD_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add,
        .dumpit = NULL,
};

struct datapath *dp_get(int dp_idx)
{
        /* dps[] has DP_MAX entries, so dp_idx == DP_MAX is also out of
         * range (the original '> DP_MAX' test was off by one). */
        if (dp_idx < 0 || dp_idx >= DP_MAX)
                return NULL;
        return rcu_dereference(dps[dp_idx]);
}

static int dp_genl_del(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;
        int err;

        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        mutex_lock(&dp_mutex);
        dp = dp_get(nla_get_u32((info->attrs[DP_GENL_A_DP_IDX])));
        if (!dp)
                err = -ENOENT;
        else {
                del_dp(dp);
                err = 0;
        }
        mutex_unlock(&dp_mutex);
        return err;
}

static struct genl_ops dp_genl_ops_del_dp = {
        .cmd = DP_GENL_C_DEL_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_del,
        .dumpit = NULL,
};

/* Queries a datapath for related information.  Currently the only relevant
 * information is the datapath's multicast group ID.  Really we want one
 * multicast group per datapath, but because of locking issues[*] we can't
 * easily get one.  Thus, every datapath will currently return the same
 * global multicast group ID, but in the future it would be nice to fix that.
 *
 * [*] dp_genl_add, to add a new datapath, is called under the genl_lock
 *     mutex, and genl_register_mc_group, called to acquire a new multicast
 *     group ID, also acquires genl_lock, thus deadlock.
 */
static int dp_genl_query(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;
        struct sk_buff *ans_skb = NULL;
        int dp_idx;
        int err = -ENOMEM;

        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        /* FIXME: nlmsg_new(..., GFP_KERNEL) and genlmsg_reply() may sleep,
         * which is not allowed under rcu_read_lock(); holding dp_mutex here
         * instead would be safer. */
        rcu_read_lock();
        dp_idx = nla_get_u32((info->attrs[DP_GENL_A_DP_IDX]));
        dp = dp_get(dp_idx);
        if (!dp)
                err = -ENOENT;
        else {
                void *data;
                ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
                if (!ans_skb) {
                        err = -ENOMEM;
                        goto err;
                }
                data = genlmsg_put_reply(ans_skb, info, &dp_genl_family,
                                         0, DP_GENL_C_QUERY_DP);
                if (data == NULL) {
                        err = -ENOMEM;
                        goto err;
                }
                NLA_PUT_U32(ans_skb, DP_GENL_A_DP_IDX, dp_idx);
                NLA_PUT_U32(ans_skb, DP_GENL_A_MC_GROUP, mc_group.id);

                genlmsg_end(ans_skb, data);
                err = genlmsg_reply(ans_skb, info);
                if (!err)
                        ans_skb = NULL;
        }
err:
nla_put_failure:
        if (ans_skb)
                kfree_skb(ans_skb);
        rcu_read_unlock();
        return err;
}

static struct genl_ops dp_genl_ops_query_dp = {
        .cmd = DP_GENL_C_QUERY_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_query,
        .dumpit = NULL,
};

static int dp_genl_add_del_port(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;
        struct net_device *port;
        int err;

        if (!info->attrs[DP_GENL_A_DP_IDX] || !info->attrs[DP_GENL_A_PORTNAME])
                return -EINVAL;

        /* Get datapath. */
        mutex_lock(&dp_mutex);
        dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
        if (!dp) {
                err = -ENOENT;
                goto out;
        }

        /* Get interface to add/remove. */
        port = dev_get_by_name(&init_net,
                        nla_data(info->attrs[DP_GENL_A_PORTNAME]));
        if (!port) {
                err = -ENOENT;
                goto out;
        }

        /* Execute operation. */
        if (info->genlhdr->cmd == DP_GENL_C_ADD_PORT)
                err = add_switch_port(dp, port);
        else {
                if (port->br_port == NULL || port->br_port->dp != dp) {
                        err = -ENOENT;
                        goto out_put;
                }
                err = del_switch_port(port->br_port);
        }

out_put:
        dev_put(port);
out:
        mutex_unlock(&dp_mutex);
        return err;
}
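
/* Both DP_GENL_C_ADD_PORT and DP_GENL_C_DEL_PORT (registered below)
 * dispatch to dp_genl_add_del_port() above.  A request must carry
 * DP_GENL_A_DP_IDX (u32 datapath number) and DP_GENL_A_PORTNAME (the
 * name of an existing netdev; "eth1" is just an illustrative name);
 * anything else is rejected with -EINVAL or -ENOENT. */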

static struct genl_ops dp_genl_ops_add_port = {
        .cmd = DP_GENL_C_ADD_PORT,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add_del_port,
        .dumpit = NULL,
};

static struct genl_ops dp_genl_ops_del_port = {
        .cmd = DP_GENL_C_DEL_PORT,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add_del_port,
        .dumpit = NULL,
};

static int dp_genl_openflow(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *va = info->attrs[DP_GENL_A_OPENFLOW];
        struct datapath *dp;
        struct ofp_header *oh;
        struct sender sender;
        int err;

        if (!info->attrs[DP_GENL_A_DP_IDX] || !va)
                return -EINVAL;

        rcu_read_lock();
        dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
        if (!dp) {
                err = -ENOENT;
                goto out;
        }

        if (nla_len(va) < sizeof(struct ofp_header)) {
                err = -EINVAL;
                goto out;
        }
        oh = nla_data(va);

        sender.xid = oh->xid;
        sender.pid = info->snd_pid;
        sender.seq = info->snd_seq;
        err = fwd_control_input(dp->chain, &sender, nla_data(va), nla_len(va));

out:
        rcu_read_unlock();
        return err;
}

static struct nla_policy dp_genl_openflow_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
};

static struct genl_ops dp_genl_ops_openflow = {
        .cmd = DP_GENL_C_OPENFLOW,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_openflow_policy,
        .doit = dp_genl_openflow,
        .dumpit = NULL,
};

static struct nla_policy dp_genl_benchmark_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
        [DP_GENL_A_NPACKETS] = { .type = NLA_U32 },
        [DP_GENL_A_PSIZE] = { .type = NLA_U32 },
};

static struct genl_ops dp_genl_ops_benchmark_nl = {
        .cmd = DP_GENL_C_BENCHMARK_NL,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_benchmark_policy,
        .doit = dp_genl_benchmark_nl,
        .dumpit = NULL,
};

static struct genl_ops *dp_genl_all_ops[] = {
        /* Keep this operation first.  Generic Netlink dispatching
         * looks up operations with linear search, so we want it at the
         * front. */
        &dp_genl_ops_openflow,

        &dp_genl_ops_add_dp,
        &dp_genl_ops_del_dp,
        &dp_genl_ops_query_dp,
        &dp_genl_ops_add_port,
        &dp_genl_ops_del_port,
        &dp_genl_ops_benchmark_nl,
};

static int dp_init_netlink(void)
{
        int err;
        int i;

        err = genl_register_family(&dp_genl_family);
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(dp_genl_all_ops); i++) {
                err = genl_register_ops(&dp_genl_family, dp_genl_all_ops[i]);
                if (err)
                        goto err_unregister;
        }

        strcpy(mc_group.name, "openflow");
        err = genl_register_mc_group(&dp_genl_family, &mc_group);
        if (err < 0)
                goto err_unregister;

        return 0;

err_unregister:
        genl_unregister_family(&dp_genl_family);
        return err;
}

static void dp_uninit_netlink(void)
{
        genl_unregister_family(&dp_genl_family);
}

#define DRV_NAME        "openflow"
#define DRV_VERSION     VERSION
#define DRV_DESCRIPTION "OpenFlow switching datapath implementation"
#define DRV_COPYRIGHT   "Copyright (c) 2007, 2008 The Board of Trustees of The Leland Stanford Junior University"


static int __init dp_init(void)
{
        int err;

        printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION "\n");
        printk(KERN_INFO DRV_NAME ": " DRV_VERSION " built on "__DATE__" "__TIME__"\n");
        printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

        err = flow_init();
        if (err)
                goto error;

        err = dp_init_netlink();
        if (err)
                goto error_flow_exit;

        /* Hook into callback used by the bridge to intercept packets.
         * Parasites we are. */
        if (br_handle_frame_hook)
                printk("openflow: hijacking bridge hook\n");
        br_handle_frame_hook = dp_frame_hook;

        return 0;

error_flow_exit:
        flow_exit();
error:
        printk(KERN_EMERG "openflow: failed to install!\n");
        return err;
}

static void dp_cleanup(void)
{
        fwd_exit();
        dp_uninit_netlink();
        flow_exit();
        br_handle_frame_hook = NULL;
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");