- Add support for flow entry priorities.
sliver-openvswitch.git: datapath/datapath.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

/* Functions for managing the dp interface/device. */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <net/genetlink.h>
#include <linux/ip.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <asm/system.h>
#include <linux/netfilter_bridge.h>
#include <linux/inetdevice.h>
#include <linux/list.h>

#include "openflow-netlink.h"
#include "datapath.h"
#include "table.h"
#include "chain.h"
#include "forward.h"
#include "flow.h"
#include "datapath_t.h"

#include "compat.h"


/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

#define BRIDGE_PORT_NO_FLOOD    0x00000001

#define UINT32_MAX      4294967295U
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))

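/* One switch port.  This module hijacks the bridge layer's per-device
 * dev->br_port pointer to store it (see add_switch_port() and the
 * br_handle_frame_hook assignment in dp_init()), hence the bridge-style
 * name for a structure that is otherwise unrelated to the bridge module. */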
struct net_bridge_port {
        u16     port_no;
        u32     flags;
        struct datapath *dp;
        struct net_device *dev;
        struct list_head node; /* Element in datapath.ports. */
};

static struct genl_family dp_genl_family;
static struct genl_multicast_group mc_group;

int dp_dev_setup(struct net_device *dev);

/* It's hard to imagine wanting more than one datapath, but... */
#define DP_MAX 32

/* datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * the dp_mutex, but to access the chain you need to take the rcu_read_lock
 * also (because dp_mutex doesn't prevent flows from being destroyed).
 */
static struct datapath *dps[DP_MAX];
static DEFINE_MUTEX(dp_mutex);

static int dp_maint_func(void *data);
static int send_port_status(struct net_bridge_port *p, uint8_t status);


/* nla_unreserve - reduce amount of space reserved by nla_reserve
 * @skb: socket buffer from which to recover room
 * @nla: netlink attribute to adjust
 * @len: amount by which to reduce attribute payload
 *
 * Reduces amount of space reserved by a call to nla_reserve.
 *
 * No other attributes may be added between calling nla_reserve and this
 * function, since it will create a hole in the message.
 */
void nla_unreserve(struct sk_buff *skb, struct nlattr *nla, int len)
{
        skb->tail -= len;
        skb->len  -= len;

        nla->nla_len -= len;
}
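
/* Typical reserve/fill/unreserve sequence (a sketch of how this helper is
 * used below by alloc_openflow_skb() and resize_openflow_skb()):
 *
 *     attr = nla_reserve(skb, DP_GENL_A_OPENFLOW, max_len);
 *     ...fill in nla_data(attr) with 'used' bytes, used <= max_len...
 *     nla_unreserve(skb, attr, max_len - used);
 */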

static void *
alloc_openflow_skb(struct datapath *dp, size_t openflow_len, uint8_t type,
                   const struct sender *sender, struct sk_buff **pskb)
{
        size_t genl_len;
        struct sk_buff *skb;
        struct nlattr *attr;
        struct ofp_header *oh;

        genl_len = nlmsg_total_size(GENL_HDRLEN + dp_genl_family.hdrsize);
        genl_len += nla_total_size(sizeof(uint32_t)); /* DP_GENL_A_DP_IDX */
        genl_len += nla_total_size(openflow_len);     /* DP_GENL_A_OPENFLOW */
        skb = *pskb = genlmsg_new(genl_len, GFP_ATOMIC);
        if (!skb) {
                if (net_ratelimit())
                        printk(KERN_ERR "alloc_openflow_skb: genlmsg_new failed\n");
                return NULL;
        }

        /* Assemble the Generic Netlink wrapper. */
        if (!genlmsg_put(skb,
                         sender ? sender->pid : 0,
                         sender ? sender->seq : 0,
                         &dp_genl_family, 0, DP_GENL_C_OPENFLOW))
                BUG();
        if (nla_put_u32(skb, DP_GENL_A_DP_IDX, dp->dp_idx) < 0)
                BUG();
        attr = nla_reserve(skb, DP_GENL_A_OPENFLOW, openflow_len);
        BUG_ON(!attr);
        nlmsg_end(skb, (struct nlmsghdr *) skb->data);

        /* Fill in the header. */
        oh = nla_data(attr);
        oh->version = OFP_VERSION;
        oh->type = type;
        oh->length = htons(openflow_len);
        oh->xid = sender ? sender->xid : 0;

        return oh;
}
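
/* Layout of the buffer produced above (outermost first):
 *
 *     nlmsghdr | genlmsghdr | DP_GENL_A_DP_IDX attr | DP_GENL_A_OPENFLOW attr
 *                                                     \- ofp_header + body
 *
 * The returned pointer addresses the ofp_header inside the last attribute. */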

static void
resize_openflow_skb(struct sk_buff *skb,
                    struct ofp_header *oh, size_t new_length)
{
        struct nlattr *attr;

        BUG_ON(new_length > ntohs(oh->length));
        attr = ((void *) oh) - NLA_HDRLEN;
        nla_unreserve(skb, attr, ntohs(oh->length) - new_length);
        oh->length = htons(new_length);
        nlmsg_end(skb, (struct nlmsghdr *) skb->data);
}

static int
send_openflow_skb(struct sk_buff *skb, const struct sender *sender)
{
        int err = (sender
                   ? genlmsg_unicast(skb, sender->pid)
                   : genlmsg_multicast(skb, 0, mc_group.id, GFP_ATOMIC));
        if (err && net_ratelimit())
                printk(KERN_WARNING "send_openflow_skb: send failed: %d\n",
                       err);
        return err;
}

/* Generates a unique datapath id.  It incorporates the datapath index
 * and a hardware address, if available.  If not, it generates a random
 * one.
 */
static uint64_t gen_datapath_id(uint16_t dp_idx)
{
        uint64_t id;
        int i;
        struct net_device *dev;

        /* The top 16 bits are used to identify the datapath.  The lower 48
         * bits use an interface address.  */
        id = (uint64_t)dp_idx << 48;
        if ((dev = dev_get_by_name(&init_net, "ctl0"))
                        || (dev = dev_get_by_name(&init_net, "eth0"))) {
                for (i = 0; i < ETH_ALEN; i++) {
                        id |= (uint64_t)dev->dev_addr[i] << (8 * (ETH_ALEN - 1 - i));
                }
                dev_put(dev);
        } else {
                /* Randomly choose the lower 48 bits if we cannot find an
                 * address and mark the most significant bit to indicate that
                 * this was randomly generated. */
                uint8_t rand[ETH_ALEN];
                get_random_bytes(rand, ETH_ALEN);
                id |= (uint64_t)1 << 63;
                for (i = 0; i < ETH_ALEN; i++) {
                        id |= (uint64_t)rand[i] << (8 * (ETH_ALEN - 1 - i));
                }
        }

        return id;
}
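
/* Resulting ID layout (a sketch; bit 63 is the most significant):
 *
 *     63           48 47                                            0
 *    +---------------+----------------------------------------------+
 *    |    dp_idx     |     interface MAC address or random bits     |
 *    +---------------+----------------------------------------------+
 *
 * Note that in the random case the "randomly generated" marker is bit 63,
 * which overlaps the top bit of the dp_idx field. */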

/* Creates a new datapath numbered 'dp_idx'.  Returns 0 for success or a
 * negative error code.
 *
 * Not called with any locks. */
static int new_dp(int dp_idx)
{
        struct datapath *dp;
        int err;

        if (dp_idx < 0 || dp_idx >= DP_MAX)
                return -EINVAL;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        mutex_lock(&dp_mutex);
        dp = rcu_dereference(dps[dp_idx]);
        if (dp != NULL) {
                err = -EEXIST;
                goto err_unlock;
        }

        err = -ENOMEM;
        dp = kzalloc(sizeof *dp, GFP_KERNEL);
        if (dp == NULL)
                goto err_unlock;

        dp->dp_idx = dp_idx;
        dp->id = gen_datapath_id(dp_idx);
        dp->chain = chain_create(dp);
        if (dp->chain == NULL)
                goto err_free_dp;
        INIT_LIST_HEAD(&dp->port_list);

#if 0
        /* Setup our "of" device */
        dp->dev.priv = dp;
        rtnl_lock();
        err = dp_dev_setup(&dp->dev);
        rtnl_unlock();
        if (err != 0)
                printk("datapath: problem setting up 'of' device\n");
#endif

        dp->flags = 0;
        dp->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;

        dp->dp_task = kthread_run(dp_maint_func, dp, "dp%d", dp_idx);
        if (IS_ERR(dp->dp_task)) {
                err = PTR_ERR(dp->dp_task);
                goto err_destroy_chain;
        }

        rcu_assign_pointer(dps[dp_idx], dp);
        mutex_unlock(&dp_mutex);

        return 0;

err_destroy_chain:
        chain_destroy(dp->chain);
err_free_dp:
        kfree(dp);
err_unlock:
        mutex_unlock(&dp_mutex);
        module_put(THIS_MODULE);
        return err;
}

/* Find and return a free port number under 'dp'.  Called under dp_mutex. */
static int find_portno(struct datapath *dp)
{
        int i;
        for (i = 0; i < OFPP_MAX; i++)
                if (dp->ports[i] == NULL)
                        return i;
        return -EXFULL;
}

static struct net_bridge_port *new_nbp(struct datapath *dp,
                                       struct net_device *dev)
{
        struct net_bridge_port *p;
        int port_no;

        port_no = find_portno(dp);
        if (port_no < 0)
                return ERR_PTR(port_no);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL)
                return ERR_PTR(-ENOMEM);

        p->dp = dp;
        dev_hold(dev);
        p->dev = dev;
        p->port_no = port_no;

        return p;
}

/* Called with dp_mutex. */
int add_switch_port(struct datapath *dp, struct net_device *dev)
{
        struct net_bridge_port *p;

        if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER)
                return -EINVAL;

        if (dev->br_port != NULL)
                return -EBUSY;

        p = new_nbp(dp, dev);
        if (IS_ERR(p))
                return PTR_ERR(p);

        /* new_nbp() already took a reference to 'dev' for p->dev; taking a
         * second one here would leak it, since del_switch_port() drops only
         * one. */
        rcu_assign_pointer(dev->br_port, p);
        rtnl_lock();
        dev_set_promiscuity(dev, 1);
        rtnl_unlock();

        rcu_assign_pointer(dp->ports[p->port_no], p);
        list_add_rcu(&p->node, &dp->port_list);

        /* Notify the ctlpath that this port has been added */
        send_port_status(p, OFPPR_ADD);

        return 0;
}

/* Delete 'p' from switch.
 * Called with dp_mutex. */
static int del_switch_port(struct net_bridge_port *p)
{
        /* First drop references to device. */
        rtnl_lock();
        dev_set_promiscuity(p->dev, -1);
        rtnl_unlock();
        list_del_rcu(&p->node);
        rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
        rcu_assign_pointer(p->dev->br_port, NULL);

        /* Then wait until no one is still using it, and destroy it. */
        synchronize_rcu();

        /* Notify the ctlpath that this port no longer exists */
        send_port_status(p, OFPPR_DELETE);

        dev_put(p->dev);
        kfree(p);

        return 0;
}

/* Called with dp_mutex. */
static void del_dp(struct datapath *dp)
{
        struct net_bridge_port *p, *n;

#if 0
        /* Unregister the "of" device of this dp */
        rtnl_lock();
        unregister_netdevice(&dp->dev);
        rtnl_unlock();
#endif

        kthread_stop(dp->dp_task);

        /* Drop references to DP. */
        list_for_each_entry_safe (p, n, &dp->port_list, node)
                del_switch_port(p);
        rcu_assign_pointer(dps[dp->dp_idx], NULL);

        /* Wait until no longer in use, then destroy it. */
        synchronize_rcu();
        chain_destroy(dp->chain);
        kfree(dp);
        module_put(THIS_MODULE);
}

static int dp_maint_func(void *data)
{
        struct datapath *dp = (struct datapath *) data;

        while (!kthread_should_stop()) {
#if 1
                chain_timeout(dp->chain);
#else
                int count = chain_timeout(dp->chain);
                chain_print_stats(dp->chain);
                if (count)
                        printk("%d flows timed out\n", count);
#endif
                msleep_interruptible(MAINT_SLEEP_MSECS);
        }

        return 0;
}

/*
 * Used as br_handle_frame_hook.  (Cannot run bridge at the same time, even on
 * different set of devices!)  Returns 0 if *pskb should be processed further,
 * 1 if *pskb is handled. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
                                     struct sk_buff *skb)
{
        struct ethhdr *eh = eth_hdr(skb);
        struct sk_buff *skb_local = NULL;

        if (compare_ether_addr(eh->h_dest, skb->dev->dev_addr) == 0)
                return skb;

        if (is_broadcast_ether_addr(eh->h_dest)
                                || is_multicast_ether_addr(eh->h_dest)
                                || is_local_ether_addr(eh->h_dest))
                skb_local = skb_clone(skb, GFP_ATOMIC);

        /* Push the Ethernet header back on. */
        if (skb->protocol == htons(ETH_P_8021Q))
                skb_push(skb, VLAN_ETH_HLEN);
        else
                skb_push(skb, ETH_HLEN);

        fwd_port_input(p->dp->chain, skb, p->port_no);

        return skb_local;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
        /* Push the Ethernet header back on. */
        if ((*pskb)->protocol == htons(ETH_P_8021Q))
                skb_push(*pskb, VLAN_ETH_HLEN);
        else
                skb_push(*pskb, ETH_HLEN);

        fwd_port_input(p->dp->chain, *pskb, p->port_no);
        return 1;
}
#else
/* NB: This has only been tested on 2.4.35 */

/* Called without any locks (?) */
static void dp_frame_hook(struct sk_buff *skb)
{
        struct net_bridge_port *p = skb->dev->br_port;

        /* Push the Ethernet header back on. */
        if (skb->protocol == htons(ETH_P_8021Q))
                skb_push(skb, VLAN_ETH_HLEN);
        else
                skb_push(skb, ETH_HLEN);

        if (p) {
                rcu_read_lock();
                fwd_port_input(p->dp->chain, skb, p->port_no);
                rcu_read_unlock();
        } else
                kfree_skb(skb);
}
#endif

/* Forwarding output path.
 * Based on net/bridge/br_forward.c. */

/* Don't forward packets to originating port or with flooding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
                                 const struct sk_buff *skb)
{
        if ((skb->dev == p->dev) || (p->flags & BRIDGE_PORT_NO_FLOOD)) {
                return 0;
        }

        return 1;
}

static inline unsigned packet_length(const struct sk_buff *skb)
{
        int length = skb->len - ETH_HLEN;
        if (skb->protocol == htons(ETH_P_8021Q))
                length -= VLAN_HLEN;
        return length;
}

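/* Floods 'skb' out every eligible port.  To save one copy, delivery lags the
 * iteration by one port: each port's packet is cloned and sent when the
 * *next* eligible port is found, so the final port consumes the original
 * skb rather than a clone. */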
static int
flood(struct datapath *dp, struct sk_buff *skb)
{
        struct net_bridge_port *p;
        int prev_port;

        prev_port = -1;
        list_for_each_entry_rcu (p, &dp->port_list, node) {
                if (!should_deliver(p, skb))
                        continue;
                if (prev_port != -1) {
                        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
                        if (!clone) {
                                kfree_skb(skb);
                                return -ENOMEM;
                        }
                        dp_output_port(dp, clone, prev_port);
                }
                prev_port = p->port_no;
        }
        if (prev_port != -1)
                dp_output_port(dp, skb, prev_port);
        else
                kfree_skb(skb);

        return 0;
}

/* Marks 'skb' as having originated from 'in_port' in 'dp'.
   FIXME: how are devices reference counted? */
int dp_set_origin(struct datapath *dp, uint16_t in_port,
                  struct sk_buff *skb)
{
        if (in_port < OFPP_MAX && dp->ports[in_port]) {
                skb->dev = dp->ports[in_port]->dev;
                return 0;
        }
        return -ENOENT;
}

/* Takes ownership of 'skb' and transmits it to 'out_port' on 'dp'.  Returns
 * the packet length on success or a negative errno value on failure. */
int dp_output_port(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct net_bridge_port *p;
        int len;

        BUG_ON(!skb);
        len = skb->len;
        if (out_port == OFPP_FLOOD)
                return flood(dp, skb);
        else if (out_port == OFPP_CONTROLLER)
                return dp_output_control(dp, skb, fwd_save_skb(skb), 0,
                                         OFPR_ACTION);
        else if (out_port == OFPP_TABLE) {
                struct sw_flow_key key;
                struct sw_flow *flow;

                flow_extract(skb, skb->dev->br_port->port_no, &key);
                flow = chain_lookup(dp->chain, &key);
                if (likely(flow != NULL)) {
                        flow_used(flow, skb);
                        execute_actions(dp, skb, &key, flow->actions,
                                        flow->n_actions);
                        return 0;
                }
                return -ESRCH;
        } else if (out_port >= OFPP_MAX)
                goto bad_port;

        p = dp->ports[out_port];
        if (p == NULL)
                goto bad_port;

        skb->dev = p->dev;
        if (packet_length(skb) > skb->dev->mtu) {
                printk(KERN_WARNING "dropped over-mtu packet: %d > %d\n",
                       packet_length(skb), skb->dev->mtu);
                kfree_skb(skb);
                return -E2BIG;
        }

        dev_queue_xmit(skb);

        return len;

bad_port:
        kfree_skb(skb);
        if (net_ratelimit())
                printk(KERN_WARNING "can't forward to bad port %d\n", out_port);
        return -ENOENT;
}

/* Takes ownership of 'skb' and transmits it to 'dp''s control path.  If
 * 'buffer_id' != -1, then 'skb' has been buffered, so only up to 'max_len'
 * bytes of it are sent; otherwise, all of 'skb' is sent.  'reason' indicates
 * why 'skb' is being sent.  'max_len' sets the maximum number of bytes that
 * the caller wants to be sent; a value of 0 indicates the entire packet
 * should be sent. */
int
dp_output_control(struct datapath *dp, struct sk_buff *skb,
                  uint32_t buffer_id, size_t max_len, int reason)
{
        /* FIXME?  Can we avoid creating a new skbuff in the case where we
         * forward the whole packet? */
        struct sk_buff *f_skb;
        struct ofp_packet_in *opi;
        size_t fwd_len, opi_len;
        int err;

        fwd_len = skb->len;
        if ((buffer_id != (uint32_t) -1) && max_len)
                fwd_len = min(fwd_len, max_len);

        opi_len = offsetof(struct ofp_packet_in, data) + fwd_len;
        opi = alloc_openflow_skb(dp, opi_len, OFPT_PACKET_IN, NULL, &f_skb);
        if (!opi) {
                err = -ENOMEM;
                goto out;
        }
        opi->buffer_id      = htonl(buffer_id);
        opi->total_len      = htons(skb->len);
        opi->in_port        = htons(skb->dev->br_port->port_no);
        opi->reason         = reason;
        opi->pad            = 0;
        memcpy(opi->data, skb_mac_header(skb), fwd_len);
        err = send_openflow_skb(f_skb, NULL);

out:
        kfree_skb(skb);
        return err;
}

static void fill_port_desc(struct net_bridge_port *p, struct ofp_phy_port *desc)
{
        desc->port_no = htons(p->port_no);
        strncpy(desc->name, p->dev->name, OFP_MAX_PORT_NAME_LEN);
        desc->name[OFP_MAX_PORT_NAME_LEN-1] = '\0';
        memcpy(desc->hw_addr, p->dev->dev_addr, ETH_ALEN);
        desc->flags = htonl(p->flags);
        desc->features = 0;
        desc->speed = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,24)
        if (p->dev->ethtool_ops && p->dev->ethtool_ops->get_settings) {
                struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

                if (!p->dev->ethtool_ops->get_settings(p->dev, &ecmd)) {
                        if (ecmd.supported & SUPPORTED_10baseT_Half)
                                desc->features |= OFPPF_10MB_HD;
                        if (ecmd.supported & SUPPORTED_10baseT_Full)
                                desc->features |= OFPPF_10MB_FD;
                        if (ecmd.supported & SUPPORTED_100baseT_Half)
                                desc->features |= OFPPF_100MB_HD;
                        if (ecmd.supported & SUPPORTED_100baseT_Full)
                                desc->features |= OFPPF_100MB_FD;
                        if (ecmd.supported & SUPPORTED_1000baseT_Half)
                                desc->features |= OFPPF_1GB_HD;
                        if (ecmd.supported & SUPPORTED_1000baseT_Full)
                                desc->features |= OFPPF_1GB_FD;
                        /* 10Gbps half-duplex doesn't exist... */
                        if (ecmd.supported & SUPPORTED_10000baseT_Full)
                                desc->features |= OFPPF_10GB_FD;

                        desc->features = htonl(desc->features);
                        desc->speed = htonl(ecmd.speed);
                }
        }
#endif
}

static int
fill_features_reply(struct datapath *dp, struct ofp_switch_features *ofr)
{
        struct net_bridge_port *p;
        int port_count = 0;

        ofr->datapath_id    = cpu_to_be64(dp->id);

        ofr->n_exact        = htonl(2 * TABLE_HASH_MAX_FLOWS);
        ofr->n_compression  = 0;                        /* Not supported */
        ofr->n_general      = htonl(TABLE_LINEAR_MAX_FLOWS);
        ofr->buffer_mb      = htonl(UINT32_MAX);
        ofr->n_buffers      = htonl(N_PKT_BUFFERS);
        ofr->capabilities   = htonl(OFP_SUPPORTED_CAPABILITIES);
        ofr->actions        = htonl(OFP_SUPPORTED_ACTIONS);

        list_for_each_entry_rcu (p, &dp->port_list, node) {
                fill_port_desc(p, &ofr->ports[port_count]);
                port_count++;
        }

        return port_count;
}

int
dp_send_features_reply(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_switch_features *ofr;
        size_t ofr_len, port_max_len;
        int port_count;

        /* Overallocate. */
        port_max_len = sizeof(struct ofp_phy_port) * OFPP_MAX;
        ofr = alloc_openflow_skb(dp, sizeof(*ofr) + port_max_len,
                                 OFPT_FEATURES_REPLY, sender, &skb);
        if (!ofr)
                return -ENOMEM;

        /* Fill. */
        port_count = fill_features_reply(dp, ofr);

        /* Shrink to fit. */
        ofr_len = sizeof(*ofr) + (sizeof(struct ofp_phy_port) * port_count);
        resize_openflow_skb(skb, &ofr->header, ofr_len);
        return send_openflow_skb(skb, sender);
}

int
dp_send_config_reply(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_switch_config *osc;

        osc = alloc_openflow_skb(dp, sizeof *osc, OFPT_GET_CONFIG_REPLY, sender,
                                 &skb);
        if (!osc)
                return -ENOMEM;

        osc->flags = htons(dp->flags);
        osc->miss_send_len = htons(dp->miss_send_len);

        return send_openflow_skb(skb, sender);
}

int
dp_update_port_flags(struct datapath *dp, const struct ofp_phy_port *opp)
{
        struct net_bridge_port *p;

        p = dp->ports[ntohs(opp->port_no)];

        /* Make sure the port id hasn't changed since this was sent */
        if (!p || memcmp(opp->hw_addr, p->dev->dev_addr, ETH_ALEN) != 0)
                return -1;

        p->flags = ntohl(opp->flags);

        return 0;
}

static int
send_port_status(struct net_bridge_port *p, uint8_t status)
{
        struct sk_buff *skb;
        struct ofp_port_status *ops;

        ops = alloc_openflow_skb(p->dp, sizeof *ops, OFPT_PORT_STATUS, NULL,
                                 &skb);
        if (!ops)
                return -ENOMEM;
        ops->reason = status;
        fill_port_desc(p, &ops->desc);

        return send_openflow_skb(skb, NULL);
}

int
dp_send_flow_expired(struct datapath *dp, struct sw_flow *flow)
{
        struct sk_buff *skb;
        struct ofp_flow_expired *ofe;
        unsigned long duration_j;

        ofe = alloc_openflow_skb(dp, sizeof *ofe, OFPT_FLOW_EXPIRED, NULL, &skb);
        if (!ofe)
                return -ENOMEM;

        flow_fill_match(&ofe->match, &flow->key);
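        /* 'flow->timeout' is presumably the expiration deadline (time of
         * last use plus the 'max_idle' idle allowance, in jiffies), so this
         * recovers last use minus creation time, i.e. how long the flow was
         * active.  (An assumption inferred from the arithmetic.) */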
        duration_j = (flow->timeout - HZ * flow->max_idle) - flow->init_time;
        ofe->duration     = htonl(duration_j / HZ);
        ofe->packet_count = cpu_to_be64(flow->packet_count);
        ofe->byte_count   = cpu_to_be64(flow->byte_count);
        return send_openflow_skb(skb, NULL);
}

static void
fill_flow_stats(struct ofp_flow_stats *ofs, struct sw_flow *flow,
                int table_idx)
{
        ofs->match.wildcards = htons(flow->key.wildcards);
        ofs->match.in_port   = flow->key.in_port;
        memcpy(ofs->match.dl_src, flow->key.dl_src, ETH_ALEN);
        memcpy(ofs->match.dl_dst, flow->key.dl_dst, ETH_ALEN);
        ofs->match.dl_vlan   = flow->key.dl_vlan;
        ofs->match.dl_type   = flow->key.dl_type;
        ofs->match.nw_src    = flow->key.nw_src;
        ofs->match.nw_dst    = flow->key.nw_dst;
        ofs->match.nw_proto  = flow->key.nw_proto;
        memset(ofs->match.pad, 0, sizeof ofs->match.pad);
        ofs->match.tp_src    = flow->key.tp_src;
        ofs->match.tp_dst    = flow->key.tp_dst;
        ofs->duration        = htonl((jiffies - flow->init_time) / HZ);
        ofs->priority        = htons(flow->priority);
        ofs->table_id        = table_idx;
        ofs->packet_count    = cpu_to_be64(flow->packet_count);
        ofs->byte_count      = cpu_to_be64(flow->byte_count);
}

int
dp_send_flow_stats(struct datapath *dp, const struct sender *sender,
                   const struct ofp_match *match)
{
        struct sk_buff *skb;
        struct ofp_flow_stats_reply *fsr;
        size_t header_size, fudge, flow_size;
        struct sw_flow_key match_key;
        int table_idx, n_flows, max_flows;

        header_size = offsetof(struct ofp_flow_stats_reply, flows);
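        /* Size the reply to fit in one NLMSG_GOODSIZE buffer; the 128-byte
         * fudge presumably covers the netlink, genetlink, and attribute
         * headers added around the OpenFlow payload (an assumption, not
         * verified against the exact header sizes). */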
        fudge = 128;
        flow_size = sizeof fsr->flows[0];
        max_flows = (NLMSG_GOODSIZE - header_size - fudge) / flow_size;
        fsr = alloc_openflow_skb(dp, header_size + max_flows * flow_size,
                                 OFPT_FLOW_STATS_REPLY, sender, &skb);
        if (!fsr)
                return -ENOMEM;

        n_flows = 0;
        flow_extract_match(&match_key, match);
        for (table_idx = 0; table_idx < dp->chain->n_tables; table_idx++) {
                struct sw_table *table = dp->chain->tables[table_idx];
                struct swt_iterator iter;

                if (n_flows >= max_flows) {
                        break;
                }

                if (!table->iterator(table, &iter)) {
                        if (net_ratelimit())
                                printk(KERN_WARNING "iterator failed for table %d\n",
                                       table_idx);
                        continue;
                }

                for (; iter.flow; table->iterator_next(&iter)) {
                        if (flow_matches(&match_key, &iter.flow->key)) {
                                fill_flow_stats(&fsr->flows[n_flows],
                                                iter.flow, table_idx);
                                if (++n_flows >= max_flows) {
                                        break;
                                }
                        }
                }
                table->iterator_destroy(&iter);
        }
        resize_openflow_skb(skb, &fsr->header,
                            header_size + flow_size * n_flows);
        return send_openflow_skb(skb, sender);
}

static int
fill_port_stats_reply(struct datapath *dp, struct ofp_port_stats_reply *psr)
{
        struct net_bridge_port *p;
        int port_count = 0;

        list_for_each_entry_rcu (p, &dp->port_list, node) {
                struct ofp_port_stats *ps = &psr->ports[port_count++];
                struct net_device_stats *stats = p->dev->get_stats(p->dev);
                ps->port_no = htons(p->port_no);
                memset(ps->pad, 0, sizeof ps->pad);
                ps->rx_count = cpu_to_be64(stats->rx_packets);
                ps->tx_count = cpu_to_be64(stats->tx_packets);
                ps->drop_count = cpu_to_be64(stats->rx_dropped
                                             + stats->tx_dropped);
        }

        return port_count;
}

int
dp_send_port_stats(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_port_stats_reply *psr;
        size_t psr_len, port_max_len;
        int port_count;

        /* Overallocate. */
        port_max_len = sizeof(struct ofp_port_stats) * OFPP_MAX;
        psr = alloc_openflow_skb(dp, sizeof *psr + port_max_len,
                                 OFPT_PORT_STATS_REPLY, sender, &skb);
        if (!psr)
                return -ENOMEM;

        /* Fill. */
        port_count = fill_port_stats_reply(dp, psr);

        /* Shrink to fit. */
        psr_len = sizeof *psr + sizeof(struct ofp_port_stats) * port_count;
        resize_openflow_skb(skb, &psr->header, psr_len);
        return send_openflow_skb(skb, sender);
}

int
dp_send_table_stats(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_table_stats_reply *tsr;
        int i, n_tables;

        n_tables = dp->chain->n_tables;
        tsr = alloc_openflow_skb(dp, (offsetof(struct ofp_table_stats_reply,
                                               tables)
                                      + sizeof tsr->tables[0] * n_tables),
                                 OFPT_TABLE_STATS_REPLY, sender, &skb);
        if (!tsr)
                return -ENOMEM;
        for (i = 0; i < n_tables; i++) {
                struct ofp_table_stats *ots = &tsr->tables[i];
                struct sw_table_stats stats;
                dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
                strncpy(ots->name, stats.name, sizeof ots->name);
                ots->table_id = i;
                ots->pad[0] = ots->pad[1] = 0;
                ots->max_entries = htonl(stats.max_flows);
                ots->active_count = htonl(stats.n_flows);
                ots->matched_count = cpu_to_be64(0); /* FIXME */
        }
        return send_openflow_skb(skb, sender);
}

/* Generic Netlink interface.
 *
 * See netlink(7) for an introduction to netlink.  See
 * http://linux-net.osdl.org/index.php/Netlink for more information and
 * pointers on how to work with netlink and Generic Netlink in the kernel and
 * in userspace. */
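
/* Sketch of the userspace side of this interface, using libnl-3 (not part
 * of this module; the exact libnl calls are an illustration, not a tested
 * client):
 *
 *     struct nl_sock *sk = nl_socket_alloc();
 *     genl_connect(sk);
 *     int family = genl_ctrl_resolve(sk, DP_GENL_FAMILY_NAME);
 *
 *     struct nl_msg *msg = nlmsg_alloc();
 *     genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *                 DP_GENL_C_ADD_DP, 1);
 *     nla_put_u32(msg, DP_GENL_A_DP_IDX, 0);
 *     nl_send_auto(sk, msg);          // creates datapath 0
 */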

static struct genl_family dp_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = 0,
        .name = DP_GENL_FAMILY_NAME,
        .version = 1,
        .maxattr = DP_GENL_A_MAX,
};

/* Attribute policy: what each attribute may contain.  */
static struct nla_policy dp_genl_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
        [DP_GENL_A_MC_GROUP] = { .type = NLA_U32 },
        [DP_GENL_A_PORTNAME] = { .type = NLA_STRING }
};

static int dp_genl_add(struct sk_buff *skb, struct genl_info *info)
{
        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        return new_dp(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
}

static struct genl_ops dp_genl_ops_add_dp = {
        .cmd = DP_GENL_C_ADD_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add,
        .dumpit = NULL,
};

struct datapath *dp_get(int dp_idx)
{
        if (dp_idx < 0 || dp_idx >= DP_MAX)
                return NULL;
        return rcu_dereference(dps[dp_idx]);
}

static int dp_genl_del(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;
        int err;

        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        mutex_lock(&dp_mutex);
        dp = dp_get(nla_get_u32((info->attrs[DP_GENL_A_DP_IDX])));
        if (!dp)
                err = -ENOENT;
        else {
                del_dp(dp);
                err = 0;
        }
        mutex_unlock(&dp_mutex);
        return err;
}

static struct genl_ops dp_genl_ops_del_dp = {
        .cmd = DP_GENL_C_DEL_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_del,
        .dumpit = NULL,
};

/* Queries a datapath for related information.  Currently the only relevant
 * information is the datapath's multicast group ID.  Really we want one
 * multicast group per datapath, but because of locking issues[*] we can't
 * easily get one.  Thus, every datapath will currently return the same
 * global multicast group ID, but in the future it would be nice to fix that.
 *
 * [*] dp_genl_add, to add a new datapath, is called under the genl_lock
 *     mutex, and genl_register_mc_group, called to acquire a new multicast
 *     group ID, also acquires genl_lock, thus deadlock.
 */
static int dp_genl_query(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;
        struct sk_buff *ans_skb = NULL;
        int dp_idx;
        int err = -ENOMEM;

        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        rcu_read_lock();
        dp_idx = nla_get_u32((info->attrs[DP_GENL_A_DP_IDX]));
        dp = dp_get(dp_idx);
        if (!dp)
                err = -ENOENT;
        else {
                void *data;
                ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
                if (!ans_skb) {
                        err = -ENOMEM;
                        goto err;
                }
                data = genlmsg_put_reply(ans_skb, info, &dp_genl_family,
                                         0, DP_GENL_C_QUERY_DP);
                if (data == NULL) {
                        err = -ENOMEM;
                        goto err;
                }
                NLA_PUT_U32(ans_skb, DP_GENL_A_DP_IDX, dp_idx);
                NLA_PUT_U32(ans_skb, DP_GENL_A_MC_GROUP, mc_group.id);

                genlmsg_end(ans_skb, data);
                err = genlmsg_reply(ans_skb, info);
                if (!err)
                        ans_skb = NULL;
        }
err:
nla_put_failure:
        if (ans_skb)
                kfree_skb(ans_skb);
        rcu_read_unlock();
        return err;
}

static struct genl_ops dp_genl_ops_query_dp = {
        .cmd = DP_GENL_C_QUERY_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_query,
        .dumpit = NULL,
};

static int dp_genl_add_del_port(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;
        struct net_device *port;
        int err;

        if (!info->attrs[DP_GENL_A_DP_IDX] || !info->attrs[DP_GENL_A_PORTNAME])
                return -EINVAL;

        /* Get datapath. */
        mutex_lock(&dp_mutex);
        dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
        if (!dp) {
                err = -ENOENT;
                goto out;
        }

        /* Get interface to add/remove. */
        port = dev_get_by_name(&init_net,
                        nla_data(info->attrs[DP_GENL_A_PORTNAME]));
        if (!port) {
                err = -ENOENT;
                goto out;
        }

        /* Execute operation. */
        if (info->genlhdr->cmd == DP_GENL_C_ADD_PORT)
                err = add_switch_port(dp, port);
        else {
                if (port->br_port == NULL || port->br_port->dp != dp) {
                        err = -ENOENT;
                        goto out_put;
                }
                err = del_switch_port(port->br_port);
        }

out_put:
        dev_put(port);
out:
        mutex_unlock(&dp_mutex);
        return err;
}

static struct genl_ops dp_genl_ops_add_port = {
        .cmd = DP_GENL_C_ADD_PORT,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add_del_port,
        .dumpit = NULL,
};

static struct genl_ops dp_genl_ops_del_port = {
        .cmd = DP_GENL_C_DEL_PORT,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add_del_port,
        .dumpit = NULL,
};

static int dp_genl_openflow(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *va = info->attrs[DP_GENL_A_OPENFLOW];
        struct datapath *dp;
        struct ofp_header *oh;
        struct sender sender;
        int err;

        if (!info->attrs[DP_GENL_A_DP_IDX] || !va)
                return -EINVAL;

        rcu_read_lock();
        dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
        if (!dp) {
                err = -ENOENT;
                goto out;
        }

        if (nla_len(va) < sizeof(struct ofp_header)) {
                err = -EINVAL;
                goto out;
        }
        oh = nla_data(va);

        sender.xid = oh->xid;
        sender.pid = info->snd_pid;
        sender.seq = info->snd_seq;
        err = fwd_control_input(dp->chain, &sender, nla_data(va), nla_len(va));

out:
        rcu_read_unlock();
        return err;
}

static struct nla_policy dp_genl_openflow_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
};

static struct genl_ops dp_genl_ops_openflow = {
        .cmd = DP_GENL_C_OPENFLOW,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_openflow_policy,
        .doit = dp_genl_openflow,
        .dumpit = NULL,
};

static struct nla_policy dp_genl_benchmark_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
        [DP_GENL_A_NPACKETS] = { .type = NLA_U32 },
        [DP_GENL_A_PSIZE] = { .type = NLA_U32 },
};

static struct genl_ops dp_genl_ops_benchmark_nl = {
        .cmd = DP_GENL_C_BENCHMARK_NL,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_benchmark_policy,
        .doit = dp_genl_benchmark_nl,
        .dumpit = NULL,
};

static struct genl_ops *dp_genl_all_ops[] = {
        /* Keep this operation first.  Generic Netlink dispatching
         * looks up operations with linear search, so we want it at the
         * front. */
        &dp_genl_ops_openflow,

        &dp_genl_ops_add_dp,
        &dp_genl_ops_del_dp,
        &dp_genl_ops_query_dp,
        &dp_genl_ops_add_port,
        &dp_genl_ops_del_port,
        &dp_genl_ops_benchmark_nl,
};

static int dp_init_netlink(void)
{
        int err;
        int i;

        err = genl_register_family(&dp_genl_family);
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(dp_genl_all_ops); i++) {
                err = genl_register_ops(&dp_genl_family, dp_genl_all_ops[i]);
                if (err)
                        goto err_unregister;
        }

        strcpy(mc_group.name, "openflow");
        err = genl_register_mc_group(&dp_genl_family, &mc_group);
        if (err < 0)
                goto err_unregister;

        return 0;

err_unregister:
        genl_unregister_family(&dp_genl_family);
        return err;
}

static void dp_uninit_netlink(void)
{
        genl_unregister_family(&dp_genl_family);
}

#define DRV_NAME        "openflow"
#define DRV_VERSION     VERSION
#define DRV_DESCRIPTION "OpenFlow switching datapath implementation"
#define DRV_COPYRIGHT   "Copyright (c) 2007, 2008 The Board of Trustees of The Leland Stanford Junior University"


static int __init dp_init(void)
{
        int err;

        printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION "\n");
        printk(KERN_INFO DRV_NAME ": " VERSION" built on "__DATE__" "__TIME__"\n");
        printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

        err = flow_init();
        if (err)
                goto error;

        err = dp_init_netlink();
        if (err)
                goto error_flow_exit;

        /* Hook into callback used by the bridge to intercept packets.
         * Parasites we are. */
        if (br_handle_frame_hook)
                printk(KERN_WARNING "openflow: hijacking bridge hook\n");
        br_handle_frame_hook = dp_frame_hook;

        return 0;

error_flow_exit:
        flow_exit();
error:
        printk(KERN_EMERG "openflow: failed to install!\n");
        return err;
}

static void dp_cleanup(void)
{
        fwd_exit();
        dp_uninit_netlink();
        flow_exit();
        br_handle_frame_hook = NULL;
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");