/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */
/* Functions for managing the dp interface/device. */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/genetlink.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <asm/system.h>
#include <linux/netfilter_bridge.h>
#include <linux/inetdevice.h>
#include <linux/list.h>

#include "openflow-netlink.h"
#include "datapath_t.h"
/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

#define BRIDGE_PORT_NO_FLOOD	0x00000001

#define UINT32_MAX	4294967295U
#define UINT16_MAX	65535
#define MAX(X, Y)	((X) > (Y) ? (X) : (Y))
struct net_bridge_port {
	u16	port_no;
	u32	flags;
	struct datapath	*dp;
	struct net_device *dev;
	struct list_head node; /* Element in datapath.ports. */
};
static struct genl_family dp_genl_family;
static struct genl_multicast_group mc_group;

int dp_dev_setup(struct net_device *dev);
/* It's hard to imagine wanting more than one datapath, but... */

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * the dp_mutex, but to access the chain you need to take the rcu_read_lock
 * also (because dp_mutex doesn't prevent flows from being destroyed).
 */
static struct datapath *dps[DP_MAX];
static DEFINE_MUTEX(dp_mutex);
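/*
 * Illustrative locking sketch (not part of the driver; a hypothetical reader,
 * using the names above): code that walks from a datapath into its flow chain
 * needs the RCU read lock in addition to dp_mutex, per the comment above.
 *
 *	mutex_lock(&dp_mutex);
 *	rcu_read_lock();
 *	dp = dps[dp_idx];			// safe: dp_mutex held
 *	flow = chain_lookup(dp->chain, &key);	// safe: rcu_read_lock held
 *	...use 'flow'...
 *	rcu_read_unlock();
 *	mutex_unlock(&dp_mutex);
 */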
static int dp_maint_func(void *data);
static int send_port_status(struct net_bridge_port *p, uint8_t status);
static int dp_genl_openflow_done(struct netlink_callback *);
/* nla_shrink - reduce amount of space reserved by nla_reserve
 * @skb: socket buffer from which to recover room
 * @nla: netlink attribute to adjust
 * @len: new length of attribute payload
 *
 * Reduces amount of space reserved by a call to nla_reserve.
 *
 * No other attributes may be added between calling nla_reserve and this
 * function, since it will create a hole in the message.
 */
void nla_shrink(struct sk_buff *skb, struct nlattr *nla, int len)
{
	int delta = nla_total_size(len) - nla_total_size(nla_len(nla));
	BUG_ON(delta > 0);
	skb->tail += delta;
	skb->len += delta;
	nla->nla_len = nla_attr_size(len);
}
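/*
 * Sketch of the intended reserve-then-shrink pattern (illustrative only;
 * 'fill_payload' is a hypothetical helper, not part of this file):
 *
 *	struct nlattr *attr = nla_reserve(skb, DP_GENL_A_OPENFLOW, max_len);
 *	int used = fill_payload(nla_data(attr), max_len);
 *	nla_shrink(skb, attr, used);	// give the unused tail back to 'skb'
 */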
/* Puts a set of openflow headers for a message of the given 'type' into 'skb'.
 * If 'sender' is nonnull, then it is used as the message's destination.  'dp'
 * must specify the datapath to use.
 *
 * '*max_openflow_len' receives the maximum number of bytes that are available
 * for the embedded OpenFlow message.  The caller must call
 * resize_openflow_skb() to set the actual size of the message to this number
 * of bytes or less.
 *
 * Returns the openflow header if successful, otherwise (if 'skb' is too small)
 * an ERR_PTR value. */
static struct ofp_header *
put_openflow_headers(struct datapath *dp, struct sk_buff *skb, uint8_t type,
		     const struct sender *sender, int *max_openflow_len)
{
	struct ofp_header *oh;
	struct nlattr *attr;
	int openflow_len;

	/* Assemble the Generic Netlink wrapper. */
	if (!genlmsg_put(skb,
			 sender ? sender->pid : 0,
			 sender ? sender->seq : 0,
			 &dp_genl_family, 0, DP_GENL_C_OPENFLOW))
		return ERR_PTR(-ENOBUFS);
	if (nla_put_u32(skb, DP_GENL_A_DP_IDX, dp->dp_idx) < 0)
		return ERR_PTR(-ENOBUFS);

	/* Use the remaining tailroom, rounded down to attribute alignment,
	 * for the OpenFlow attribute itself. */
	openflow_len = (skb_tailroom(skb) - NLA_HDRLEN) & ~(NLA_ALIGNTO - 1);
	if (openflow_len < sizeof *oh)
		return ERR_PTR(-ENOBUFS);
	*max_openflow_len = openflow_len;
	attr = nla_reserve(skb, DP_GENL_A_OPENFLOW, openflow_len);
	BUG_ON(!attr);

	/* Fill in the header.  The caller is responsible for the length. */
	oh = nla_data(attr);
	oh->version = OFP_VERSION;
	oh->type = type;
	oh->xid = sender ? sender->xid : 0;

	return oh;
}
/* Resizes OpenFlow header 'oh', which must be at the tail end of 'skb', to new
 * length 'new_length' (in bytes), adjusting pointers and size values as
 * necessary. */
static void
resize_openflow_skb(struct sk_buff *skb,
		    struct ofp_header *oh, size_t new_length)
{
	struct nlattr *attr = ((void *) oh) - NLA_HDRLEN;
	nla_shrink(skb, attr, new_length);
	oh->length = htons(new_length);
	nlmsg_end(skb, (struct nlmsghdr *) skb->data);
}
/* Allocates a new skb to contain an OpenFlow message 'openflow_len' bytes in
 * length.  Returns a null pointer if memory is unavailable, otherwise returns
 * the OpenFlow header and stores a pointer to the skb in '*pskb'.
 *
 * 'type' is the OpenFlow message type.  If 'sender' is nonnull, then it is
 * used as the message's destination.  'dp' must specify the datapath to
 * use. */
static struct ofp_header *
alloc_openflow_skb(struct datapath *dp, size_t openflow_len, uint8_t type,
		   const struct sender *sender, struct sk_buff **pskb)
{
	struct ofp_header *oh;
	size_t genl_len;
	struct sk_buff *skb;
	int max_openflow_len;

	if ((openflow_len + sizeof(struct ofp_header)) > UINT16_MAX) {
		if (net_ratelimit())
			printk("alloc_openflow_skb: openflow message too large: %zu\n",
			       openflow_len);
		return NULL;
	}

	genl_len = nlmsg_total_size(GENL_HDRLEN + dp_genl_family.hdrsize);
	genl_len += nla_total_size(sizeof(uint32_t)); /* DP_GENL_A_DP_IDX */
	genl_len += nla_total_size(openflow_len); /* DP_GENL_A_OPENFLOW */
	skb = *pskb = genlmsg_new(genl_len, GFP_ATOMIC);
	if (!skb) {
		if (net_ratelimit())
			printk("alloc_openflow_skb: genlmsg_new failed\n");
		return NULL;
	}

	oh = put_openflow_headers(dp, skb, type, sender, &max_openflow_len);
	BUG_ON(!oh || IS_ERR(oh));
	resize_openflow_skb(skb, oh, openflow_len);

	return oh;
}
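/*
 * Typical caller pattern (an illustrative sketch; dp_send_config_reply()
 * below is a real instance): allocate, fill in the body, then send.
 *
 *	struct sk_buff *skb;
 *	struct ofp_switch_config *osc;
 *
 *	osc = alloc_openflow_skb(dp, sizeof *osc, OFPT_GET_CONFIG_REPLY,
 *				 sender, &skb);
 *	if (!osc)
 *		return -ENOMEM;
 *	osc->flags = htons(dp->flags);
 *	return send_openflow_skb(skb, sender);
 */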
/* Sends 'skb' to 'sender' if it is nonnull, otherwise multicasts 'skb' to all
 * listeners on the openflow multicast group. */
static int
send_openflow_skb(struct sk_buff *skb, const struct sender *sender)
{
	int err = (sender
		   ? genlmsg_unicast(skb, sender->pid)
		   : genlmsg_multicast(skb, 0, mc_group.id, GFP_ATOMIC));
	if (err && net_ratelimit())
		printk(KERN_WARNING "send_openflow_skb: send failed: %d\n",
		       err);
	return err;
}
/* Generates a unique datapath id.  It incorporates the datapath index
 * and a hardware address, if available.  If not, it generates a random
 * one. */
uint64_t gen_datapath_id(uint16_t dp_idx)
{
	uint64_t id;
	int i;
	struct net_device *dev;

	/* The top 16 bits are used to identify the datapath.  The lower 48 bits
	 * use an interface address.  */
	id = (uint64_t)dp_idx << 48;
	if ((dev = dev_get_by_name(&init_net, "ctl0"))
	    || (dev = dev_get_by_name(&init_net, "eth0"))) {
		for (i = 0; i < ETH_ALEN; i++) {
			id |= (uint64_t)dev->dev_addr[i] << (8*(ETH_ALEN-1 - i));
		}
		dev_put(dev);
	} else {
		/* Randomly choose the lower 48 bits if we cannot find an
		 * address and mark the most significant bit to indicate that
		 * this was randomly generated. */
		uint8_t rand[ETH_ALEN];
		get_random_bytes(rand, ETH_ALEN);
		id |= (uint64_t)1 << 63;
		for (i = 0; i < ETH_ALEN; i++) {
			id |= (uint64_t)rand[i] << (8*(ETH_ALEN-1 - i));
		}
	}

	return id;
}
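/*
 * Worked example (illustrative numbers only): dp_idx 3 with interface address
 * 00:11:22:33:44:55 yields id 0x0003001122334455.  With no usable device,
 * bit 63 is set over random low bits, so the same index produces something of
 * the form 0x8003xxxxxxxxxxxx.
 */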
/* Creates a new datapath numbered 'dp_idx'.  Returns 0 for success or a
 * negative error code.
 *
 * Not called with any locks. */
static int new_dp(int dp_idx)
{
	struct datapath *dp;
	int err;

	if (dp_idx < 0 || dp_idx >= DP_MAX)
		return -EINVAL;
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&dp_mutex);
	dp = rcu_dereference(dps[dp_idx]);
	err = -EEXIST;
	if (dp != NULL)
		goto err_unlock;

	err = -ENOMEM;
	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	if (dp == NULL)
		goto err_unlock;

	dp->dp_idx = dp_idx;
	dp->id = gen_datapath_id(dp_idx);
	dp->chain = chain_create(dp);
	if (dp->chain == NULL)
		goto err_free_dp;
	INIT_LIST_HEAD(&dp->port_list);

	/* Setup our "of" device */
	err = dp_dev_setup(&dp->dev);
	if (err != 0) {
		printk("datapath: problem setting up 'of' device\n");
		goto err_destroy_chain;
	}
	dp->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;

	dp->dp_task = kthread_run(dp_maint_func, dp, "dp%d", dp_idx);
	if (IS_ERR(dp->dp_task))
		goto err_destroy_chain;

	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);
	return 0;

err_destroy_chain:
	chain_destroy(dp->chain);
err_free_dp:
	kfree(dp);
err_unlock:
	mutex_unlock(&dp_mutex);
	module_put(THIS_MODULE);
	return err;
}
/* Find and return a free port number under 'dp'.  Called under dp_mutex. */
static int find_portno(struct datapath *dp)
{
	int i;

	for (i = 0; i < OFPP_MAX; i++)
		if (dp->ports[i] == NULL)
			return i;

	return -EXFULL;
}
static struct net_bridge_port *new_nbp(struct datapath *dp,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int port_no;

	port_no = find_portno(dp);
	if (port_no < 0)
		return ERR_PTR(port_no);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->dp = dp;
	p->dev = dev;
	p->port_no = port_no;

	return p;
}
/* Called with dp_mutex. */
int add_switch_port(struct datapath *dp, struct net_device *dev)
{
	struct net_bridge_port *p;

	if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER)
		return -EINVAL;

	if (dev->br_port != NULL)
		return -EBUSY;

	p = new_nbp(dp, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	dev_hold(dev);
	rcu_assign_pointer(dev->br_port, p);
	dev_set_promiscuity(dev, 1);

	rcu_assign_pointer(dp->ports[p->port_no], p);
	list_add_rcu(&p->node, &dp->port_list);

	/* Notify the ctlpath that this port has been added */
	send_port_status(p, OFPPR_ADD);

	return 0;
}
/* Delete 'p' from switch.
 * Called with dp_mutex. */
static int del_switch_port(struct net_bridge_port *p)
{
	/* First drop references to device. */
	dev_set_promiscuity(p->dev, -1);
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
	rcu_assign_pointer(p->dev->br_port, NULL);

	/* Then wait until no one is still using it, and destroy it. */
	synchronize_rcu();

	/* Notify the ctlpath that this port no longer exists */
	send_port_status(p, OFPPR_DELETE);

	dev_put(p->dev);
	kfree(p);

	return 0;
}
/* Called with dp_mutex. */
static void del_dp(struct datapath *dp)
{
	struct net_bridge_port *p, *n;

	/* Unregister the "of" device of this dp */
	unregister_netdevice(&dp->dev);

	kthread_stop(dp->dp_task);

	/* Drop references to DP. */
	list_for_each_entry_safe (p, n, &dp->port_list, node)
		del_switch_port(p);
	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	/* Wait until no longer in use, then destroy it. */
	synchronize_rcu();
	chain_destroy(dp->chain);
	kfree(dp);
	module_put(THIS_MODULE);
}
static int dp_maint_func(void *data)
{
	struct datapath *dp = (struct datapath *) data;

	while (!kthread_should_stop()) {
#if 1
		chain_timeout(dp->chain);
#else
		int count = chain_timeout(dp->chain);
		chain_print_stats(dp->chain);
		if (count)
			printk("%d flows timed out\n", count);
#endif
		msleep_interruptible(MAINT_SLEEP_MSECS);
	}

	return 0;
}
/*
 * Used as br_handle_frame_hook.  (Cannot run bridge at the same time, even on
 * different set of devices!)  Returns 0 if *pskb should be processed further,
 * 1 if *pskb is handled. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
				     struct sk_buff *skb)
{
	struct ethhdr *eh = eth_hdr(skb);
	struct sk_buff *skb_local = NULL;

	/* Frames addressed to the port itself go up the local stack. */
	if (compare_ether_addr(eh->h_dest, skb->dev->dev_addr) == 0)
		return skb;

	if (is_broadcast_ether_addr(eh->h_dest)
	    || is_multicast_ether_addr(eh->h_dest)
	    || is_local_ether_addr(eh->h_dest))
		skb_local = skb_clone(skb, GFP_ATOMIC);

	/* Push the Ethernet header back on. */
	if (skb->protocol == htons(ETH_P_8021Q))
		skb_push(skb, VLAN_ETH_HLEN);
	else
		skb_push(skb, ETH_HLEN);

	fwd_port_input(p->dp->chain, skb, p->port_no);

	/* Nonnull to have the frame also delivered locally. */
	return skb_local;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
	/* Push the Ethernet header back on. */
	if ((*pskb)->protocol == htons(ETH_P_8021Q))
		skb_push(*pskb, VLAN_ETH_HLEN);
	else
		skb_push(*pskb, ETH_HLEN);

	fwd_port_input(p->dp->chain, *pskb, p->port_no);

	return 1;
}
#else
/* NB: This has only been tested on 2.4.35 */
/* Called without any locks (?) */
static void dp_frame_hook(struct sk_buff *skb)
{
	struct net_bridge_port *p = skb->dev->br_port;

	/* Push the Ethernet header back on. */
	if (skb->protocol == htons(ETH_P_8021Q))
		skb_push(skb, VLAN_ETH_HLEN);
	else
		skb_push(skb, ETH_HLEN);

	if (p)
		fwd_port_input(p->dp->chain, skb, p->port_no);
}
#endif
/* Forwarding output path.
 * Based on net/bridge/br_forward.c. */

/* Don't forward packets to originating port or with flooding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	if ((skb->dev == p->dev) || (p->flags & BRIDGE_PORT_NO_FLOOD)) {
		return 0;
	}
	return 1;
}

static inline unsigned packet_length(const struct sk_buff *skb)
{
	int length = skb->len - ETH_HLEN;
	if (skb->protocol == htons(ETH_P_8021Q))
		length -= VLAN_HLEN;
	return length;
}

static int
flood(struct datapath *dp, struct sk_buff *skb)
{
	struct net_bridge_port *p;
	int prev_port = -1;

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (!should_deliver(p, skb))
			continue;
		if (prev_port != -1) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
			if (!clone) {
				kfree_skb(skb);
				return -ENOMEM;
			}
			dp_output_port(dp, clone, prev_port);
		}
		prev_port = p->port_no;
	}
	if (prev_port != -1)
		dp_output_port(dp, skb, prev_port);
	else
		kfree_skb(skb);

	return 0;
}
/* Marks 'skb' as having originated from 'in_port' in 'dp'.
   FIXME: how are devices reference counted? */
int dp_set_origin(struct datapath *dp, uint16_t in_port,
		  struct sk_buff *skb)
{
	if (in_port < OFPP_MAX && dp->ports[in_port]) {
		skb->dev = dp->ports[in_port]->dev;
		return 0;
	}
	return -ENOENT;
}
/* Takes ownership of 'skb' and transmits it to 'out_port' on 'dp'. */
int dp_output_port(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct net_bridge_port *p;

	BUG_ON(!skb);
	if (out_port == OFPP_FLOOD)
		return flood(dp, skb);
	else if (out_port == OFPP_CONTROLLER)
		return dp_output_control(dp, skb, fwd_save_skb(skb), 0,
					 OFPR_ACTION);
	else if (out_port == OFPP_TABLE) {
		struct sw_flow_key key;
		struct sw_flow *flow;

		flow_extract(skb, skb->dev->br_port->port_no, &key);
		flow = chain_lookup(dp->chain, &key);
		if (likely(flow != NULL)) {
			flow_used(flow, skb);
			execute_actions(dp, skb, &key, flow->actions, flow->n_actions);
			return 0;
		}
		return -ESRCH;
	} else if (out_port >= OFPP_MAX)
		goto bad_port;

	p = dp->ports[out_port];
	if (p == NULL)
		goto bad_port;

	skb->dev = p->dev;
	if (packet_length(skb) > skb->dev->mtu) {
		printk("dropped over-mtu packet: %d > %d\n",
		       packet_length(skb), skb->dev->mtu);
		kfree_skb(skb);
		return -E2BIG;
	}

	dev_queue_xmit(skb);

	return 0;

bad_port:
	kfree_skb(skb);
	if (net_ratelimit())
		printk("can't forward to bad port %d\n", out_port);
	return -ENOENT;
}
/* Takes ownership of 'skb' and transmits it to 'dp''s control path.  If
 * 'buffer_id' != -1, then only the first 64 bytes of 'skb' are sent;
 * otherwise, all of 'skb' is sent.  'reason' indicates why 'skb' is being
 * sent.  'max_len' sets the maximum number of bytes that the caller
 * wants to be sent; a value of 0 indicates the entire packet should be
 * sent. */
int
dp_output_control(struct datapath *dp, struct sk_buff *skb,
		  uint32_t buffer_id, size_t max_len, int reason)
{
	/* FIXME?  Can we avoid creating a new skbuff in the case where we
	 * forward the whole packet? */
	struct sk_buff *f_skb;
	struct ofp_packet_in *opi;
	size_t fwd_len, opi_len;
	int err;

	fwd_len = skb->len;
	if ((buffer_id != (uint32_t) -1) && max_len)
		fwd_len = min(fwd_len, max_len);

	opi_len = offsetof(struct ofp_packet_in, data) + fwd_len;
	opi = alloc_openflow_skb(dp, opi_len, OFPT_PACKET_IN, NULL, &f_skb);
	if (!opi) {
		err = -ENOMEM;
		goto out;
	}
	opi->buffer_id = htonl(buffer_id);
	opi->total_len = htons(skb->len);
	opi->in_port = htons(skb->dev->br_port->port_no);
	opi->reason = reason;
	memcpy(opi->data, skb_mac_header(skb), fwd_len);
	err = send_openflow_skb(f_skb, NULL);

out:
	kfree_skb(skb);
	return err;
}
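/*
 * Hypothetical miss-path caller (illustrative only; the real caller lives in
 * the forwarding code, not in this file): a packet with no matching flow is
 * buffered and at most 'miss_send_len' bytes of it go to the controller.
 *
 *	dp_output_control(dp, skb, fwd_save_skb(skb), dp->miss_send_len,
 *			  OFPR_NO_MATCH);
 */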
static void fill_port_desc(struct net_bridge_port *p, struct ofp_phy_port *desc)
{
	desc->port_no = htons(p->port_no);
	strncpy(desc->name, p->dev->name, OFP_MAX_PORT_NAME_LEN);
	desc->name[OFP_MAX_PORT_NAME_LEN-1] = '\0';
	memcpy(desc->hw_addr, p->dev->dev_addr, ETH_ALEN);
	desc->flags = htonl(p->flags);
	desc->features = 0;
	desc->speed = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,24)
	if (p->dev->ethtool_ops && p->dev->ethtool_ops->get_settings) {
		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

		if (!p->dev->ethtool_ops->get_settings(p->dev, &ecmd)) {
			if (ecmd.supported & SUPPORTED_10baseT_Half)
				desc->features |= OFPPF_10MB_HD;
			if (ecmd.supported & SUPPORTED_10baseT_Full)
				desc->features |= OFPPF_10MB_FD;
			if (ecmd.supported & SUPPORTED_100baseT_Half)
				desc->features |= OFPPF_100MB_HD;
			if (ecmd.supported & SUPPORTED_100baseT_Full)
				desc->features |= OFPPF_100MB_FD;
			if (ecmd.supported & SUPPORTED_1000baseT_Half)
				desc->features |= OFPPF_1GB_HD;
			if (ecmd.supported & SUPPORTED_1000baseT_Full)
				desc->features |= OFPPF_1GB_FD;
			/* 10Gbps half-duplex doesn't exist... */
			if (ecmd.supported & SUPPORTED_10000baseT_Full)
				desc->features |= OFPPF_10GB_FD;

			desc->features = htonl(desc->features);
			desc->speed = htonl(ecmd.speed);
		}
	}
#endif
}
static int
fill_features_reply(struct datapath *dp, struct ofp_switch_features *ofr)
{
	struct net_bridge_port *p;
	int port_count = 0;

	ofr->datapath_id   = cpu_to_be64(dp->id);

	ofr->n_exact       = htonl(2 * TABLE_HASH_MAX_FLOWS);
	ofr->n_compression = 0;		/* Not supported */
	ofr->n_general     = htonl(TABLE_LINEAR_MAX_FLOWS);
	ofr->buffer_mb     = htonl(UINT32_MAX);
	ofr->n_buffers     = htonl(N_PKT_BUFFERS);
	ofr->capabilities  = htonl(OFP_SUPPORTED_CAPABILITIES);
	ofr->actions       = htonl(OFP_SUPPORTED_ACTIONS);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		fill_port_desc(p, &ofr->ports[port_count]);
		port_count++;
	}

	return port_count;
}
int
dp_send_features_reply(struct datapath *dp, const struct sender *sender)
{
	struct sk_buff *skb;
	struct ofp_switch_features *ofr;
	size_t ofr_len, port_max_len;
	int port_count;

	/* Overallocate, since the number of ports isn't known in advance. */
	port_max_len = sizeof(struct ofp_phy_port) * OFPP_MAX;
	ofr = alloc_openflow_skb(dp, sizeof(*ofr) + port_max_len,
				 OFPT_FEATURES_REPLY, sender, &skb);
	if (!ofr)
		return -ENOMEM;

	port_count = fill_features_reply(dp, ofr);

	/* Shrink to fit. */
	ofr_len = sizeof(*ofr) + (sizeof(struct ofp_phy_port) * port_count);
	resize_openflow_skb(skb, &ofr->header, ofr_len);
	return send_openflow_skb(skb, sender);
}
int
dp_send_config_reply(struct datapath *dp, const struct sender *sender)
{
	struct sk_buff *skb;
	struct ofp_switch_config *osc;

	osc = alloc_openflow_skb(dp, sizeof *osc, OFPT_GET_CONFIG_REPLY, sender,
				 &skb);
	if (!osc)
		return -ENOMEM;

	osc->flags = htons(dp->flags);
	osc->miss_send_len = htons(dp->miss_send_len);

	return send_openflow_skb(skb, sender);
}
int
dp_update_port_flags(struct datapath *dp, const struct ofp_phy_port *opp)
{
	struct net_bridge_port *p;

	p = dp->ports[ntohs(opp->port_no)];

	/* Make sure the port id hasn't changed since this was sent */
	if (!p || memcmp(opp->hw_addr, p->dev->dev_addr, ETH_ALEN) != 0)
		return -1;

	p->flags = htonl(opp->flags);

	return 0;
}
static int
send_port_status(struct net_bridge_port *p, uint8_t status)
{
	struct sk_buff *skb;
	struct ofp_port_status *ops;

	ops = alloc_openflow_skb(p->dp, sizeof *ops, OFPT_PORT_STATUS, NULL,
				 &skb);
	if (!ops)
		return -ENOMEM;
	ops->reason = status;
	memset(ops->pad, 0, sizeof ops->pad);
	fill_port_desc(p, &ops->desc);

	return send_openflow_skb(skb, NULL);
}
int
dp_send_flow_expired(struct datapath *dp, struct sw_flow *flow)
{
	struct sk_buff *skb;
	struct ofp_flow_expired *ofe;
	unsigned long duration_j;

	ofe = alloc_openflow_skb(dp, sizeof *ofe, OFPT_FLOW_EXPIRED, NULL, &skb);
	if (!ofe)
		return -ENOMEM;

	flow_fill_match(&ofe->match, &flow->key);

	memset(ofe->pad, 0, sizeof ofe->pad);
	ofe->priority = htons(flow->priority);

	/* 'timeout' was last refreshed to 'last use + HZ * max_idle', so
	 * subtracting HZ * max_idle recovers the time of last use. */
	duration_j = (flow->timeout - HZ * flow->max_idle) - flow->init_time;
	ofe->duration     = htonl(duration_j / HZ);
	ofe->packet_count = cpu_to_be64(flow->packet_count);
	ofe->byte_count   = cpu_to_be64(flow->byte_count);

	return send_openflow_skb(skb, NULL);
}
int
dp_send_error_msg(struct datapath *dp, const struct sender *sender,
		  uint16_t type, uint16_t code, const uint8_t *data, size_t len)
{
	struct sk_buff *skb;
	struct ofp_error_msg *oem;

	oem = alloc_openflow_skb(dp, sizeof(*oem)+len, OFPT_ERROR_MSG,
				 sender, &skb);
	if (!oem)
		return -ENOMEM;

	oem->type = htons(type);
	oem->code = htons(code);
	memcpy(oem->data, data, len);

	return send_openflow_skb(skb, sender);
}
/* Generic Netlink interface.
 *
 * See netlink(7) for an introduction to netlink.  See
 * http://linux-net.osdl.org/index.php/Netlink for more information and
 * pointers on how to work with netlink and Generic Netlink in the kernel and
 * in userspace. */
static struct genl_family dp_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = DP_GENL_FAMILY_NAME,
	.version = 1,
	.maxattr = DP_GENL_A_MAX,
};
/* Attribute policy: what each attribute may contain. */
static struct nla_policy dp_genl_policy[DP_GENL_A_MAX + 1] = {
	[DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
	[DP_GENL_A_MC_GROUP] = { .type = NLA_U32 },
	[DP_GENL_A_PORTNAME] = { .type = NLA_STRING },
};
static int dp_genl_add(struct sk_buff *skb, struct genl_info *info)
{
	if (!info->attrs[DP_GENL_A_DP_IDX])
		return -EINVAL;

	return new_dp(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
}
static struct genl_ops dp_genl_ops_add_dp = {
	.cmd = DP_GENL_C_ADD_DP,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_policy,
	.doit = dp_genl_add,
};
struct datapath *dp_get(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= DP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
static int dp_genl_del(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;
	int err;

	if (!info->attrs[DP_GENL_A_DP_IDX])
		return -EINVAL;

	mutex_lock(&dp_mutex);
	dp = dp_get(nla_get_u32((info->attrs[DP_GENL_A_DP_IDX])));
	if (!dp)
		err = -ENOENT;
	else {
		del_dp(dp);
		err = 0;
	}
	mutex_unlock(&dp_mutex);

	return err;
}
static struct genl_ops dp_genl_ops_del_dp = {
	.cmd = DP_GENL_C_DEL_DP,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_policy,
	.doit = dp_genl_del,
};
/* Queries a datapath for related information.  Currently the only relevant
 * information is the datapath's multicast group ID.  Really we want one
 * multicast group per datapath, but because of locking issues[*] we can't
 * easily get one.  Thus, every datapath will currently return the same
 * global multicast group ID, but in the future it would be nice to fix that.
 *
 * [*] dp_genl_add, to add a new datapath, is called under the genl_lock
 *     mutex, and genl_register_mc_group, called to acquire a new multicast
 *     group ID, also acquires genl_lock, thus deadlock.
 */
static int dp_genl_query(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;
	struct sk_buff *ans_skb = NULL;
	void *data;
	int dp_idx;
	int err;

	if (!info->attrs[DP_GENL_A_DP_IDX])
		return -EINVAL;

	rcu_read_lock();
	dp_idx = nla_get_u32((info->attrs[DP_GENL_A_DP_IDX]));
	dp = dp_get(dp_idx);
	err = -ENOENT;
	if (!dp)
		goto err;

	err = -ENOMEM;
	ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!ans_skb)
		goto err;

	data = genlmsg_put_reply(ans_skb, info, &dp_genl_family,
				 0, DP_GENL_C_QUERY_DP);
	if (!data)
		goto err;

	NLA_PUT_U32(ans_skb, DP_GENL_A_DP_IDX, dp_idx);
	NLA_PUT_U32(ans_skb, DP_GENL_A_MC_GROUP, mc_group.id);

	genlmsg_end(ans_skb, data);
	err = genlmsg_reply(ans_skb, info);
	ans_skb = NULL;	/* Ownership passed to genlmsg_reply(). */

nla_put_failure:
err:
	if (ans_skb)
		kfree_skb(ans_skb);
	rcu_read_unlock();
	return err;
}
static struct genl_ops dp_genl_ops_query_dp = {
	.cmd = DP_GENL_C_QUERY_DP,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_policy,
	.doit = dp_genl_query,
};
static int dp_genl_add_del_port(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;
	struct net_device *port;
	int err;

	if (!info->attrs[DP_GENL_A_DP_IDX] || !info->attrs[DP_GENL_A_PORTNAME])
		return -EINVAL;

	/* Get datapath. */
	mutex_lock(&dp_mutex);
	dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
	if (!dp) {
		err = -ENOENT;
		goto out;
	}

	/* Get interface to add/remove. */
	port = dev_get_by_name(&init_net,
			       nla_data(info->attrs[DP_GENL_A_PORTNAME]));
	if (!port) {
		err = -ENOENT;
		goto out;
	}

	/* Execute operation. */
	if (info->genlhdr->cmd == DP_GENL_C_ADD_PORT)
		err = add_switch_port(dp, port);
	else {
		if (port->br_port == NULL || port->br_port->dp != dp) {
			err = -ENOENT;
			goto out_put;
		}
		err = del_switch_port(port->br_port);
	}

out_put:
	dev_put(port);
out:
	mutex_unlock(&dp_mutex);
	return err;
}
static struct genl_ops dp_genl_ops_add_port = {
	.cmd = DP_GENL_C_ADD_PORT,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_policy,
	.doit = dp_genl_add_del_port,
};

static struct genl_ops dp_genl_ops_del_port = {
	.cmd = DP_GENL_C_DEL_PORT,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_policy,
	.doit = dp_genl_add_del_port,
};
static int dp_genl_openflow(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *va = info->attrs[DP_GENL_A_OPENFLOW];
	struct datapath *dp;
	struct ofp_header *oh;
	struct sender sender;
	int err;

	if (!info->attrs[DP_GENL_A_DP_IDX] || !va)
		return -EINVAL;

	dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
	if (!dp)
		return -ENOENT;

	if (nla_len(va) < sizeof(struct ofp_header))
		return -EINVAL;
	oh = nla_data(va);

	sender.xid = oh->xid;
	sender.pid = info->snd_pid;
	sender.seq = info->snd_seq;

	err = fwd_control_input(dp->chain, &sender, nla_data(va), nla_len(va));

	return err;
}
static struct nla_policy dp_genl_openflow_policy[DP_GENL_A_MAX + 1] = {
	[DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
};
struct flow_stats_state {
	int table_idx;
	struct sw_table_position position;
	const struct ofp_flow_stats_request *rq;

	void *body;
	int bytes_used, bytes_allocated;
};
static int flow_stats_init(struct datapath *dp, const void *body, int body_len,
			   void **state)
{
	const struct ofp_flow_stats_request *fsr = body;
	struct flow_stats_state *s = kmalloc(sizeof *s, GFP_ATOMIC);
	if (!s)
		return -ENOMEM;
	s->table_idx = fsr->table_id == 0xff ? 0 : fsr->table_id;
	memset(&s->position, 0, sizeof s->position);
	s->rq = fsr;
	*state = s;
	return 0;
}
static int flow_stats_dump_callback(struct sw_flow *flow, void *private)
{
	struct flow_stats_state *s = private;
	struct ofp_flow_stats *ofs;
	int actions_length;
	int length;

	actions_length = sizeof *ofs->actions * flow->n_actions;
	length = sizeof *ofs + sizeof *ofs->actions * flow->n_actions;
	if (length + s->bytes_used > s->bytes_allocated)
		return 1;

	ofs = s->body + s->bytes_used;
	ofs->length          = htons(length);
	ofs->table_id        = s->table_idx;
	ofs->pad             = 0;
	ofs->match.wildcards = htons(flow->key.wildcards);
	ofs->match.in_port   = flow->key.in_port;
	memcpy(ofs->match.dl_src, flow->key.dl_src, ETH_ALEN);
	memcpy(ofs->match.dl_dst, flow->key.dl_dst, ETH_ALEN);
	ofs->match.dl_vlan   = flow->key.dl_vlan;
	ofs->match.dl_type   = flow->key.dl_type;
	ofs->match.nw_src    = flow->key.nw_src;
	ofs->match.nw_dst    = flow->key.nw_dst;
	ofs->match.nw_proto  = flow->key.nw_proto;
	memset(ofs->match.pad, 0, sizeof ofs->match.pad);
	ofs->match.tp_src    = flow->key.tp_src;
	ofs->match.tp_dst    = flow->key.tp_dst;
	ofs->duration        = htonl((jiffies - flow->init_time) / HZ);
	ofs->packet_count    = cpu_to_be64(flow->packet_count);
	ofs->byte_count      = cpu_to_be64(flow->byte_count);
	ofs->priority        = htons(flow->priority);
	ofs->max_idle        = htons(flow->max_idle);
	memcpy(ofs->actions, flow->actions, actions_length);

	s->bytes_used += length;
	return 0;
}
static int flow_stats_dump(struct datapath *dp, void *state,
			   void *body, int *body_len)
{
	struct flow_stats_state *s = state;
	struct sw_flow_key match_key;
	int error = 0;

	s->bytes_used = 0;
	s->bytes_allocated = *body_len;
	s->body = body;

	flow_extract_match(&match_key, &s->rq->match);
	while (s->table_idx < dp->chain->n_tables
	       && (s->rq->table_id == 0xff || s->rq->table_id == s->table_idx))
	{
		struct sw_table *table = dp->chain->tables[s->table_idx];

		error = table->iterate(table, &match_key, &s->position,
				       flow_stats_dump_callback, s);
		if (error)
			break;

		s->table_idx++;
		memset(&s->position, 0, sizeof s->position);
	}
	*body_len = s->bytes_used;

	/* If error is 0, we're done.
	 * Otherwise, if some bytes were used, there are more flows to come.
	 * Otherwise, we were not able to fit even a single flow in the body,
	 * which indicates that we have a single flow with too many actions to
	 * fit.  We won't ever make any progress at that rate, so give up. */
	return !error ? 0 : s->bytes_used ? 1 : -ENOMEM;
}
static void flow_stats_done(void *state)
{
	kfree(state);
}
static int aggregate_stats_init(struct datapath *dp,
				const void *body, int body_len,
				void **state)
{
	*state = (void *)body;
	return 0;
}
static int aggregate_stats_dump_callback(struct sw_flow *flow, void *private)
{
	struct ofp_aggregate_stats_reply *rpy = private;
	rpy->packet_count += flow->packet_count;
	rpy->byte_count += flow->byte_count;
	rpy->flow_count++;
	return 0;
}
static int aggregate_stats_dump(struct datapath *dp, void *state,
				void *body, int *body_len)
{
	struct ofp_aggregate_stats_request *rq = state;
	struct ofp_aggregate_stats_reply *rpy;
	struct sw_table_position position;
	struct sw_flow_key match_key;
	int table_idx;

	if (*body_len < sizeof *rpy)
		return -ENOBUFS;
	rpy = body;
	*body_len = sizeof *rpy;
	memset(rpy, 0, sizeof *rpy);

	flow_extract_match(&match_key, &rq->match);
	table_idx = rq->table_id == 0xff ? 0 : rq->table_id;
	memset(&position, 0, sizeof position);
	while (table_idx < dp->chain->n_tables
	       && (rq->table_id == 0xff || rq->table_id == table_idx))
	{
		struct sw_table *table = dp->chain->tables[table_idx];
		int error;

		error = table->iterate(table, &match_key, &position,
				       aggregate_stats_dump_callback, rpy);
		if (error)
			return error;

		table_idx++;
		memset(&position, 0, sizeof position);
	}

	rpy->packet_count = cpu_to_be64(rpy->packet_count);
	rpy->byte_count = cpu_to_be64(rpy->byte_count);
	rpy->flow_count = htonl(rpy->flow_count);
	return 0;
}
static int table_stats_dump(struct datapath *dp, void *state,
			    void *body, int *body_len)
{
	struct ofp_table_stats *ots;
	int nbytes = dp->chain->n_tables * sizeof *ots;
	int i;

	if (nbytes > *body_len)
		return -ENOBUFS;
	*body_len = nbytes;
	for (i = 0, ots = body; i < dp->chain->n_tables; i++, ots++) {
		struct sw_table_stats stats;
		dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
		strncpy(ots->name, stats.name, sizeof ots->name);
		ots->table_id = i;
		memset(ots->pad, 0, sizeof ots->pad);
		ots->max_entries = htonl(stats.max_flows);
		ots->active_count = htonl(stats.n_flows);
		ots->matched_count = cpu_to_be64(0); /* FIXME */
	}
	return 0;
}
struct port_stats_state {
	int port;
};
static int port_stats_init(struct datapath *dp, const void *body, int body_len,
			   void **state)
{
	struct port_stats_state *s = kmalloc(sizeof *s, GFP_ATOMIC);
	if (!s)
		return -ENOMEM;
	s->port = 0;
	*state = s;
	return 0;
}
static int port_stats_dump(struct datapath *dp, void *state,
			   void *body, int *body_len)
{
	struct port_stats_state *s = state;
	struct ofp_port_stats *ops;
	int n_ports, max_ports;
	int i;

	max_ports = *body_len / sizeof *ops;
	if (!max_ports)
		return -ENOMEM;
	ops = body;

	n_ports = 0;
	for (i = s->port; i < OFPP_MAX && n_ports < max_ports; i++) {
		struct net_bridge_port *p = dp->ports[i];
		struct net_device_stats *stats;
		if (!p)
			continue;
		stats = p->dev->get_stats(p->dev);
		ops->port_no = htons(p->port_no);
		memset(ops->pad, 0, sizeof ops->pad);
		ops->rx_count = cpu_to_be64(stats->rx_packets);
		ops->tx_count = cpu_to_be64(stats->tx_packets);
		ops->drop_count = cpu_to_be64(stats->rx_dropped
					      + stats->tx_dropped);
		n_ports++;
		ops++;
	}
	s->port = i;
	*body_len = n_ports * sizeof *ops;
	return n_ports >= max_ports;
}
static void port_stats_done(void *state)
{
	kfree(state);
}
struct stats_type {
	/* Minimum and maximum acceptable number of bytes in body member of
	 * struct ofp_stats_request. */
	size_t min_body, max_body;

	/* Prepares to dump some kind of statistics on 'dp'.  'body' and
	 * 'body_len' are the 'body' member of the struct ofp_stats_request.
	 * Returns zero if successful, otherwise a negative error code.
	 * May initialize '*state' to state information.  May be null if no
	 * initialization is required. */
	int (*init)(struct datapath *dp, const void *body, int body_len,
		    void **state);

	/* Dumps statistics for 'dp' into the '*body_len' bytes at 'body', and
	 * modifies '*body_len' to reflect the number of bytes actually used.
	 * ('body' will be transmitted as the 'body' member of struct
	 * ofp_stats_reply.) */
	int (*dump)(struct datapath *dp, void *state,
		    void *body, int *body_len);

	/* Cleans any state created by the init or dump functions.  May be null
	 * if no cleanup is required. */
	void (*done)(void *state);
};
static const struct stats_type stats[] = {
	[OFPST_FLOW] = {
		sizeof(struct ofp_flow_stats_request),
		sizeof(struct ofp_flow_stats_request),
		flow_stats_init,
		flow_stats_dump,
		flow_stats_done
	},
	[OFPST_AGGREGATE] = {
		sizeof(struct ofp_aggregate_stats_request),
		sizeof(struct ofp_aggregate_stats_request),
		aggregate_stats_init,
		aggregate_stats_dump,
		NULL
	},
	[OFPST_TABLE] = {
		0,
		0,
		NULL,
		table_stats_dump,
		NULL
	},
	[OFPST_PORT] = {
		0,
		0,
		port_stats_init,
		port_stats_dump,
		port_stats_done
	},
};
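/*
 * Added note (derived from the dump code below): dp_genl_openflow_dumpit()
 * threads its state through the netlink callback's scratch slots:
 *
 *	cb->args[0]	dump phase (0 = first call, 1 = in progress, 2 = done)
 *	cb->args[1]	datapath index
 *	cb->args[2]	index into stats[] for the request type
 *	cb->args[3]	OpenFlow transaction id (xid) echoed in replies
 *	cb->args[4]	opaque per-type state from stats_type->init()
 */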
static int
dp_genl_openflow_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct datapath *dp;
	struct sender sender;
	const struct stats_type *s;
	struct ofp_stats_reply *osr;
	int dp_idx;
	int max_openflow_len, body_len;
	void *body;
	int err;

	/* Set up the cleanup function for this dump.  Linux 2.6.20 and later
	 * support setting up cleanup functions via the .doneit member of
	 * struct genl_ops.  This kluge supports earlier versions also. */
	cb->done = dp_genl_openflow_done;

	if (!cb->args[0]) {
		/* No state; this is the first call.  Parse the request. */
		struct nlattr *attrs[DP_GENL_A_MAX + 1];
		struct nlattr *va;
		struct ofp_stats_request *rq;
		size_t len, body_len;
		int type;
		void *state;

		err = nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, DP_GENL_A_MAX,
				  dp_genl_openflow_policy);
		if (err < 0)
			return err;

		if (!attrs[DP_GENL_A_DP_IDX])
			return -EINVAL;
		dp_idx = nla_get_u32(attrs[DP_GENL_A_DP_IDX]);
		dp = dp_get(dp_idx);
		if (!dp)
			return -ENOENT;

		va = attrs[DP_GENL_A_OPENFLOW];
		len = va ? nla_len(va) : 0;
		if (!va || len < sizeof *rq)
			return -EINVAL;

		rq = nla_data(va);
		type = ntohs(rq->type);
		if (rq->header.version != OFP_VERSION
		    || rq->header.type != OFPT_STATS_REQUEST
		    || ntohs(rq->header.length) != len
		    || type >= ARRAY_SIZE(stats)
		    || !stats[type].dump)
			return -EINVAL;

		s = &stats[type];
		body_len = len - offsetof(struct ofp_stats_request, body);
		if (body_len < s->min_body || body_len > s->max_body)
			return -EINVAL;

		cb->args[0] = 1;
		cb->args[1] = dp_idx;
		cb->args[2] = type;
		cb->args[3] = rq->header.xid;
		if (s->init) {
			err = s->init(dp, rq->body, body_len, &state);
			if (err)
				return err;
			cb->args[4] = (long) state;
		}
	} else if (cb->args[0] == 1) {
		dp_idx = cb->args[1];
		s = &stats[cb->args[2]];

		dp = dp_get(dp_idx);
		if (!dp)
			return -ENOENT;
	} else {
		return 0;
	}

	sender.xid = cb->args[3];
	sender.pid = NETLINK_CB(cb->skb).pid;
	sender.seq = cb->nlh->nlmsg_seq;

	osr = put_openflow_headers(dp, skb, OFPT_STATS_REPLY, &sender,
				   &max_openflow_len);
	if (IS_ERR(osr))
		return PTR_ERR(osr);
	osr->type = htons(s - stats);
	osr->flags = 0;
	resize_openflow_skb(skb, &osr->header, max_openflow_len);
	body = osr->body;
	body_len = max_openflow_len - offsetof(struct ofp_stats_reply, body);

	err = s->dump(dp, (void *) cb->args[4], body, &body_len);
	if (err >= 0) {
		if (!err)
			cb->args[0] = 2;	/* Done; skip future calls. */
		else
			osr->flags = ntohs(OFPSF_REPLY_MORE);
		resize_openflow_skb(skb, &osr->header,
				    (offsetof(struct ofp_stats_reply, body)
				     + body_len));
		err = skb->len;
	}

	return err;
}
static int
dp_genl_openflow_done(struct netlink_callback *cb)
{
	if (cb->args[0]) {
		const struct stats_type *s = &stats[cb->args[2]];
		if (s->done)
			s->done((void *) cb->args[4]);
	}
	return 0;
}
static struct genl_ops dp_genl_ops_openflow = {
	.cmd = DP_GENL_C_OPENFLOW,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_openflow_policy,
	.doit = dp_genl_openflow,
	.dumpit = dp_genl_openflow_dumpit,
};
static struct nla_policy dp_genl_benchmark_policy[DP_GENL_A_MAX + 1] = {
	[DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
	[DP_GENL_A_NPACKETS] = { .type = NLA_U32 },
	[DP_GENL_A_PSIZE] = { .type = NLA_U32 },
};
static struct genl_ops dp_genl_ops_benchmark_nl = {
	.cmd = DP_GENL_C_BENCHMARK_NL,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_benchmark_policy,
	.doit = dp_genl_benchmark_nl,
};
static struct genl_ops *dp_genl_all_ops[] = {
	/* Keep this operation first.  Generic Netlink dispatching
	 * looks up operations with linear search, so we want it at the
	 * front. */
	&dp_genl_ops_openflow,

	&dp_genl_ops_add_dp,
	&dp_genl_ops_del_dp,
	&dp_genl_ops_query_dp,
	&dp_genl_ops_add_port,
	&dp_genl_ops_del_port,
	&dp_genl_ops_benchmark_nl,
};
static int dp_init_netlink(void)
{
	int err;
	int i;

	err = genl_register_family(&dp_genl_family);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(dp_genl_all_ops); i++) {
		err = genl_register_ops(&dp_genl_family, dp_genl_all_ops[i]);
		if (err)
			goto err_unregister;
	}

	strcpy(mc_group.name, "openflow");
	err = genl_register_mc_group(&dp_genl_family, &mc_group);
	if (err < 0)
		goto err_unregister;

	return 0;

err_unregister:
	genl_unregister_family(&dp_genl_family);
	return err;
}
static void dp_uninit_netlink(void)
{
	genl_unregister_family(&dp_genl_family);
}
#define DRV_NAME	"openflow"
#define DRV_VERSION	VERSION
#define DRV_DESCRIPTION	"OpenFlow switching datapath implementation"
#define DRV_COPYRIGHT	"Copyright (c) 2007, 2008 The Board of Trustees of The Leland Stanford Junior University"
static int __init dp_init(void)
{
	int err;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION "\n");
	printk(KERN_INFO DRV_NAME ": " VERSION " built on " __DATE__ " " __TIME__ "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	err = flow_init();
	if (err)
		goto error;

	err = dp_init_netlink();
	if (err)
		goto error_flow_exit;

	/* Hook into callback used by the bridge to intercept packets.
	 * Parasites we are. */
	if (br_handle_frame_hook)
		printk("openflow: hijacking bridge hook\n");
	br_handle_frame_hook = dp_frame_hook;

	return 0;

error_flow_exit:
	flow_exit();
error:
	printk(KERN_EMERG "openflow: failed to install!\n");
	return err;
}
static void dp_cleanup(void)
{
	dp_uninit_netlink();
	flow_exit();
	br_handle_frame_hook = NULL;
}
module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");