/* Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */
/* Functions for managing the dp interface/device. */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <net/genetlink.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <asm/system.h>
#include <linux/netfilter_bridge.h>
#include <linux/inetdevice.h>
#include <linux/list.h>

#include "openflow-netlink.h"
#include "datapath_t.h"
/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

#define BRIDGE_PORT_NO_FLOOD 0x00000001

#define UINT32_MAX 4294967295U

#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
struct net_bridge_port {
        u16 port_no;
        u32 flags;
        struct datapath *dp;
        struct net_device *dev;
        struct list_head node;          /* Element in datapath.ports. */
};
static struct genl_family dp_genl_family;
static struct genl_multicast_group mc_group;

int dp_dev_setup(struct net_device *dev);
/* It's hard to imagine wanting more than one datapath, but... */

/* Datapaths.  Protected on the read side by rcu_read_lock, on the write side
 * by dp_mutex.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * the dp_mutex, but to access the chain you need to take the rcu_read_lock
 * also (because dp_mutex doesn't prevent flows from being destroyed).
 */
static struct datapath *dps[DP_MAX];
static DEFINE_MUTEX(dp_mutex);
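/* A minimal sketch (illustrative, not from this file) of the read-side
 * pattern described above: hold rcu_read_lock() while dereferencing dps[]
 * and while touching the flow chain, so that flows destroyed concurrently
 * under dp_mutex remain valid until the read-side critical section ends.
 *
 *      rcu_read_lock();
 *      dp = rcu_dereference(dps[dp_idx]);
 *      if (dp)
 *              flow = chain_lookup(dp->chain, &key);
 *      ...
 *      rcu_read_unlock();
 */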
static int dp_maint_func(void *data);
static int send_port_status(struct net_bridge_port *p, uint8_t status);
/* nla_shrink - reduce amount of space reserved by nla_reserve
 * @skb: socket buffer from which to recover room
 * @nla: netlink attribute to adjust
 * @len: new length of attribute payload
 *
 * Reduces amount of space reserved by a call to nla_reserve.
 *
 * No other attributes may be added between calling nla_reserve and this
 * function, since it will create a hole in the message.
 */
void nla_shrink(struct sk_buff *skb, struct nlattr *nla, int len)
{
        int delta = nla_total_size(len) - nla_total_size(nla_len(nla));

        BUG_ON(delta > 0);
        skb->tail += delta;
        skb->len  += delta;
        nla->nla_len = nla_attr_size(len);
}
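/* Illustrative usage (a sketch, not code taken from this file): reserve the
 * largest attribute that could be needed, write the actual payload, then
 * shrink the reservation so the netlink message length is exact.
 *
 *      struct nlattr *attr = nla_reserve(skb, DP_GENL_A_OPENFLOW, max_len);
 *      ... fill nla_data(attr) with 'used' bytes, used <= max_len ...
 *      nla_shrink(skb, attr, used);
 *
 * put_openflow_headers() and resize_openflow_skb() below follow exactly this
 * pattern for the embedded OpenFlow message.
 */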
/* Puts a set of openflow headers for a message of the given 'type' into 'skb'.
 * If 'sender' is nonnull, then it is used as the message's destination.  'dp'
 * must specify the datapath to use.
 *
 * '*max_openflow_len' receives the maximum number of bytes that are available
 * for the embedded OpenFlow message.  The caller must call
 * resize_openflow_skb() to set the actual size of the message to this number
 * of bytes or less.
 *
 * Returns the openflow header if successful, otherwise (if 'skb' is too small)
 * an ERR_PTR. */
static void *
put_openflow_headers(struct datapath *dp, struct sk_buff *skb, uint8_t type,
                     const struct sender *sender, int *max_openflow_len)
{
        struct ofp_header *oh;
        struct nlattr *attr;
        int openflow_len;

        /* Assemble the Generic Netlink wrapper. */
        if (!genlmsg_put(skb,
                         sender ? sender->pid : 0,
                         sender ? sender->seq : 0,
                         &dp_genl_family, 0, DP_GENL_C_OPENFLOW))
                return ERR_PTR(-ENOBUFS);
        if (nla_put_u32(skb, DP_GENL_A_DP_IDX, dp->dp_idx) < 0)
                return ERR_PTR(-ENOBUFS);
        openflow_len = (skb_tailroom(skb) - NLA_HDRLEN) & ~(NLA_ALIGNTO - 1);
        if (openflow_len < sizeof *oh)
                return ERR_PTR(-ENOBUFS);
        *max_openflow_len = openflow_len;
        attr = nla_reserve(skb, DP_GENL_A_OPENFLOW, openflow_len);
        BUG_ON(!attr);

        /* Fill in the header.  The caller is responsible for the length. */
        oh = nla_data(attr);
        oh->version = OFP_VERSION;
        oh->type = type;
        oh->xid = sender ? sender->xid : 0;

        return oh;
}
/* Resizes OpenFlow header 'oh', which must be at the tail end of 'skb', to new
 * length 'new_length' (in bytes), adjusting pointers and size values as
 * necessary. */
static void
resize_openflow_skb(struct sk_buff *skb,
                    struct ofp_header *oh, size_t new_length)
{
        struct nlattr *attr = ((void *) oh) - NLA_HDRLEN;

        nla_shrink(skb, attr, new_length);
        oh->length = htons(new_length);
        nlmsg_end(skb, (struct nlmsghdr *) skb->data);
}
/* Allocates a new skb to contain an OpenFlow message 'openflow_len' bytes in
 * length.  Returns a null pointer if memory is unavailable, otherwise returns
 * the OpenFlow header and stores a pointer to the skb in '*pskb'.
 *
 * 'type' is the OpenFlow message type.  If 'sender' is nonnull, then it is
 * used as the message's destination.  'dp' must specify the datapath to
 * use. */
static void *
alloc_openflow_skb(struct datapath *dp, size_t openflow_len, uint8_t type,
                   const struct sender *sender, struct sk_buff **pskb)
{
        struct ofp_header *oh;
        size_t genl_len;
        struct sk_buff *skb;
        int max_openflow_len;

        genl_len = nlmsg_total_size(GENL_HDRLEN + dp_genl_family.hdrsize);
        genl_len += nla_total_size(sizeof(uint32_t)); /* DP_GENL_A_DP_IDX */
        genl_len += nla_total_size(openflow_len);     /* DP_GENL_A_OPENFLOW */
        skb = *pskb = genlmsg_new(genl_len, GFP_ATOMIC);
        if (!skb) {
                if (net_ratelimit())
                        printk("alloc_openflow_skb: genlmsg_new failed\n");
                return NULL;
        }

        oh = put_openflow_headers(dp, skb, type, sender, &max_openflow_len);
        BUG_ON(!oh || IS_ERR(oh));
        resize_openflow_skb(skb, oh, openflow_len);

        return oh;
}
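/* Typical caller pattern (a sketch derived from the senders below, e.g.
 * dp_send_config_reply()): allocate an skb sized for the reply, fill in the
 * OpenFlow body, then hand the skb to send_openflow_skb().
 *
 *      struct sk_buff *skb;
 *      struct ofp_switch_config *osc;
 *
 *      osc = alloc_openflow_skb(dp, sizeof *osc, OFPT_GET_CONFIG_REPLY,
 *                               sender, &skb);
 *      if (!osc)
 *              return -ENOMEM;
 *      ... fill in *osc ...
 *      return send_openflow_skb(skb, sender);
 */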
/* Sends 'skb' to 'sender' if it is nonnull, otherwise multicasts 'skb' to all
 * listeners. */
static int
send_openflow_skb(struct sk_buff *skb, const struct sender *sender)
{
        int err = (sender
                   ? genlmsg_unicast(skb, sender->pid)
                   : genlmsg_multicast(skb, 0, mc_group.id, GFP_ATOMIC));
        if (err && net_ratelimit())
                printk(KERN_WARNING "send_openflow_skb: send failed: %d\n",
                       err);
        return err;
}
/* Generates a unique datapath id.  It incorporates the datapath index
 * and a hardware address, if available.  If not, it generates a random
 * one. */
uint64_t gen_datapath_id(uint16_t dp_idx)
{
        uint64_t id;
        int i;
        struct net_device *dev;

        /* The top 16 bits are used to identify the datapath.  The lower 48
         * bits use an interface address. */
        id = (uint64_t)dp_idx << 48;
        if ((dev = dev_get_by_name(&init_net, "ctl0"))
            || (dev = dev_get_by_name(&init_net, "eth0"))) {
                for (i = 0; i < ETH_ALEN; i++) {
                        id |= (uint64_t)dev->dev_addr[i] << (8*(ETH_ALEN-1 - i));
                }
                dev_put(dev);
        } else {
                /* Randomly choose the lower 48 bits if we cannot find an
                 * address and mark the most significant bit to indicate that
                 * this was randomly generated. */
                uint8_t rand[ETH_ALEN];

                get_random_bytes(rand, ETH_ALEN);
                id |= (uint64_t)1 << 63;
                for (i = 0; i < ETH_ALEN; i++) {
                        id |= (uint64_t)rand[i] << (8*(ETH_ALEN-1 - i));
                }
        }

        return id;
}
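/* Worked example (illustrative, not part of the original source): for
 * dp_idx 1 and an interface address of 00:11:22:33:44:55 the loop above
 * yields
 *
 *      id = (0x0001ULL << 48) | 0x001122334455ULL = 0x0001001122334455
 *
 * i.e. the datapath index occupies the top 16 bits and the MAC the low 48.
 * When no interface address is found, bit 63 is additionally set to flag a
 * randomly generated id.
 */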
/* Creates a new datapath numbered 'dp_idx'.  Returns 0 for success or a
 * negative error code.
 *
 * Not called with any locks. */
static int new_dp(int dp_idx)
{
        struct datapath *dp;
        int err;

        if (dp_idx < 0 || dp_idx >= DP_MAX)
                return -EINVAL;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        mutex_lock(&dp_mutex);
        dp = rcu_dereference(dps[dp_idx]);
        if (dp != NULL) {
                err = -EEXIST;
                goto err_unlock;
        }

        err = -ENOMEM;
        dp = kzalloc(sizeof *dp, GFP_KERNEL);
        if (dp == NULL)
                goto err_unlock;

        dp->id = gen_datapath_id(dp_idx);
        dp->chain = chain_create(dp);
        if (dp->chain == NULL)
                goto err_unlock;
        INIT_LIST_HEAD(&dp->port_list);

        /* Setup our "of" device */
        err = dp_dev_setup(&dp->dev);
        if (err) {
                printk("datapath: problem setting up 'of' device\n");
                goto err_unlock;
        }

        dp->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;

        dp->dp_task = kthread_run(dp_maint_func, dp, "dp%d", dp_idx);
        if (IS_ERR(dp->dp_task))
                goto err_unlock;

        rcu_assign_pointer(dps[dp_idx], dp);
        mutex_unlock(&dp_mutex);

        return 0;

err_unlock:
        mutex_unlock(&dp_mutex);
        module_put(THIS_MODULE);
        return err;
}
/* Find and return a free port number under 'dp'.  Called under dp_mutex. */
static int find_portno(struct datapath *dp)
{
        int i;

        for (i = 0; i < OFPP_MAX; i++)
                if (dp->ports[i] == NULL)
                        return i;
        return -EXFULL;
}
static struct net_bridge_port *new_nbp(struct datapath *dp,
                                       struct net_device *dev)
{
        struct net_bridge_port *p;
        int port_no;

        port_no = find_portno(dp);
        if (port_no < 0)
                return ERR_PTR(port_no);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL)
                return ERR_PTR(-ENOMEM);

        p->dp = dp;
        p->dev = dev;
        p->port_no = port_no;

        return p;
}
/* Called with dp_mutex. */
int add_switch_port(struct datapath *dp, struct net_device *dev)
{
        struct net_bridge_port *p;

        if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER)
                return -EINVAL;

        if (dev->br_port != NULL)
                return -EBUSY;

        p = new_nbp(dp, dev);
        if (IS_ERR(p))
                return PTR_ERR(p);

        rcu_assign_pointer(dev->br_port, p);
        dev_set_promiscuity(dev, 1);

        rcu_assign_pointer(dp->ports[p->port_no], p);
        list_add_rcu(&p->node, &dp->port_list);

        /* Notify the ctlpath that this port has been added */
        send_port_status(p, OFPPR_ADD);

        return 0;
}
/* Delete 'p' from switch.
 * Called with dp_mutex. */
static int del_switch_port(struct net_bridge_port *p)
{
        /* First drop references to device. */
        dev_set_promiscuity(p->dev, -1);
        list_del_rcu(&p->node);
        rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
        rcu_assign_pointer(p->dev->br_port, NULL);

        /* Then wait until no one is still using it, and destroy it. */
        synchronize_rcu();

        /* Notify the ctlpath that this port no longer exists */
        send_port_status(p, OFPPR_DELETE);

        return 0;
}
/* Called with dp_mutex. */
static void del_dp(struct datapath *dp)
{
        struct net_bridge_port *p, *n;

        /* Unregister the "of" device of this dp */
        unregister_netdevice(&dp->dev);

        kthread_stop(dp->dp_task);

        /* Drop references to DP. */
        list_for_each_entry_safe (p, n, &dp->port_list, node)
                del_switch_port(p);
        rcu_assign_pointer(dps[dp->dp_idx], NULL);

        /* Wait until no longer in use, then destroy it. */
        synchronize_rcu();
        chain_destroy(dp->chain);
        kfree(dp);
        module_put(THIS_MODULE);
}
static int dp_maint_func(void *data)
{
        struct datapath *dp = (struct datapath *) data;

        while (!kthread_should_stop()) {
#if 1
                chain_timeout(dp->chain);
#else
                int count = chain_timeout(dp->chain);
                chain_print_stats(dp->chain);
                if (count)
                        printk("%d flows timed out\n", count);
#endif
                msleep_interruptible(MAINT_SLEEP_MSECS);
        }

        return 0;
}
/*
 * Used as br_handle_frame_hook.  (Cannot run bridge at the same time, even on
 * different set of devices!)  Returns 0 if *pskb should be processed further,
 * 1 if *pskb is handled. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
                                     struct sk_buff *skb)
{
        struct ethhdr *eh = eth_hdr(skb);
        struct sk_buff *skb_local = NULL;

        if (compare_ether_addr(eh->h_dest, skb->dev->dev_addr) == 0)
                return skb;     /* This frame is for us. */

        if (is_broadcast_ether_addr(eh->h_dest)
            || is_multicast_ether_addr(eh->h_dest)
            || is_local_ether_addr(eh->h_dest))
                skb_local = skb_clone(skb, GFP_ATOMIC);

        /* Push the Ethernet header back on. */
        if (skb->protocol == htons(ETH_P_8021Q))
                skb_push(skb, VLAN_ETH_HLEN);
        else
                skb_push(skb, ETH_HLEN);

        fwd_port_input(p->dp->chain, skb, p->port_no);

        return skb_local;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
        /* Push the Ethernet header back on. */
        if ((*pskb)->protocol == htons(ETH_P_8021Q))
                skb_push(*pskb, VLAN_ETH_HLEN);
        else
                skb_push(*pskb, ETH_HLEN);

        fwd_port_input(p->dp->chain, *pskb, p->port_no);

        return 1;
}
#else
/* NB: This has only been tested on 2.4.35 */

/* Called without any locks (?) */
static void dp_frame_hook(struct sk_buff *skb)
{
        struct net_bridge_port *p = skb->dev->br_port;

        /* Push the Ethernet header back on. */
        if (skb->protocol == htons(ETH_P_8021Q))
                skb_push(skb, VLAN_ETH_HLEN);
        else
                skb_push(skb, ETH_HLEN);

        fwd_port_input(p->dp->chain, skb, p->port_no);
}
#endif
/* Forwarding output path.
 * Based on net/bridge/br_forward.c. */

/* Don't forward packets to originating port or with flooding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
                                 const struct sk_buff *skb)
{
        if ((skb->dev == p->dev) || (p->flags & BRIDGE_PORT_NO_FLOOD)) {
                return 0;
        }

        return 1;
}

static inline unsigned packet_length(const struct sk_buff *skb)
{
        int length = skb->len - ETH_HLEN;

        if (skb->protocol == htons(ETH_P_8021Q))
                length -= VLAN_HLEN;
        return length;
}
static int
flood(struct datapath *dp, struct sk_buff *skb)
{
        struct net_bridge_port *p;
        int prev_port;

        prev_port = -1;
        list_for_each_entry_rcu (p, &dp->port_list, node) {
                if (!should_deliver(p, skb))
                        continue;
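                /* Deliver to the previously found port only once another
                 * eligible port turns up; that way the final delivery below
                 * can consume the original skb instead of yet another clone. */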
                if (prev_port != -1) {
                        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

                        dp_output_port(dp, clone, prev_port);
                }
                prev_port = p->port_no;
        }
        if (prev_port != -1)
                dp_output_port(dp, skb, prev_port);

        return 0;
}
/* Marks 'skb' as having originated from 'in_port' in 'dp'.
   FIXME: how are devices reference counted? */
int dp_set_origin(struct datapath *dp, uint16_t in_port,
                  struct sk_buff *skb)
{
        if (in_port < OFPP_MAX && dp->ports[in_port]) {
                skb->dev = dp->ports[in_port]->dev;
                return 0;
        }
}
/* Takes ownership of 'skb' and transmits it to 'out_port' on 'dp'.
 */
int dp_output_port(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct net_bridge_port *p;
        int len = skb->len;

        if (out_port == OFPP_FLOOD)
                return flood(dp, skb);
        else if (out_port == OFPP_CONTROLLER)
                return dp_output_control(dp, skb, fwd_save_skb(skb), 0,
                                         OFPR_ACTION);
        else if (out_port == OFPP_TABLE) {
                struct sw_flow_key key;
                struct sw_flow *flow;

                flow_extract(skb, skb->dev->br_port->port_no, &key);
                flow = chain_lookup(dp->chain, &key);
                if (likely(flow != NULL)) {
                        flow_used(flow, skb);
                        execute_actions(dp, skb, &key, flow->actions,
                                        flow->n_actions);
                        return 0;
                }
                return -ESRCH;
        } else if (out_port >= OFPP_MAX)
                goto bad_port;

        p = dp->ports[out_port];
        if (p == NULL)
                goto bad_port;

        skb->dev = p->dev;
        if (packet_length(skb) > skb->dev->mtu) {
                printk("dropped over-mtu packet: %d > %d\n",
                       packet_length(skb), skb->dev->mtu);
                kfree_skb(skb);
                return -E2BIG;
        }

        dev_queue_xmit(skb);

        return len;

bad_port:
        kfree_skb(skb);
        if (net_ratelimit())
                printk("can't forward to bad port %d\n", out_port);
        return -ENOENT;
}
/* Takes ownership of 'skb' and transmits it to 'dp''s control path.  If
 * 'buffer_id' != -1, then only the first 64 bytes of 'skb' are sent;
 * otherwise, all of 'skb' is sent.  'reason' indicates why 'skb' is being
 * sent.  'max_len' sets the maximum number of bytes that the caller
 * wants to be sent; a value of 0 indicates the entire packet should be
 * sent. */
int
dp_output_control(struct datapath *dp, struct sk_buff *skb,
                  uint32_t buffer_id, size_t max_len, int reason)
{
        /* FIXME?  Can we avoid creating a new skbuff in the case where we
         * forward the whole packet? */
        struct sk_buff *f_skb;
        struct ofp_packet_in *opi;
        size_t fwd_len, opi_len;
        int err;

        fwd_len = skb->len;
        if ((buffer_id != (uint32_t) -1) && max_len)
                fwd_len = min(fwd_len, max_len);

        opi_len = offsetof(struct ofp_packet_in, data) + fwd_len;
        opi = alloc_openflow_skb(dp, opi_len, OFPT_PACKET_IN, NULL, &f_skb);
        if (!opi) {
                err = -ENOMEM;
                goto out;
        }
        opi->buffer_id = htonl(buffer_id);
        opi->total_len = htons(skb->len);
        opi->in_port = htons(skb->dev->br_port->port_no);
        opi->reason = reason;
        memcpy(opi->data, skb_mac_header(skb), fwd_len);
        err = send_openflow_skb(f_skb, NULL);

out:
        kfree_skb(skb);
        return err;
}
static void fill_port_desc(struct net_bridge_port *p, struct ofp_phy_port *desc)
{
        desc->port_no = htons(p->port_no);
        strncpy(desc->name, p->dev->name, OFP_MAX_PORT_NAME_LEN);
        desc->name[OFP_MAX_PORT_NAME_LEN-1] = '\0';
        memcpy(desc->hw_addr, p->dev->dev_addr, ETH_ALEN);
        desc->flags = htonl(p->flags);
        desc->features = 0;
        desc->speed = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,24)
        if (p->dev->ethtool_ops && p->dev->ethtool_ops->get_settings) {
                struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

                if (!p->dev->ethtool_ops->get_settings(p->dev, &ecmd)) {
                        if (ecmd.supported & SUPPORTED_10baseT_Half)
                                desc->features |= OFPPF_10MB_HD;
                        if (ecmd.supported & SUPPORTED_10baseT_Full)
                                desc->features |= OFPPF_10MB_FD;
                        if (ecmd.supported & SUPPORTED_100baseT_Half)
                                desc->features |= OFPPF_100MB_HD;
                        if (ecmd.supported & SUPPORTED_100baseT_Full)
                                desc->features |= OFPPF_100MB_FD;
                        if (ecmd.supported & SUPPORTED_1000baseT_Half)
                                desc->features |= OFPPF_1GB_HD;
                        if (ecmd.supported & SUPPORTED_1000baseT_Full)
                                desc->features |= OFPPF_1GB_FD;
                        /* 10Gbps half-duplex doesn't exist... */
                        if (ecmd.supported & SUPPORTED_10000baseT_Full)
                                desc->features |= OFPPF_10GB_FD;

                        desc->features = htonl(desc->features);
                        desc->speed = htonl(ecmd.speed);
                }
        }
#endif
}
static int
fill_features_reply(struct datapath *dp, struct ofp_switch_features *ofr)
{
        struct net_bridge_port *p;
        int port_count = 0;

        ofr->datapath_id = cpu_to_be64(dp->id);

        ofr->n_exact = htonl(2 * TABLE_HASH_MAX_FLOWS);
        ofr->n_compression = 0;                 /* Not supported */
        ofr->n_general = htonl(TABLE_LINEAR_MAX_FLOWS);
        ofr->buffer_mb = htonl(UINT32_MAX);
        ofr->n_buffers = htonl(N_PKT_BUFFERS);
        ofr->capabilities = htonl(OFP_SUPPORTED_CAPABILITIES);
        ofr->actions = htonl(OFP_SUPPORTED_ACTIONS);

        list_for_each_entry_rcu (p, &dp->port_list, node) {
                fill_port_desc(p, &ofr->ports[port_count]);
                port_count++;
        }

        return port_count;
}
int
dp_send_features_reply(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_switch_features *ofr;
        size_t ofr_len, port_max_len;
        int port_count;

        port_max_len = sizeof(struct ofp_phy_port) * OFPP_MAX;
        ofr = alloc_openflow_skb(dp, sizeof(*ofr) + port_max_len,
                                 OFPT_FEATURES_REPLY, sender, &skb);
        if (!ofr)
                return -ENOMEM;

        port_count = fill_features_reply(dp, ofr);

        ofr_len = sizeof(*ofr) + (sizeof(struct ofp_phy_port) * port_count);
        resize_openflow_skb(skb, &ofr->header, ofr_len);
        return send_openflow_skb(skb, sender);
}
int
dp_send_config_reply(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_switch_config *osc;

        osc = alloc_openflow_skb(dp, sizeof *osc, OFPT_GET_CONFIG_REPLY, sender,
                                 &skb);
        if (!osc)
                return -ENOMEM;

        osc->flags = htons(dp->flags);
        osc->miss_send_len = htons(dp->miss_send_len);

        return send_openflow_skb(skb, sender);
}
int
dp_update_port_flags(struct datapath *dp, const struct ofp_phy_port *opp)
{
        struct net_bridge_port *p;

        p = dp->ports[htons(opp->port_no)];

        /* Make sure the port id hasn't changed since this was sent */
        if (!p || memcmp(opp->hw_addr, p->dev->dev_addr, ETH_ALEN) != 0)
                return -1;

        p->flags = htonl(opp->flags);

        return 0;
}
static int
send_port_status(struct net_bridge_port *p, uint8_t status)
{
        struct sk_buff *skb;
        struct ofp_port_status *ops;

        ops = alloc_openflow_skb(p->dp, sizeof *ops, OFPT_PORT_STATUS, NULL,
                                 &skb);
        if (!ops)
                return -ENOMEM;

        ops->reason = status;
        fill_port_desc(p, &ops->desc);

        return send_openflow_skb(skb, NULL);
}
int
dp_send_flow_expired(struct datapath *dp, struct sw_flow *flow)
{
        struct sk_buff *skb;
        struct ofp_flow_expired *ofe;
        unsigned long duration_j;

        ofe = alloc_openflow_skb(dp, sizeof *ofe, OFPT_FLOW_EXPIRED, 0, &skb);
        if (!ofe)
                return -ENOMEM;

        flow_fill_match(&ofe->match, &flow->key);
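        /* flow->timeout appears to be the expiration time in jiffies, so
         * subtracting the idle allowance (HZ * flow->max_idle) recovers the
         * time the flow was last used; the duration reported to the
         * controller therefore spans creation through last use. */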
        duration_j = (flow->timeout - HZ * flow->max_idle) - flow->init_time;
        ofe->duration = htonl(duration_j / HZ);
        ofe->packet_count = cpu_to_be64(flow->packet_count);
        ofe->byte_count = cpu_to_be64(flow->byte_count);
        return send_openflow_skb(skb, NULL);
}
static void
fill_flow_stats(struct ofp_flow_stats *ofs, struct sw_flow *flow,
                int table_idx)
{
        ofs->match.wildcards = htons(flow->key.wildcards);
        ofs->match.in_port = flow->key.in_port;
        memcpy(ofs->match.dl_src, flow->key.dl_src, ETH_ALEN);
        memcpy(ofs->match.dl_dst, flow->key.dl_dst, ETH_ALEN);
        ofs->match.dl_vlan = flow->key.dl_vlan;
        ofs->match.dl_type = flow->key.dl_type;
        ofs->match.nw_src = flow->key.nw_src;
        ofs->match.nw_dst = flow->key.nw_dst;
        ofs->match.nw_proto = flow->key.nw_proto;
        memset(ofs->match.pad, 0, sizeof ofs->match.pad);
        ofs->match.tp_src = flow->key.tp_src;
        ofs->match.tp_dst = flow->key.tp_dst;
        ofs->duration = htonl((jiffies - flow->init_time) / HZ);
        ofs->priority = htons(flow->priority);
        ofs->table_id = table_idx;
        ofs->packet_count = cpu_to_be64(flow->packet_count);
        ofs->byte_count = cpu_to_be64(flow->byte_count);
}
static int
fill_port_stats_reply(struct datapath *dp, struct ofp_port_stats_reply *psr)
{
        struct net_bridge_port *p;
        int port_count = 0;

        list_for_each_entry_rcu (p, &dp->port_list, node) {
                struct ofp_port_stats *ps = &psr->ports[port_count++];
                struct net_device_stats *stats = p->dev->get_stats(p->dev);

                ps->port_no = htons(p->port_no);
                memset(ps->pad, 0, sizeof ps->pad);
                ps->rx_count = cpu_to_be64(stats->rx_packets);
                ps->tx_count = cpu_to_be64(stats->tx_packets);
                ps->drop_count = cpu_to_be64(stats->rx_dropped
                                             + stats->tx_dropped);
        }

        return port_count;
}
int
dp_send_port_stats(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_port_stats_reply *psr;
        size_t psr_len, port_max_len;
        int port_count;

        port_max_len = sizeof(struct ofp_port_stats) * OFPP_MAX;
        psr = alloc_openflow_skb(dp, sizeof *psr + port_max_len,
                                 OFPT_PORT_STATS_REPLY, sender, &skb);
        if (!psr)
                return -ENOMEM;

        port_count = fill_port_stats_reply(dp, psr);

        psr_len = sizeof *psr + sizeof(struct ofp_port_stats) * port_count;
        resize_openflow_skb(skb, &psr->header, psr_len);
        return send_openflow_skb(skb, sender);
}
int
dp_send_table_stats(struct datapath *dp, const struct sender *sender)
{
        struct sk_buff *skb;
        struct ofp_table_stats_reply *tsr;
        int i, n_tables;

        n_tables = dp->chain->n_tables;
        tsr = alloc_openflow_skb(dp, (offsetof(struct ofp_table_stats_reply,
                                               tables)
                                      + sizeof tsr->tables[0] * n_tables),
                                 OFPT_TABLE_STATS_REPLY, sender, &skb);
        if (!tsr)
                return -ENOMEM;

        for (i = 0; i < n_tables; i++) {
                struct ofp_table_stats *ots = &tsr->tables[i];
                struct sw_table_stats stats;

                dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
                strncpy(ots->name, stats.name, sizeof ots->name);
                ots->pad[0] = ots->pad[1] = 0;
                ots->max_entries = htonl(stats.max_flows);
                ots->active_count = htonl(stats.n_flows);
                ots->matched_count = cpu_to_be64(0); /* FIXME */
        }
        return send_openflow_skb(skb, sender);
}
/* Generic Netlink interface.
 *
 * See netlink(7) for an introduction to netlink.  See
 * http://linux-net.osdl.org/index.php/Netlink for more information and
 * pointers on how to work with netlink and Generic Netlink in the kernel and
 * in userspace. */

static struct genl_family dp_genl_family = {
        .id = GENL_ID_GENERATE,
        .name = DP_GENL_FAMILY_NAME,
        .maxattr = DP_GENL_A_MAX,
};

/* Attribute policy: what each attribute may contain. */
static struct nla_policy dp_genl_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
        [DP_GENL_A_MC_GROUP] = { .type = NLA_U32 },
        [DP_GENL_A_PORTNAME] = { .type = NLA_STRING }
};
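/* Message layout summary (derived from put_openflow_headers() above): a
 * DP_GENL_C_OPENFLOW message carries the target datapath index in a
 * DP_GENL_A_DP_IDX u32 attribute and the raw OpenFlow message (a struct
 * ofp_header followed by its body) in a DP_GENL_A_OPENFLOW attribute. */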
static int dp_genl_add(struct sk_buff *skb, struct genl_info *info)
{
        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        return new_dp(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
}

static struct genl_ops dp_genl_ops_add_dp = {
        .cmd = DP_GENL_C_ADD_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add,
};

struct datapath *dp_get(int dp_idx)
{
        if (dp_idx < 0 || dp_idx >= DP_MAX)
                return NULL;
        return rcu_dereference(dps[dp_idx]);
}
static int dp_genl_del(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;

        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        mutex_lock(&dp_mutex);
        dp = dp_get(nla_get_u32((info->attrs[DP_GENL_A_DP_IDX])));

        mutex_unlock(&dp_mutex);
}

static struct genl_ops dp_genl_ops_del_dp = {
        .cmd = DP_GENL_C_DEL_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_del,
};
/* Queries a datapath for related information.  Currently the only relevant
 * information is the datapath's multicast group ID.  Really we want one
 * multicast group per datapath, but because of locking issues[*] we can't
 * easily get one.  Thus, every datapath will currently return the same
 * global multicast group ID, but in the future it would be nice to fix that.
 *
 * [*] dp_genl_add, to add a new datapath, is called under the genl_lock
 *     mutex, and genl_register_mc_group, called to acquire a new multicast
 *     group ID, also acquires genl_lock, thus deadlock.
 */
static int dp_genl_query(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;
        struct sk_buff *ans_skb = NULL;
        int dp_idx;
        int err;
        void *data;

        if (!info->attrs[DP_GENL_A_DP_IDX])
                return -EINVAL;

        dp_idx = nla_get_u32((info->attrs[DP_GENL_A_DP_IDX]));
        dp = dp_get(dp_idx);

        ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);

        data = genlmsg_put_reply(ans_skb, info, &dp_genl_family,
                                 0, DP_GENL_C_QUERY_DP);

        NLA_PUT_U32(ans_skb, DP_GENL_A_DP_IDX, dp_idx);
        NLA_PUT_U32(ans_skb, DP_GENL_A_MC_GROUP, mc_group.id);

        genlmsg_end(ans_skb, data);
        err = genlmsg_reply(ans_skb, info);

        return err;
}

static struct genl_ops dp_genl_ops_query_dp = {
        .cmd = DP_GENL_C_QUERY_DP,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_query,
};
static int dp_genl_add_del_port(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;
        struct net_device *port;
        int err;

        if (!info->attrs[DP_GENL_A_DP_IDX] || !info->attrs[DP_GENL_A_PORTNAME])
                return -EINVAL;

        /* Get datapath. */
        mutex_lock(&dp_mutex);
        dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));

        /* Get interface to add/remove. */
        port = dev_get_by_name(&init_net,
                               nla_data(info->attrs[DP_GENL_A_PORTNAME]));

        /* Execute operation. */
        if (info->genlhdr->cmd == DP_GENL_C_ADD_PORT)
                err = add_switch_port(dp, port);
        else {
                if (port->br_port == NULL || port->br_port->dp != dp)
                        err = -ENOENT;
                else
                        err = del_switch_port(port->br_port);
        }

        mutex_unlock(&dp_mutex);
        return err;
}

static struct genl_ops dp_genl_ops_add_port = {
        .cmd = DP_GENL_C_ADD_PORT,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add_del_port,
};

static struct genl_ops dp_genl_ops_del_port = {
        .cmd = DP_GENL_C_DEL_PORT,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_policy,
        .doit = dp_genl_add_del_port,
};
static int dp_genl_openflow(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *va = info->attrs[DP_GENL_A_OPENFLOW];
        struct datapath *dp;
        struct ofp_header *oh;
        struct sender sender;
        int err;

        if (!info->attrs[DP_GENL_A_DP_IDX] || !va)
                return -EINVAL;

        dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));

        if (nla_len(va) < sizeof(struct ofp_header)) {

        }
        oh = nla_data(va);

        sender.xid = oh->xid;
        sender.pid = info->snd_pid;
        sender.seq = info->snd_seq;
        err = fwd_control_input(dp->chain, &sender, nla_data(va), nla_len(va));

        return err;
}

static struct nla_policy dp_genl_openflow_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
};
struct flow_stats_cb_state {
        int dp_idx;
        int table_idx;
        struct sw_table_position position;
        struct ofp_flow_stats_request *rq;
        int sent_terminator;

        struct ofp_flow_stats *flows;
        int n_flows, max_flows;
};
static int muster_callback(struct sw_flow *flow, void *private)
{
        struct flow_stats_cb_state *s = private;

        fill_flow_stats(&s->flows[s->n_flows], flow, s->table_idx);
        return ++s->n_flows >= s->max_flows;
}
static int
muster_flow_stats(struct datapath *dp, struct flow_stats_cb_state *s,
                  const struct sender *sender, struct sk_buff *skb)
{
        struct ofp_flow_stats_reply *fsr;
        size_t header_size, flow_size;
        struct sw_flow_key match_key;
        int max_openflow_len;
        size_t size;

        fsr = put_openflow_headers(dp, skb, OFPT_FLOW_STATS_REPLY, sender,
                                   &max_openflow_len);
        if (IS_ERR(fsr))
                return PTR_ERR(fsr);
        resize_openflow_skb(skb, &fsr->header, max_openflow_len);

        header_size = offsetof(struct ofp_flow_stats_reply, flows);
        flow_size = sizeof fsr->flows[0];
        s->max_flows = (max_openflow_len - header_size) / flow_size;
        if (s->max_flows <= 0)
                return -ENOMEM;
        s->flows = fsr->flows;

        flow_extract_match(&match_key, &s->rq->match);
        s->n_flows = 0;
        while (s->table_idx < dp->chain->n_tables
               && (s->rq->table_id == 0xff || s->rq->table_id == s->table_idx))
        {
                struct sw_table *table = dp->chain->tables[s->table_idx];

                if (table->iterate(table, &match_key, &s->position,
                                   muster_callback, s))
                        break;

                s->table_idx++;
                memset(&s->position, 0, sizeof s->position);
        }

        /* Signal dump completion. */
        if (s->sent_terminator) {
                return 0;
        }
        s->sent_terminator = 1;

        size = header_size + flow_size * s->n_flows;
        resize_openflow_skb(skb, &fsr->header, size);
        return skb->len;
}
static int
dp_genl_openflow_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct datapath *dp;
        struct sender sender;
        struct flow_stats_cb_state *state;
        struct nlattr *va;
        int err;

        if (!cb->args[0]) {
                struct nlattr *attrs[DP_GENL_A_MAX + 1];
                struct ofp_flow_stats_request *rq;

                err = nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs, DP_GENL_A_MAX,
                                  dp_genl_openflow_policy);
                if (err < 0)
                        return err;

                if (!attrs[DP_GENL_A_DP_IDX])
                        return -EINVAL;

                va = attrs[DP_GENL_A_OPENFLOW];
                if (!va || nla_len(va) != sizeof *state->rq)
                        return -EINVAL;

                rq = nla_data(va);
                if (rq->header.version != OFP_VERSION
                    || rq->header.type != OFPT_FLOW_STATS_REQUEST
                    || ntohs(rq->header.length) != sizeof *rq)
                        return -EINVAL;

                state = kmalloc(sizeof *state, GFP_KERNEL);
                if (!state)
                        return -ENOMEM;

                state->dp_idx = nla_get_u32(attrs[DP_GENL_A_DP_IDX]);
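                /* A table_id of 0xff in the request means "all tables", so
                 * the walk starts at table 0; otherwise the dump is limited
                 * to the single table named in the request (see the loop in
                 * muster_flow_stats() above). */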
                state->table_idx = rq->table_id == 0xff ? 0 : rq->table_id;
                memset(&state->position, 0, sizeof state->position);
                state->rq = rq;
                state->sent_terminator = 0;

                cb->args[0] = (long) state;
        } else {
                state = (struct flow_stats_cb_state *) cb->args[0];
        }

        if (state->rq->type != OFPFS_INDIV) {

        }

        dp = dp_get(state->dp_idx);

        sender.xid = state->rq->header.xid;
        sender.pid = NETLINK_CB(cb->skb).pid;
        sender.seq = cb->nlh->nlmsg_seq;
        err = muster_flow_stats(dp, state, &sender, skb);

        return err;
}
static int
dp_genl_openflow_done(struct netlink_callback *cb)
{
        struct flow_stats_cb_state *state;

        state = (struct flow_stats_cb_state *) cb->args[0];
        kfree(state);
        return 0;
}

static struct genl_ops dp_genl_ops_openflow = {
        .cmd = DP_GENL_C_OPENFLOW,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_openflow_policy,
        .doit = dp_genl_openflow,
        .dumpit = dp_genl_openflow_dumpit,
        .done = dp_genl_openflow_done,
};
static struct nla_policy dp_genl_benchmark_policy[DP_GENL_A_MAX + 1] = {
        [DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
        [DP_GENL_A_NPACKETS] = { .type = NLA_U32 },
        [DP_GENL_A_PSIZE] = { .type = NLA_U32 },
};

static struct genl_ops dp_genl_ops_benchmark_nl = {
        .cmd = DP_GENL_C_BENCHMARK_NL,
        .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
        .policy = dp_genl_benchmark_policy,
        .doit = dp_genl_benchmark_nl,
};
static struct genl_ops *dp_genl_all_ops[] = {
        /* Keep this operation first.  Generic Netlink dispatching
         * looks up operations with linear search, so we want it at the
         * front. */
        &dp_genl_ops_openflow,

        &dp_genl_ops_add_dp,
        &dp_genl_ops_del_dp,
        &dp_genl_ops_query_dp,
        &dp_genl_ops_add_port,
        &dp_genl_ops_del_port,
        &dp_genl_ops_benchmark_nl,
};
static int dp_init_netlink(void)
{
        int err;
        int i;

        err = genl_register_family(&dp_genl_family);
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(dp_genl_all_ops); i++) {
                err = genl_register_ops(&dp_genl_family, dp_genl_all_ops[i]);
                if (err)
                        goto err_unregister;
        }

        strcpy(mc_group.name, "openflow");
        err = genl_register_mc_group(&dp_genl_family, &mc_group);
        if (err < 0)
                goto err_unregister;

        return 0;

err_unregister:
        genl_unregister_family(&dp_genl_family);
        return err;
}

static void dp_uninit_netlink(void)
{
        genl_unregister_family(&dp_genl_family);
}
#define DRV_NAME        "openflow"
#define DRV_VERSION     VERSION
#define DRV_DESCRIPTION "OpenFlow switching datapath implementation"
#define DRV_COPYRIGHT   "Copyright (c) 2007, 2008 The Board of Trustees of The Leland Stanford Junior University"

static int __init dp_init(void)
{
        int err;

        printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION "\n");
        printk(KERN_INFO DRV_NAME ": " VERSION " built on " __DATE__ " " __TIME__ "\n");
        printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
        err = dp_init_netlink();
        if (err)
                goto error_flow_exit;

        /* Hook into callback used by the bridge to intercept packets.
         * Parasites we are. */
        if (br_handle_frame_hook)
                printk("openflow: hijacking bridge hook\n");
        br_handle_frame_hook = dp_frame_hook;

        return 0;

error_flow_exit:
        printk(KERN_EMERG "openflow: failed to install!\n");
        return err;
}

static void dp_cleanup(void)
{
        dp_uninit_netlink();

        br_handle_frame_hook = NULL;
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");