/* Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

/* Functions for managing the dp interface/device. */
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>

#include <net/genetlink.h>

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <asm/system.h>
#include <linux/netfilter_bridge.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include "openflow-netlink.h"
#include "datapath_t.h"
/* Number of milliseconds between runs of the maintenance thread. */
#define MAINT_SLEEP_MSECS 1000

#define BRIDGE_PORT_NO_FLOOD	0x00000001

#define UINT32_MAX	4294967295U
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))

struct net_bridge_port {
	u16	port_no;
	u32	flags;
	struct datapath	*dp;
	struct net_device *dev;
	struct list_head node;	/* Element in datapath.ports. */
};

static struct genl_family dp_genl_family;
static struct genl_multicast_group mc_group;

int dp_dev_setup(struct net_device *dev);

/* It's hard to imagine wanting more than one datapath, but...
 *
 * Datapaths.  Protected on the read side by rcu_read_lock, on the write
 * side by dp_mutex.
 *
 * It is safe to access the datapath and net_bridge_port structures with just
 * the dp_mutex, but to access the chain you need to take the rcu_read_lock
 * also (because dp_mutex doesn't prevent flows from being destroyed).
 */
static struct datapath *dps[DP_MAX];
static DEFINE_MUTEX(dp_mutex);
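
/* Editorial sketch (not part of the original file): the locking rule above
 * in code form.  "example_chain_read" and "chain_lookup" are hypothetical
 * names; the point is the lock pairing. */
#if 0
static void example_chain_read(int dp_idx)
{
	struct datapath *dp;

	rcu_read_lock();			/* read side for dps[]... */
	dp = rcu_dereference(dps[dp_idx]);
	if (dp)
		chain_lookup(dp->chain);	/* ...and for the chain's flows */
	rcu_read_unlock();
}
#endif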
static int dp_maint_func(void *data);
static int send_port_status(struct net_bridge_port *p, uint8_t status);

/* nla_unreserve - reduce amount of space reserved by nla_reserve
 * @skb: socket buffer from which to recover room
 * @nla: netlink attribute to adjust
 * @len: amount by which to reduce attribute payload
 *
 * Reduces the amount of space reserved by a call to nla_reserve.
 *
 * No other attributes may be added between calling nla_reserve and this
 * function, since doing so would create a hole in the message.
 */
void nla_unreserve(struct sk_buff *skb, struct nlattr *nla, int len)
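{
	/* Body elided in this excerpt; a minimal sketch follows, assuming
	 * 'nla' is the last attribute in 'skb' (see the comment above):
	 * give back 'len' bytes of tail room and shrink the attribute's
	 * recorded payload length to match. */
	skb->tail -= len;
	skb->len -= len;
	nla->nla_len -= len;
}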
static void *
alloc_openflow_skb(struct datapath *dp, size_t openflow_len, uint8_t type,
		   const struct sender *sender, struct sk_buff **pskb)
{
	struct ofp_header *oh;
	struct nlattr *attr;
	struct sk_buff *skb;
	int genl_len;

	genl_len = nla_total_size(sizeof(uint32_t));	/* DP_GENL_A_DP_IDX */
	genl_len += nla_total_size(openflow_len);	/* DP_GENL_A_OPENFLOW */
	skb = *pskb = genlmsg_new(genl_len, GFP_ATOMIC);
	if (skb == NULL) {
		printk(KERN_ERR "alloc_openflow_skb: genlmsg_new failed\n");
		return NULL;
	}

	/* Assemble the Generic Netlink wrapper. */
	if (!genlmsg_put(skb,
			 sender ? sender->pid : 0,
			 sender ? sender->seq : 0,
			 &dp_genl_family, 0, DP_GENL_C_OPENFLOW))
		return NULL;
	if (nla_put_u32(skb, DP_GENL_A_DP_IDX, dp->dp_idx) < 0)
		return NULL;
	attr = nla_reserve(skb, DP_GENL_A_OPENFLOW, openflow_len);
	nlmsg_end(skb, (struct nlmsghdr *) skb->data);

	/* Fill in the OpenFlow header. */
	oh = nla_data(attr);
	oh->version = OFP_VERSION;
	oh->type = type;
	oh->length = htons(openflow_len);
	oh->xid = sender ? sender->xid : 0;

	return oh;
}
static void
resize_openflow_skb(struct sk_buff *skb,
		    struct ofp_header *oh, size_t new_length)
{
	struct nlattr *attr;

	BUG_ON(new_length > ntohs(oh->length));
	attr = ((void *) oh) - NLA_HDRLEN;
	nla_unreserve(skb, attr, ntohs(oh->length) - new_length);
	oh->length = htons(new_length);
	nlmsg_end(skb, (struct nlmsghdr *) skb->data);
}
static int
send_openflow_skb(struct sk_buff *skb, const struct sender *sender)
{
	int err = (sender
		   ? genlmsg_unicast(skb, sender->pid)
		   : genlmsg_multicast(skb, 0, mc_group.id, GFP_ATOMIC));
	if (err && net_ratelimit())
		printk(KERN_WARNING "send_openflow_skb: send failed: %d\n",
		       err);
	return err;
}
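
/* Editorial sketch (not part of the original file): how the three helpers
 * above fit together.  OFPT_EXAMPLE and the sizes are hypothetical; see
 * dp_send_features_reply() below for a real caller. */
#if 0
static int example_reply(struct datapath *dp, const struct sender *sender)
{
	struct sk_buff *skb;
	struct ofp_header *oh;

	/* Reserve the worst-case size up front... */
	oh = alloc_openflow_skb(dp, 128, OFPT_EXAMPLE, sender, &skb);
	if (!oh)
		return -ENOMEM;
	/* ...fill in the message, then shrink it to what was used... */
	resize_openflow_skb(skb, oh, 64);
	/* ...and send it back to the requester. */
	return send_openflow_skb(skb, sender);
}
#endif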
/* Generates a unique datapath id.  It incorporates the datapath index
 * and a hardware address, if available.  If not, it generates a random
 * one. */
uint64_t gen_datapath_id(uint16_t dp_idx)
{
	uint64_t id;
	struct net_device *dev;
	int i;

	/* The top 16 bits are used to identify the datapath.  The lower 48
	 * bits use an interface address. */
	id = (uint64_t)dp_idx << 48;
	if ((dev = dev_get_by_name(&init_net, "ctl0"))
	    || (dev = dev_get_by_name(&init_net, "eth0"))) {
		for (i = 0; i < ETH_ALEN; i++)
			id |= (uint64_t)dev->dev_addr[i]
			      << (8 * (ETH_ALEN - 1 - i));
		dev_put(dev);
	} else {
		/* Randomly choose the lower 48 bits if we cannot find an
		 * address and mark the most significant bit to indicate that
		 * this was randomly generated. */
		uint8_t rand[ETH_ALEN];

		get_random_bytes(rand, ETH_ALEN);
		id |= (uint64_t)1 << 63;
		for (i = 0; i < ETH_ALEN; i++)
			id |= (uint64_t)rand[i] << (8 * (ETH_ALEN - 1 - i));
	}

	return id;
}
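
/* Editorial note: for example, dp_idx 1 with hardware address
 * 00:11:22:33:44:55 yields id 0x0001001122334455, while the random fallback
 * for the same index looks like 0x8001xxxxxxxxxxxx, bit 63 being the
 * "randomly generated" marker. */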
/* Creates a new datapath numbered 'dp_idx'.  Returns 0 for success or a
 * negative error code.
 *
 * Not called with any locks. */
static int new_dp(int dp_idx)
{
	struct datapath *dp;
	int err;

	if (dp_idx < 0 || dp_idx >= DP_MAX)
		return -EINVAL;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&dp_mutex);
	dp = rcu_dereference(dps[dp_idx]);
	/* ... fail if the datapath already exists (elided) ... */

	dp = kzalloc(sizeof *dp, GFP_KERNEL);
	/* ... */

	dp->id = gen_datapath_id(dp_idx);
	dp->chain = chain_create(dp);
	if (dp->chain == NULL)
		/* ... error path elided ... */;
	INIT_LIST_HEAD(&dp->port_list);

	/* Set up our "of" device. */
	err = dp_dev_setup(&dp->dev);
	if (err) {
		printk(KERN_ERR "datapath: problem setting up 'of' device\n");
		/* ... */
	}

	dp->config.flags = 0;
	dp->config.miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;

	dp->dp_task = kthread_run(dp_maint_func, dp, "dp%d", dp_idx);
	if (IS_ERR(dp->dp_task))
		/* ... error path elided ... */;

	rcu_assign_pointer(dps[dp_idx], dp);
	mutex_unlock(&dp_mutex);
	return 0;

	/* Error paths (labels elided) unwind here: */
	mutex_unlock(&dp_mutex);
	module_put(THIS_MODULE);
	return err;
}
/* Find and return a free port number under 'dp'.  Called under dp_mutex. */
static int find_portno(struct datapath *dp)
{
	int i;

	for (i = 0; i < OFPP_MAX; i++)
		if (dp->ports[i] == NULL)
			return i;
	/* ... no free port: return a negative error (elided) ... */
}

static struct net_bridge_port *new_nbp(struct datapath *dp,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int port_no;

	port_no = find_portno(dp);
	if (port_no < 0)
		return ERR_PTR(port_no);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->dp = dp;
	p->dev = dev;
	p->port_no = port_no;

	return p;
}
/* Called with dp_mutex. */
int add_switch_port(struct datapath *dp, struct net_device *dev)
{
	struct net_bridge_port *p;

	if (dev->flags & IFF_LOOPBACK || dev->type != ARPHRD_ETHER)
		return -EINVAL;

	if (dev->br_port != NULL)
		return -EBUSY;

	p = new_nbp(dp, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	rcu_assign_pointer(dev->br_port, p);
	dev_set_promiscuity(dev, 1);

	rcu_assign_pointer(dp->ports[p->port_no], p);
	list_add_rcu(&p->node, &dp->port_list);

	/* Notify the ctlpath that this port has been added. */
	send_port_status(p, OFPPR_ADD);

	return 0;
}
/* Deletes 'p' from the switch.
 * Called with dp_mutex. */
static int del_switch_port(struct net_bridge_port *p)
{
	/* First drop references to the device. */
	dev_set_promiscuity(p->dev, -1);
	list_del_rcu(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
	rcu_assign_pointer(p->dev->br_port, NULL);

	/* Then wait until no one is still using it, and destroy it. */
	synchronize_rcu();

	/* Notify the ctlpath that this port no longer exists. */
	send_port_status(p, OFPPR_DELETE);

	dev_put(p->dev);
	kfree(p);

	return 0;
}
/* Called with dp_mutex. */
static void del_dp(struct datapath *dp)
{
	struct net_bridge_port *p, *n;

	/* Unregister the "of" device of this dp. */
	unregister_netdevice(&dp->dev);

	kthread_stop(dp->dp_task);

	/* Drop references to DP. */
	list_for_each_entry_safe (p, n, &dp->port_list, node)
		del_switch_port(p);
	rcu_assign_pointer(dps[dp->dp_idx], NULL);

	/* Wait until no longer in use, then destroy it. */
	synchronize_rcu();
	chain_destroy(dp->chain);
	kfree(dp);
	module_put(THIS_MODULE);
}
static int dp_maint_func(void *data)
{
	struct datapath *dp = (struct datapath *) data;

	while (!kthread_should_stop()) {
#if 1
		chain_timeout(dp->chain);
#else
		int count = chain_timeout(dp->chain);
		chain_print_stats(dp->chain);
		if (count)
			printk("%d flows timed out\n", count);
#endif
		msleep_interruptible(MAINT_SLEEP_MSECS);
	}

	return 0;
}
/*
 * Used as br_handle_frame_hook.  (The bridge cannot run at the same time,
 * even on a different set of devices!)  Returns 0 if *pskb should be
 * processed further, 1 if *pskb is handled.  (On kernels 2.6.22 and later
 * the hook instead returns the skb to be processed further, or NULL if it
 * has been consumed.) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Called with rcu_read_lock. */
static struct sk_buff *dp_frame_hook(struct net_bridge_port *p,
				     struct sk_buff *skb)
{
	struct ethhdr *eh = eth_hdr(skb);
	struct sk_buff *skb_local = NULL;

	if (compare_ether_addr(eh->h_dest, skb->dev->dev_addr) == 0)
		/* ... destined for the local port; handling elided ... */;

	if (is_broadcast_ether_addr(eh->h_dest)
	    || is_multicast_ether_addr(eh->h_dest)
	    || is_local_ether_addr(eh->h_dest))
		skb_local = skb_clone(skb, GFP_ATOMIC);

	/* Push the Ethernet header back on. */
	if (skb->protocol == htons(ETH_P_8021Q))
		skb_push(skb, VLAN_ETH_HLEN);
	else
		skb_push(skb, ETH_HLEN);

	fwd_port_input(p->dp->chain, skb, p->port_no);

	return skb_local;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static int dp_frame_hook(struct net_bridge_port *p, struct sk_buff **pskb)
{
	/* Push the Ethernet header back on. */
	if ((*pskb)->protocol == htons(ETH_P_8021Q))
		skb_push(*pskb, VLAN_ETH_HLEN);
	else
		skb_push(*pskb, ETH_HLEN);

	fwd_port_input(p->dp->chain, *pskb, p->port_no);

	return 1;
}
#else
/* NB: This has only been tested on 2.4.35 */

/* Called without any locks (?) */
static void dp_frame_hook(struct sk_buff *skb)
{
	struct net_bridge_port *p = skb->dev->br_port;

	/* Push the Ethernet header back on. */
	if (skb->protocol == htons(ETH_P_8021Q))
		skb_push(skb, VLAN_ETH_HLEN);
	else
		skb_push(skb, ETH_HLEN);

	/* ... */
	fwd_port_input(p->dp->chain, skb, p->port_no);
}
#endif
/* Forwarding output path.
 * Based on net/bridge/br_forward.c. */

/* Don't forward packets to the originating port or to ports that have
 * flooding disabled. */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	if ((skb->dev == p->dev) || (p->flags & BRIDGE_PORT_NO_FLOOD)) {
		return 0;
	}
	return 1;
}

static inline unsigned packet_length(const struct sk_buff *skb)
{
	int length = skb->len - ETH_HLEN;

	if (skb->protocol == htons(ETH_P_8021Q))
		length -= VLAN_HLEN;
	return length;
}
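
/* Editorial note: like br_flood() in net/bridge/br_forward.c, the loop in
 * flood() below clones the skb for every eligible port except the last one
 * found, which receives the original; that saves one clone per flood. */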
static int
flood(struct datapath *dp, struct sk_buff *skb)
{
	struct net_bridge_port *p;
	int prev_port = -1;

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		if (!should_deliver(p, skb))
			continue;
		if (prev_port != -1) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
			if (!clone) {
				kfree_skb(skb);
				return -ENOMEM;
			}
			dp_output_port(dp, clone, prev_port);
		}
		prev_port = p->port_no;
	}

	if (prev_port != -1)
		dp_output_port(dp, skb, prev_port);
	else
		kfree_skb(skb);

	return 0;
}
/* Marks 'skb' as having originated from 'in_port' in 'dp'.
 * FIXME: how are devices reference counted? */
int dp_set_origin(struct datapath *dp, uint16_t in_port,
		  struct sk_buff *skb)
{
	if (in_port < OFPP_MAX && dp->ports[in_port]) {
		skb->dev = dp->ports[in_port]->dev;
		return 0;
	}
	/* ... unknown port: return an error (elided) ... */
}

/* Takes ownership of 'skb' and transmits it to 'out_port' on 'dp'. */
int dp_output_port(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct net_bridge_port *p;

	if (out_port == OFPP_FLOOD)
		return flood(dp, skb);
	else if (out_port == OFPP_CONTROLLER)
		return dp_output_control(dp, skb, fwd_save_skb(skb), 0,
					 OFPR_ACTION);
	else if (out_port >= OFPP_MAX)
		/* ... invalid port: drop and complain (see below) ... */;

	p = dp->ports[out_port];
	/* ... NULL check elided ... */

	if (packet_length(skb) > skb->dev->mtu) {
		printk(KERN_WARNING "dropped over-mtu packet: %d > %d\n",
		       packet_length(skb), skb->dev->mtu);
		/* ... drop (elided) ... */
	}

	/* ... transmit path elided ... */

	printk(KERN_NOTICE "can't forward to bad port %d\n", out_port);
	/* ... */
}
/* Takes ownership of 'skb' and transmits it to 'dp''s control path.  If
 * 'buffer_id' != -1, then only the first 64 bytes of 'skb' are sent;
 * otherwise, all of 'skb' is sent.  'reason' indicates why 'skb' is being
 * sent.  'max_len' sets the maximum number of bytes that the caller
 * wants to be sent; a value of 0 indicates the entire packet should be
 * sent. */
int
dp_output_control(struct datapath *dp, struct sk_buff *skb,
		  uint32_t buffer_id, size_t max_len, int reason)
{
	/* FIXME?  Can we avoid creating a new skbuff in the case where we
	 * forward the whole packet? */
	struct sk_buff *f_skb;
	struct ofp_packet_in *opi;
	size_t fwd_len, opi_len;
	int err;

	fwd_len = skb->len;
	if ((buffer_id != (uint32_t) -1) && max_len)
		fwd_len = min(fwd_len, max_len);

	opi_len = offsetof(struct ofp_packet_in, data) + fwd_len;
	opi = alloc_openflow_skb(dp, opi_len, OFPT_PACKET_IN, NULL, &f_skb);
	opi->buffer_id = htonl(buffer_id);
	opi->total_len = htons(skb->len);
	opi->in_port = htons(skb->dev->br_port->port_no);
	opi->reason = reason;
	memcpy(opi->data, skb_mac_header(skb), fwd_len);
	err = send_openflow_skb(f_skb, NULL);

	kfree_skb(skb);

	return err;
}
static void fill_port_desc(struct net_bridge_port *p, struct ofp_phy_port *desc)
{
	desc->port_no = htons(p->port_no);
	strncpy(desc->name, p->dev->name, OFP_MAX_PORT_NAME_LEN);
	desc->name[OFP_MAX_PORT_NAME_LEN - 1] = '\0';
	memcpy(desc->hw_addr, p->dev->dev_addr, ETH_ALEN);
	desc->flags = htonl(p->flags);
	desc->features = 0;
	desc->speed = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,24)
	if (p->dev->ethtool_ops && p->dev->ethtool_ops->get_settings) {
		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

		if (!p->dev->ethtool_ops->get_settings(p->dev, &ecmd)) {
			if (ecmd.supported & SUPPORTED_10baseT_Half)
				desc->features |= OFPPF_10MB_HD;
			if (ecmd.supported & SUPPORTED_10baseT_Full)
				desc->features |= OFPPF_10MB_FD;
			if (ecmd.supported & SUPPORTED_100baseT_Half)
				desc->features |= OFPPF_100MB_HD;
			if (ecmd.supported & SUPPORTED_100baseT_Full)
				desc->features |= OFPPF_100MB_FD;
			if (ecmd.supported & SUPPORTED_1000baseT_Half)
				desc->features |= OFPPF_1GB_HD;
			if (ecmd.supported & SUPPORTED_1000baseT_Full)
				desc->features |= OFPPF_1GB_FD;
			/* 10 Gbps half-duplex doesn't exist... */
			if (ecmd.supported & SUPPORTED_10000baseT_Full)
				desc->features |= OFPPF_10GB_FD;

			desc->features = htonl(desc->features);
			desc->speed = htonl(ecmd.speed);
		}
	}
#endif
}
static int
fill_features_reply(struct datapath *dp, struct ofp_switch_features *ofr)
{
	struct net_bridge_port *p;
	int port_count = 0;

	ofr->datapath_id = cpu_to_be64(dp->id);

	ofr->n_exact = htonl(2 * TABLE_HASH_MAX_FLOWS);
	ofr->n_mac_only = htonl(TABLE_MAC_MAX_FLOWS);
	ofr->n_compression = 0;		/* Not supported */
	ofr->n_general = htonl(TABLE_LINEAR_MAX_FLOWS);
	ofr->buffer_mb = htonl(UINT32_MAX);
	ofr->n_buffers = htonl(N_PKT_BUFFERS);
	ofr->capabilities = htonl(OFP_SUPPORTED_CAPABILITIES);
	ofr->actions = htonl(OFP_SUPPORTED_ACTIONS);

	list_for_each_entry_rcu (p, &dp->port_list, node) {
		fill_port_desc(p, &ofr->ports[port_count]);
		port_count++;
	}

	return port_count;
}

static int
dp_send_features_reply(struct datapath *dp, const struct sender *sender)
{
	struct sk_buff *skb;
	struct ofp_switch_features *ofr;
	size_t ofr_len, port_max_len;
	int port_count;

	port_max_len = sizeof(struct ofp_phy_port) * OFPP_MAX;
	ofr = alloc_openflow_skb(dp, sizeof(*ofr) + port_max_len,
				 OFPT_FEATURES_REPLY, sender, &skb);
	if (!ofr)
		return -ENOMEM;

	port_count = fill_features_reply(dp, ofr);

	ofr_len = sizeof(*ofr) + (sizeof(struct ofp_phy_port) * port_count);
	resize_openflow_skb(skb, &ofr->header, ofr_len);
	return send_openflow_skb(skb, sender);
}
int
dp_send_config_reply(struct datapath *dp, const struct sender *sender)
{
	struct sk_buff *skb;
	struct ofp_switch_config *osc;

	osc = alloc_openflow_skb(dp, sizeof *osc, OFPT_GET_CONFIG_REPLY,
				 sender, &skb);
	if (!osc)
		return -ENOMEM;

	memcpy(((char *)osc) + sizeof osc->header,
	       ((char *)&dp->config) + sizeof dp->config.header,
	       sizeof dp->config - sizeof dp->config.header);

	return send_openflow_skb(skb, sender);
}
int
dp_update_port_flags(struct datapath *dp, const struct ofp_phy_port *opp)
{
	struct net_bridge_port *p;

	p = dp->ports[ntohs(opp->port_no)];

	/* Make sure the port id hasn't changed since this was sent. */
	if (!p || memcmp(opp->hw_addr, p->dev->dev_addr, ETH_ALEN) != 0)
		return -1;

	p->flags = ntohl(opp->flags);

	return 0;
}

static int
send_port_status(struct net_bridge_port *p, uint8_t status)
{
	struct sk_buff *skb;
	struct ofp_port_status *ops;

	ops = alloc_openflow_skb(p->dp, sizeof *ops, OFPT_PORT_STATUS, NULL,
				 &skb);
	if (!ops)
		return -ENOMEM;

	ops->reason = status;
	fill_port_desc(p, &ops->desc);

	return send_openflow_skb(skb, NULL);
}
int
dp_send_flow_expired(struct datapath *dp, struct sw_flow *flow)
{
	struct sk_buff *skb;
	struct ofp_flow_expired *ofe;
	unsigned long duration_j;

	ofe = alloc_openflow_skb(dp, sizeof *ofe, OFPT_FLOW_EXPIRED, NULL,
				 &skb);
	if (!ofe)
		return -ENOMEM;

	flow_fill_match(&ofe->match, &flow->key);
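
	/* Editorial note: this assumes flow->timeout holds (time of last use
	 * + HZ * flow->max_idle), so the subtraction below recovers the time
	 * of last use; subtracting init_time then yields the flow's lifetime
	 * in jiffies, converted to seconds for the wire format. */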
	duration_j = (flow->timeout - HZ * flow->max_idle) - flow->init_time;
	ofe->duration = htonl(duration_j / HZ);
	ofe->packet_count = cpu_to_be64(flow->packet_count);
	ofe->byte_count = cpu_to_be64(flow->byte_count);
	return send_openflow_skb(skb, NULL);
}
/* Generic Netlink interface.
 *
 * See netlink(7) for an introduction to netlink.  See
 * http://linux-net.osdl.org/index.php/Netlink for more information and
 * pointers on how to work with netlink and Generic Netlink in the kernel
 * and in userspace. */

static struct genl_family dp_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = DP_GENL_FAMILY_NAME,
	.version = 1,
	.maxattr = DP_GENL_A_MAX,
};

/* Attribute policy: what each attribute may contain. */
static struct nla_policy dp_genl_policy[DP_GENL_A_MAX + 1] = {
	[DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
	[DP_GENL_A_MC_GROUP] = { .type = NLA_U32 },
	[DP_GENL_A_PORTNAME] = { .type = NLA_STRING }
};
static int dp_genl_add(struct sk_buff *skb, struct genl_info *info)
{
	if (!info->attrs[DP_GENL_A_DP_IDX])
		return -EINVAL;

	return new_dp(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
}

static struct genl_ops dp_genl_ops_add_dp = {
	.cmd = DP_GENL_C_ADD_DP,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_policy,
	.doit = dp_genl_add,
};

struct datapath *dp_get(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= DP_MAX)
		return NULL;
	return rcu_dereference(dps[dp_idx]);
}
static int dp_genl_del(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;
	int err;

	if (!info->attrs[DP_GENL_A_DP_IDX])
		return -EINVAL;

	mutex_lock(&dp_mutex);
	dp = dp_get(nla_get_u32((info->attrs[DP_GENL_A_DP_IDX])));
	if (!dp)
		err = -ENOENT;
	else {
		del_dp(dp);
		err = 0;
	}
	mutex_unlock(&dp_mutex);
	return err;
}

static struct genl_ops dp_genl_ops_del_dp = {
	.cmd = DP_GENL_C_DEL_DP,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_policy,
	.doit = dp_genl_del,
};
/* Queries a datapath for related information.  Currently the only relevant
 * information is the datapath's multicast group ID.  Really we want one
 * multicast group per datapath, but because of locking issues[*] we can't
 * easily get one.  Thus, every datapath will currently return the same
 * global multicast group ID, but in the future it would be nice to fix that.
 *
 * [*] dp_genl_add, to add a new datapath, is called under the genl_lock
 * mutex, and genl_register_mc_group, called to acquire a new multicast
 * group ID, also acquires genl_lock, thus deadlock. */
static int dp_genl_query(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;
	struct sk_buff *ans_skb = NULL;
	int dp_idx;
	void *data;
	int err;

	if (!info->attrs[DP_GENL_A_DP_IDX])
		return -EINVAL;

	dp_idx = nla_get_u32((info->attrs[DP_GENL_A_DP_IDX]));
	dp = dp_get(dp_idx);
	/* ... bail out if no such datapath (elided) ... */

	ans_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	/* ... allocation-failure check elided ... */

	data = genlmsg_put_reply(ans_skb, info, &dp_genl_family,
				 0, DP_GENL_C_QUERY_DP);
	/* ... */

	NLA_PUT_U32(ans_skb, DP_GENL_A_DP_IDX, dp_idx);
	NLA_PUT_U32(ans_skb, DP_GENL_A_MC_GROUP, mc_group.id);
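
	/* Editorial note: the legacy NLA_PUT_U32() macro jumps to a local
	 * "nla_put_failure" label when the attribute doesn't fit; that label
	 * and its cleanup are elided from this excerpt. */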
	genlmsg_end(ans_skb, data);
	err = genlmsg_reply(ans_skb, info);
	/* ... */
}
/*
 * Fills a flow entry for a netlink flow query.  Called with rcu_read_lock.
 */
static int
dp_fill_flow(struct ofp_flow_mod *ofm, struct swt_iterator *iter)
{
	ofm->header.version = OFP_VERSION;
	ofm->header.type = OFPT_FLOW_MOD;
	ofm->header.length = htons(sizeof(struct ofp_flow_mod)
				   + sizeof(ofm->actions[0]));
	ofm->header.xid = htonl(0);

	ofm->match.wildcards = htons(iter->flow->key.wildcards);
	ofm->match.in_port = iter->flow->key.in_port;
	ofm->match.dl_vlan = iter->flow->key.dl_vlan;
	memcpy(ofm->match.dl_src, iter->flow->key.dl_src, ETH_ALEN);
	memcpy(ofm->match.dl_dst, iter->flow->key.dl_dst, ETH_ALEN);
	ofm->match.dl_type = iter->flow->key.dl_type;
	ofm->match.nw_src = iter->flow->key.nw_src;
	ofm->match.nw_dst = iter->flow->key.nw_dst;
	ofm->match.nw_proto = iter->flow->key.nw_proto;
	ofm->match.tp_src = iter->flow->key.tp_src;
	ofm->match.tp_dst = iter->flow->key.tp_dst;
	ofm->group_id = iter->flow->group_id;
	ofm->max_idle = iter->flow->max_idle;
	/* TODO support multiple actions */
	ofm->actions[0] = iter->flow->actions[0];

	return 0;
}
/* Convenience function. */
static void *
dp_init_nl_flow_msg(uint32_t dp_idx, uint16_t table_idx,
		    struct genl_info *info, struct sk_buff *skb)
{
	void *data;

	data = genlmsg_put_reply(skb, info, &dp_genl_family, 0,
				 DP_GENL_C_QUERY_FLOW);
	if (data == NULL)
		return NULL;

	NLA_PUT_U32(skb, DP_GENL_A_DP_IDX, dp_idx);
	NLA_PUT_U16(skb, DP_GENL_A_TABLEIDX, table_idx);

	return data;

nla_put_failure:
	return NULL;
}
/* Iterates through the specified table and sends all flow entries over
 * netlink to userspace.  Each flow message has the following format:
 *
 *   u32 DP_GENL_A_DP_IDX
 *   u16 DP_GENL_A_TABLEIDX
 *   u32 DP_GENL_A_NUMFLOWS (number of flows)
 *   DP_GENL_A_FLOW (array of OpenFlow flow entries)
 *
 * The full table may require multiple messages.  A message with 0 flows
 * signifies end-of-message. */
static int
dp_dump_table(struct datapath *dp, uint16_t table_idx,
	      struct genl_info *info, struct ofp_flow_mod *matchme)
{
	struct sk_buff *skb = NULL;
	struct sw_table *table = NULL;
	struct swt_iterator iter;
	struct sw_flow_key in_flow;
	struct nlattr *attr;
	int count = 0, sum_count = 0;
	void *data;
	uint8_t *ofm_ptr = NULL;
	struct nlattr *num_attr;
	int err;

	table = dp->chain->tables[table_idx];
	if (table == NULL) {
		dprintk("dp::dp_dump_table error, nonexistent table at position %d\n", table_idx);
		return -EINVAL;
	}

	if (!table->iterator(table, &iter)) {
		dprintk("dp::dp_dump_table couldn't initialize empty table iterator\n");
		/* ... */
	}

	/* Verify that we can fit all NL_FLOWS_PER_MESSAGE flows in a single
	 * sk_buff. */
	if ((sizeof(dp_genl_family) + sizeof(uint32_t) + sizeof(uint16_t)
	     + sizeof(uint32_t)
	     + (NL_FLOWS_PER_MESSAGE * sizeof(struct ofp_flow_mod)))
	    > (8192 - 64)) {
		dprintk("dp::dp_dump_table NL_FLOWS_PER_MESSAGE may cause overrun in sk_buff\n");
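		/* Editorial note: 8192 - 64 appears to approximate the usable
		 * payload of a one-page netlink buffer (compare
		 * NLMSG_GOODSIZE); the check guards against
		 * NL_FLOWS_PER_MESSAGE being raised past what one message
		 * can carry. */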
	}

	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	data = dp_init_nl_flow_msg(dp->dp_idx, table_idx, info, skb);
	if (data == NULL)
		goto error_free_skb;

	/* Reserve space to put the number of flows for this message; it is
	 * filled in after the loop. */
	num_attr = nla_reserve(skb, DP_GENL_A_NUMFLOWS, sizeof(uint32_t));
	if (num_attr == NULL)
		goto error_free_skb;

	/* Only load NL_FLOWS_PER_MESSAGE flows at a time. */
	attr = nla_reserve(skb, DP_GENL_A_FLOW,
			   (sizeof(struct ofp_flow_mod)
			    + sizeof(struct ofp_action))
			   * NL_FLOWS_PER_MESSAGE);
	if (attr == NULL)
		goto error_free_skb;

	/* Inner loop: fill up to NL_FLOWS_PER_MESSAGE flows. */
	ofm_ptr = nla_data(attr);
	flow_extract_match(&in_flow, &matchme->match);
	while (iter.flow && count < NL_FLOWS_PER_MESSAGE) {
		if (flow_matches(&in_flow, &iter.flow->key)) {
			if ((err = dp_fill_flow((struct ofp_flow_mod *)ofm_ptr,
						&iter)))
				goto error_free_skb;
			count++;
			/* TODO support multiple actions */
			ofm_ptr += sizeof(struct ofp_flow_mod)
				   + sizeof(struct ofp_action);
		}
		table->iterator_next(&iter);
	}

	*((uint32_t *)nla_data(num_attr)) = count;
	genlmsg_end(skb, data);
	/* ... */

	err = genlmsg_unicast(skb, info->snd_pid);
	/* ... */
	/* Send a sentinel message saying we're done. */
	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	data = dp_init_nl_flow_msg(dp->dp_idx, table_idx, info, skb);
	if (data == NULL)
		goto error_free_skb;

	NLA_PUT_U32(skb, DP_GENL_A_NUMFLOWS, 0);
	/* Dummy flow so netlink doesn't complain. */
	attr = nla_reserve(skb, DP_GENL_A_FLOW, sizeof(struct ofp_flow_mod));
	if (attr == NULL)
		goto error_free_skb;

	genlmsg_end(skb, data);
	err = genlmsg_reply(skb, info);
	skb = NULL;
	/* ... cleanup and the error_free_skb label elided ... */
}
/* Helper function for dp_genl_table_query which creates and sends a message
 * packed with table stats.  Message form is:
 *
 *   u32 DP_GENL_A_DP_IDX
 *   u32 DP_GENL_A_NUMTABLES
 *   OFP_TABLE (list of struct ofp_table entries)
 */
static int
dp_dump_table_stats(struct datapath *dp, int dp_idx, struct genl_info *info)
{
	struct sk_buff *skb = NULL;
	struct ofp_table *ot = NULL;
	struct nlattr *attr;
	struct sw_table_stats stats;
	void *data;
	size_t len;
	int i, err;
	int nt = dp->chain->n_tables;

	/* u32 IDX, u32 NUMTABLES, list of tables */
	len = 4 + 4 + (sizeof(struct ofp_table) * nt);

	skb = nlmsg_new(MAX(len, NLMSG_GOODSIZE), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	data = genlmsg_put_reply(skb, info, &dp_genl_family, 0,
				 DP_GENL_C_QUERY_TABLE);
	if (data == NULL)
		goto error_free_skb;

	NLA_PUT_U32(skb, DP_GENL_A_DP_IDX, dp_idx);
	NLA_PUT_U32(skb, DP_GENL_A_NUMTABLES, nt);

	/* ... we assume that all tables can fit in a single message.
	 * Probably a reasonable assumption, seeing that we only have a
	 * few of them. */
	attr = nla_reserve(skb, DP_GENL_A_TABLE,
			   (sizeof(struct ofp_table) * nt));
	if (attr == NULL)
		goto error_free_skb;

	ot = nla_data(attr);

	for (i = 0; i < nt; ++i) {
		dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
		ot->header.version = OFP_VERSION;
		ot->header.type = OFPT_TABLE;
		ot->header.length = htons(sizeof(struct ofp_table));
		ot->header.xid = htonl(0);

		strncpy(ot->name, stats.name, OFP_MAX_TABLE_NAME_LEN);
		ot->table_id = htons(i);
		ot->n_flows = htonl(stats.n_flows);
		ot->max_flows = htonl(stats.max_flows);
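		/* Editorial: the advance to the next table entry is elided in
		 * the original excerpt; something equivalent to the following
		 * has to close the loop body. */
		ot++;
	}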
	genlmsg_end(skb, data);
	err = genlmsg_reply(skb, info);
	skb = NULL;
	/* ... cleanup plus the nla_put_failure and error_free_skb labels
	 * elided ... */
}
/*
 * Queries a datapath for flow-table statistics.
 */
static int dp_genl_table_query(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;
	int err;

	if (!info->attrs[DP_GENL_A_DP_IDX]) {
		dprintk("dp::dp_genl_table_query received message with missing attributes\n");
		return -EINVAL;
	}

	dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
	/* ... bail out if no such datapath (elided) ... */

	err = dp_dump_table_stats(dp,
				  nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]),
				  info);
	/* ... */
}
/*
 * Queries a datapath for flow-table entries.
 */
static int dp_genl_flow_query(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;
	struct ofp_flow_mod *ofm;
	uint16_t table_idx;
	int err;

	if (!info->attrs[DP_GENL_A_DP_IDX]
	    || !info->attrs[DP_GENL_A_TABLEIDX]
	    || !info->attrs[DP_GENL_A_FLOW]) {
		dprintk("dp::dp_genl_flow_query received message with missing attributes\n");
		return -EINVAL;
	}

	dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
	/* ... bail out if no such datapath (elided) ... */

	table_idx = nla_get_u16(info->attrs[DP_GENL_A_TABLEIDX]);

	if (dp->chain->n_tables <= table_idx) {
		printk(KERN_WARNING "table index %d invalid (dp has %d tables)\n",
		       table_idx, dp->chain->n_tables);
		return -EINVAL;
	}

	ofm = nla_data(info->attrs[DP_GENL_A_FLOW]);
	err = dp_dump_table(dp, table_idx, info, ofm);
	/* ... */
}
static struct nla_policy dp_genl_flow_policy[DP_GENL_A_MAX + 1] = {
	[DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
	[DP_GENL_A_TABLEIDX] = { .type = NLA_U16 },
	[DP_GENL_A_NUMFLOWS] = { .type = NLA_U32 },
};

static struct genl_ops dp_genl_ops_query_flow = {
	.cmd = DP_GENL_C_QUERY_FLOW,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_flow_policy,
	.doit = dp_genl_flow_query,
};

static struct nla_policy dp_genl_table_policy[DP_GENL_A_MAX + 1] = {
	[DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
};

static struct genl_ops dp_genl_ops_query_table = {
	.cmd = DP_GENL_C_QUERY_TABLE,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_table_policy,
	.doit = dp_genl_table_query,
};

static struct genl_ops dp_genl_ops_query_dp = {
	.cmd = DP_GENL_C_QUERY_DP,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_policy,
	.doit = dp_genl_query,
};
static int dp_genl_add_del_port(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;
	struct net_device *port;
	int err;

	if (!info->attrs[DP_GENL_A_DP_IDX] || !info->attrs[DP_GENL_A_PORTNAME])
		return -EINVAL;

	/* Get datapath. */
	mutex_lock(&dp_mutex);
	dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
	/* ... bail out if no such datapath (elided) ... */

	/* Get interface to add/remove. */
	port = dev_get_by_name(&init_net,
			       nla_data(info->attrs[DP_GENL_A_PORTNAME]));
	/* ... bail out if no such device (elided) ... */

	/* Execute operation. */
	if (info->genlhdr->cmd == DP_GENL_C_ADD_PORT)
		err = add_switch_port(dp, port);
	else {
		if (port->br_port == NULL || port->br_port->dp != dp) {
			/* ... not one of our ports: error out (elided) ... */
		}
		err = del_switch_port(port->br_port);
	}
	/* ... */

	mutex_unlock(&dp_mutex);
	return err;
}

static struct genl_ops dp_genl_ops_add_port = {
	.cmd = DP_GENL_C_ADD_PORT,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_policy,
	.doit = dp_genl_add_del_port,
};

static struct genl_ops dp_genl_ops_del_port = {
	.cmd = DP_GENL_C_DEL_PORT,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_policy,
	.doit = dp_genl_add_del_port,
};
static int dp_genl_openflow(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *va = info->attrs[DP_GENL_A_OPENFLOW];
	struct datapath *dp;
	struct ofp_header *oh;
	struct sender sender;
	int err;

	if (!info->attrs[DP_GENL_A_DP_IDX] || !va)
		return -EINVAL;

	dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
	/* ... bail out if no such datapath (elided) ... */

	va = info->attrs[DP_GENL_A_OPENFLOW];
	if (nla_len(va) < sizeof(struct ofp_header)) {
		/* ... malformed message: error out (elided) ... */
	}
	oh = nla_data(va);

	sender.xid = oh->xid;
	sender.pid = info->snd_pid;
	sender.seq = info->snd_seq;
	err = fwd_control_input(dp->chain, &sender, nla_data(va), nla_len(va));
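
	/* Editorial note: any replies generated while this message is being
	 * processed are routed back to the requester: alloc_openflow_skb()
	 * stamps the Generic Netlink header with sender.pid/sender.seq, and
	 * send_openflow_skb() then unicasts to sender.pid. */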

	return err;
}

static struct nla_policy dp_genl_openflow_policy[DP_GENL_A_MAX + 1] = {
	[DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
};

static struct genl_ops dp_genl_ops_openflow = {
	.cmd = DP_GENL_C_OPENFLOW,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_openflow_policy,
	.doit = dp_genl_openflow,
};

static struct nla_policy dp_genl_benchmark_policy[DP_GENL_A_MAX + 1] = {
	[DP_GENL_A_DP_IDX] = { .type = NLA_U32 },
	[DP_GENL_A_NPACKETS] = { .type = NLA_U32 },
	[DP_GENL_A_PSIZE] = { .type = NLA_U32 },
};

static struct genl_ops dp_genl_ops_benchmark_nl = {
	.cmd = DP_GENL_C_BENCHMARK_NL,
	.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	.policy = dp_genl_benchmark_policy,
	.doit = dp_genl_benchmark_nl,
};
static struct genl_ops *dp_genl_all_ops[] = {
	/* Keep this operation first.  Generic Netlink dispatching
	 * looks up operations with linear search, so we want it at the
	 * front. */
	&dp_genl_ops_openflow,

	&dp_genl_ops_query_flow,
	&dp_genl_ops_query_table,
	&dp_genl_ops_add_dp,
	&dp_genl_ops_del_dp,
	&dp_genl_ops_query_dp,
	&dp_genl_ops_add_port,
	&dp_genl_ops_del_port,
	&dp_genl_ops_benchmark_nl,
};
static int dp_init_netlink(void)
{
	int err;
	int i;

	err = genl_register_family(&dp_genl_family);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(dp_genl_all_ops); i++) {
		err = genl_register_ops(&dp_genl_family, dp_genl_all_ops[i]);
		if (err)
			goto err_unregister;
	}

	strcpy(mc_group.name, "openflow");
	err = genl_register_mc_group(&dp_genl_family, &mc_group);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	genl_unregister_family(&dp_genl_family);
	return err;
}

static void dp_uninit_netlink(void)
{
	genl_unregister_family(&dp_genl_family);
}
#define DRV_NAME	"openflow"
#define DRV_VERSION	VERSION
#define DRV_DESCRIPTION	"OpenFlow switching datapath implementation"
#define DRV_COPYRIGHT	"Copyright (c) 2007, 2008 The Board of Trustees of The Leland Stanford Junior University"

static int __init dp_init(void)
{
	int err;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION "\n");
	printk(KERN_INFO DRV_NAME ": " VERSION " built on " __DATE__ " " __TIME__ "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	err = flow_init();
	if (err)
		goto error;

	err = dp_init_netlink();
	if (err)
		goto error_flow_exit;

	/* Hook into the callback used by the bridge to intercept packets.
	 * Parasites we are. */
	if (br_handle_frame_hook)
		printk(KERN_WARNING "openflow: hijacking bridge hook\n");
	br_handle_frame_hook = dp_frame_hook;

	return 0;

error_flow_exit:
	flow_exit();
error:
	printk(KERN_EMERG "openflow: failed to install!\n");
	return err;
}
static void dp_cleanup(void)
{
	dp_uninit_netlink();

	br_handle_frame_hook = NULL;
	/* ... remaining teardown elided ... */
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");