2 * Distributed under the terms of the GNU GPL version 2.
3 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
4 * Stanford Junior University
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_ether.h>
10 #include <linux/if_vlan.h>
13 #include <linux/tcp.h>
14 #include <linux/udp.h>
15 #include <linux/in6.h>
16 #include <asm/uaccess.h>
17 #include <linux/types.h>
18 #include <net/checksum.h>
24 /* FIXME: do we need to use GFP_ATOMIC everywhere here? */
/* Forward declarations for helpers defined later in this file. */
26 static int make_writable(struct sk_buff **);
28 static struct sk_buff *retrieve_skb(uint32_t id);
29 static void discard_skb(uint32_t id);
31 /* 'skb' was received on port 'p', which may be a physical switch port, the
32 * local port, or a null pointer. Process it according to 'chain'. Returns 0
33 * if successful, in which case 'skb' is destroyed, or -ESRCH if there is no
34 * matching flow, in which case 'skb' still belongs to the caller. */
35 int run_flow_through_tables(struct sw_chain *chain, struct sk_buff *skb,
36 struct net_bridge_port *p)
38 /* Ethernet address used as the destination for STP frames. */
39 static const uint8_t stp_eth_addr[ETH_ALEN]
40 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
41 struct sw_flow_key key;
/* NOTE(review): flow_extract()'s nonzero return presumably flags an IP
 * fragment; combined with OFPC_FRAG_DROP it selects a drop path (the body
 * of this if is not visible in this extract) — confirm against
 * flow_extract()'s definition. */
44 if (flow_extract(skb, p ? p->port_no : OFPP_NONE, &key)
45 && (chain->dp->flags & OFPC_FRAG_MASK) == OFPC_FRAG_DROP) {
/* Per-port receive gating: ordinary traffic is blocked when
 * OFPPC_NO_RECV is set, STP frames (identified by destination MAC)
 * when OFPPC_NO_RECV_STP is set. */
50 if (p && p->config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
51 p->config & (compare_ether_addr(key.dl_dst, stp_eth_addr)
52 ? OFPPC_NO_RECV : OFPPC_NO_RECV_STP)) {
/* Flow-table lookup; on a hit, run the flow's action list.
 * sf_acts is read under RCU protection. */
57 flow = chain_lookup(chain, &key);
58 if (likely(flow != NULL)) {
59 struct sw_flow_actions *sf_acts = rcu_dereference(flow->sf_acts);
61 execute_actions(chain->dp, skb, &key,
62 sf_acts->actions, sf_acts->n_actions, 0);
69 /* 'skb' was received on port 'p', which may be a physical switch port, the
70 * local port, or a null pointer. Process it according to 'chain', sending it
71 * up to the controller if no flow matches. Takes ownership of 'skb'. */
72 void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb,
73 struct net_bridge_port *p)
/* No matching flow: buffer the packet (fwd_save_skb returns its buffer
 * id) and send it to the controller, truncated to miss_send_len. */
75 if (run_flow_through_tables(chain, skb, p))
76 dp_output_control(chain->dp, skb, fwd_save_skb(skb),
77 chain->dp->miss_send_len,
/* Transmit 'skb' out 'out_port'.  The pseudo-port OFPP_CONTROLLER
 * instead buffers the packet and forwards up to 'max_len' bytes to the
 * controller as an OFPR_ACTION packet-in. */
81 static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
82 int out_port, int ignore_no_fwd)
86 return (likely(out_port != OFPP_CONTROLLER)
87 ? dp_output_port(dp, skb, out_port, ignore_no_fwd)
88 : dp_output_control(dp, skb, fwd_save_skb(skb),
89 max_len, OFPR_ACTION));
/* Apply 'actions' (length 'n_actions') to 'skb' in order, using 'key'
 * for the packet's extracted flow fields. */
92 void execute_actions(struct datapath *dp, struct sk_buff *skb,
93 struct sw_flow_key *key,
94 const struct ofp_action *actions, int n_actions,
97 /* Every output action needs a separate clone of 'skb', but the common
98 * case is just a single output action, so that doing a clone and
99 * then freeing the original skbuff is wasteful. So the following code
100 * is slightly obscure just to avoid that. */
102 size_t max_len=0; /* Initialze to make compiler happy */
107 eth_proto = ntohs(key->dl_type);
109 for (i = 0; i < n_actions; i++) {
110 const struct ofp_action *a = &actions[i];
/* Emit the output deferred from the previous iteration on a clone,
 * keeping the original skb alive for the remaining actions. */
112 if (prev_port != -1) {
113 do_output(dp, skb_clone(skb, GFP_ATOMIC),
114 max_len, prev_port, ignore_no_fwd);
/* Output actions are only recorded here (prev_port/max_len); every
 * other action type modifies the packet in place below. */
118 if (likely(a->type == htons(OFPAT_OUTPUT))) {
119 prev_port = ntohs(a->arg.output.port);
120 max_len = ntohs(a->arg.output.max_len);
/* Field-setting actions need a private, writable copy of the data. */
122 if (!make_writable(&skb)) {
124 printk("make_writable failed\n");
127 skb = execute_setter(skb, eth_proto, key, a);
130 printk("execute_setter lost skb\n");
/* Final deferred output consumes the original skb — no clone needed. */
136 do_output(dp, skb, max_len, prev_port, ignore_no_fwd);
141 /* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
142 * covered by the sum has been changed from 'from' to 'to'. If set,
143 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
144 * Based on nf_proto_csum_replace4. */
145 static void update_csum(__sum16 *sum, struct sk_buff *skb,
146 __be32 from, __be32 to, int pseudohdr)
/* Incremental update: fold ~from and to into the existing sum rather
 * than recomputing the checksum over the whole packet. */
148 __be32 diff[] = { ~from, to };
149 if (skb->ip_summed != CHECKSUM_PARTIAL) {
150 *sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
151 ~csum_unfold(*sum)));
/* Hardware provided a complete checksum; keep skb->csum consistent
 * when a pseudo-header field changed. */
152 if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
153 skb->csum = ~csum_partial((char *)diff, sizeof(diff),
155 } else if (pseudohdr)
156 *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
/* Rewrite the IPv4 source or destination address selected by action 'a',
 * patching the IP header checksum and, for TCP/UDP, the transport
 * checksum (the address is part of the pseudo-header).  Non-IPv4
 * packets are left untouched. */
160 static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
161 uint8_t nw_proto, const struct ofp_action *a)
163 if (eth_proto == ETH_P_IP) {
164 struct iphdr *nh = ip_hdr(skb);
165 uint32_t new, *field;
167 new = a->arg.nw_addr;
/* Select saddr vs. daddr ('field' assignment lines are not visible in
 * this extract). */
169 if (a->type == htons(OFPAT_SET_NW_SRC))
/* pseudohdr=1: the nw address feeds the TCP/UDP pseudo-header sum. */
174 if (nw_proto == IPPROTO_TCP) {
175 struct tcphdr *th = tcp_hdr(skb);
176 update_csum(&th->check, skb, *field, new, 1);
177 } else if (nw_proto == IPPROTO_UDP) {
178 struct udphdr *th = udp_hdr(skb);
179 update_csum(&th->check, skb, *field, new, 1);
/* IP header checksum does not cover a pseudo-header. */
181 update_csum(&nh->check, skb, *field, new, 0);
/* Rewrite the TCP or UDP source/destination port selected by action 'a'
 * and patch the corresponding transport checksum.  Only IPv4 TCP/UDP
 * packets are modified. */
186 static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
187 uint8_t nw_proto, const struct ofp_action *a)
189 if (eth_proto == ETH_P_IP) {
190 uint16_t new, *field;
194 if (nw_proto == IPPROTO_TCP) {
195 struct tcphdr *th = tcp_hdr(skb);
/* Choose th->source vs. th->dest ('field' assignment lines are not
 * visible in this extract). */
197 if (a->type == htons(OFPAT_SET_TP_SRC))
202 update_csum(&th->check, skb, *field, new, 1);
204 } else if (nw_proto == IPPROTO_UDP) {
205 struct udphdr *th = udp_hdr(skb);
207 if (a->type == htons(OFPAT_SET_TP_SRC))
212 update_csum(&th->check, skb, *field, new, 1);
/* Strip the 802.1Q tag from 'skb' in place: slide the Ethernet
 * addresses up by VLAN_HLEN, pull the tag, and fix up skb->protocol
 * and the mac_header offset. */
218 static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
220 struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
224 /* Verify we were given a vlan packet */
225 if (vh->h_vlan_proto != htons(ETH_P_8021Q))
/* memmove because source and destination regions overlap. */
228 memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);
230 eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
232 skb->protocol = eh->h_proto;
233 skb->mac_header += VLAN_HLEN;
/* Set the bits of the VLAN TCI selected by 'mask' to 'tci', adding a
 * VLAN header first if the packet (per 'key') has none.  May replace
 * 'skb' (__vlan_put_tag can reallocate); 'key->dl_vlan' is refreshed
 * from the resulting header. */
238 static struct sk_buff *modify_vlan_tci(struct sk_buff *skb,
239 struct sw_flow_key *key, uint16_t tci, uint16_t mask)
241 struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
243 if (key->dl_vlan != htons(OFP_VLAN_NONE)) {
244 /* Modify vlan id, but maintain other TCI values */
245 vh->h_vlan_TCI = (vh->h_vlan_TCI & ~(htons(mask))) | htons(tci);
247 /* Add vlan header */
249 /* xxx The vlan_put_tag function, doesn't seem to work
250 * xxx reliably when it attempts to use the hardware-accelerated
251 * xxx version. We'll directly use the software version
252 * xxx until the problem can be diagnosed.
254 skb = __vlan_put_tag(skb, tci);
255 vh = vlan_eth_hdr(skb);
257 key->dl_vlan = vh->h_vlan_TCI & htons(VLAN_VID_MASK);
262 /* Mask for the priority bits in a vlan header. The kernel doesn't
263 * define this like it does for VID. */
/* PCP occupies the top three bits of the 16-bit TCI. */
264 #define VLAN_PCP_MASK 0xe000
/* Apply the single field-setting action 'a' to 'skb', keeping 'key' in
 * sync where the action changes a field the flow key mirrors.  May
 * replace 'skb' (VLAN operations can reallocate); returns the
 * possibly-new skb. */
266 struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
267 struct sw_flow_key *key, const struct ofp_action *a)
269 switch (ntohs(a->type)) {
270 case OFPAT_SET_VLAN_VID: {
271 uint16_t tci = ntohs(a->arg.vlan_vid);
272 skb = modify_vlan_tci(skb, key, tci, VLAN_VID_MASK);
276 case OFPAT_SET_VLAN_PCP: {
/* Shift by 13 places the 3-bit priority in the top of the TCI,
 * matching VLAN_PCP_MASK (0xe000). */
277 uint16_t tci = (uint16_t)a->arg.vlan_pcp << 13;
278 skb = modify_vlan_tci(skb, key, tci, VLAN_PCP_MASK);
282 case OFPAT_STRIP_VLAN:
284 key->dl_vlan = htons(OFP_VLAN_NONE);
287 case OFPAT_SET_DL_SRC: {
288 struct ethhdr *eh = eth_hdr(skb);
289 memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
292 case OFPAT_SET_DL_DST: {
293 struct ethhdr *eh = eth_hdr(skb);
294 memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
298 case OFPAT_SET_NW_SRC:
299 case OFPAT_SET_NW_DST:
300 modify_nh(skb, eth_proto, key->nw_proto, a);
303 case OFPAT_SET_TP_SRC:
304 case OFPAT_SET_TP_DST:
305 modify_th(skb, eth_proto, key->nw_proto, a);
310 printk("execute_setter: unknown action: %d\n", ntohs(a->type));
/* OFPT_HELLO handler: answer with our own hello message. */
317 recv_hello(struct sw_chain *chain, const struct sender *sender,
320 return dp_send_hello(chain->dp, sender, msg);
/* OFPT_FEATURES_REQUEST handler: reply with the datapath's features. */
324 recv_features_request(struct sw_chain *chain, const struct sender *sender,
327 return dp_send_features_reply(chain->dp, sender);
/* OFPT_GET_CONFIG_REQUEST handler: reply with the current switch config. */
331 recv_get_config_request(struct sw_chain *chain, const struct sender *sender,
334 return dp_send_config_reply(chain->dp, sender);
/* OFPT_SET_CONFIG handler: install the controller-supplied switch
 * configuration (flags and miss_send_len). */
338 recv_set_config(struct sw_chain *chain, const struct sender *sender,
341 const struct ofp_switch_config *osc = msg;
/* Keep only the flag bits this switch understands. */
344 flags = ntohs(osc->flags) & (OFPC_SEND_FLOW_EXP | OFPC_FRAG_MASK);
/* Unsupported fragment-handling mode: fall back to dropping fragments. */
345 if ((flags & OFPC_FRAG_MASK) != OFPC_FRAG_NORMAL
346 && (flags & OFPC_FRAG_MASK) != OFPC_FRAG_DROP) {
347 flags = (flags & ~OFPC_FRAG_MASK) | OFPC_FRAG_DROP;
349 chain->dp->flags = flags;
351 chain->dp->miss_send_len = ntohs(osc->miss_send_len);
/* OFPT_PACKET_OUT handler: run the message's action list over either a
 * packet carried inline in the message (buffer_id == -1) or a
 * previously buffered packet looked up by buffer_id. */
357 recv_packet_out(struct sw_chain *chain, const struct sender *sender,
360 const struct ofp_packet_out *opo = msg;
362 struct vlan_ethhdr *mac;
364 struct sw_flow_key key;
365 int n_actions = ntohs(opo->n_actions);
366 int act_len = n_actions * sizeof opo->actions[0];
/* Reject messages whose declared action count overruns the payload. */
368 if (act_len > (ntohs(opo->header.length) - sizeof *opo)) {
370 printk("message too short for number of actions\n");
/* buffer_id == -1: packet data follows the action array inline. */
374 if (ntohl(opo->buffer_id) == (uint32_t) -1) {
375 int data_len = ntohs(opo->header.length) - sizeof *opo - act_len;
377 /* FIXME: there is likely a way to reuse the data in msg. */
378 skb = alloc_skb(data_len, GFP_ATOMIC);
382 /* FIXME? We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
383 * we're just transmitting this raw without examining anything
384 * at those layers. */
385 memcpy(skb_put(skb, data_len), &opo->actions[n_actions], data_len);
/* Set MAC/network header offsets, accounting for a VLAN tag. */
387 skb_set_mac_header(skb, 0);
388 mac = vlan_eth_hdr(skb);
389 if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
390 nh_ofs = sizeof(struct ethhdr);
392 nh_ofs = sizeof(struct vlan_ethhdr);
393 skb_set_network_header(skb, nh_ofs);
/* Otherwise take ownership of the buffered packet. */
395 skb = retrieve_skb(ntohl(opo->buffer_id));
400 dp_set_origin(chain->dp, ntohs(opo->in_port), skb);
402 flow_extract(skb, ntohs(opo->in_port), &key);
403 execute_actions(chain->dp, skb, &key, opo->actions, n_actions, 1);
/* OFPT_PORT_MOD handler: apply controller-requested port flag changes. */
409 recv_port_mod(struct sw_chain *chain, const struct sender *sender,
412 const struct ofp_port_mod *opm = msg;
414 dp_update_port_flags(chain->dp, opm);
/* OFPT_ECHO_REQUEST handler: echo the message back to the sender. */
420 recv_echo_request(struct sw_chain *chain, const struct sender *sender,
423 return dp_send_echo_reply(chain->dp, sender, msg);
/* OFPT_ECHO_REPLY handler (body not visible in this extract;
 * presumably a no-op — confirm against the full source). */
427 recv_echo_reply(struct sw_chain *chain, const struct sender *sender,
/* OFPFC_ADD: validate 'ofm', allocate and initialize a new flow, insert
 * it into 'chain', and, if the message names a buffered packet, run the
 * new actions on that packet. */
434 add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
439 struct sw_flow *flow;
442 /* To prevent loops, make sure there's no action to send to the
443 * OFP_TABLE virtual port.
/* Action count is derived from the message length, not a field. */
445 n_actions = (ntohs(ofm->header.length) - sizeof *ofm)
446 / sizeof *ofm->actions;
447 for (i=0; i<n_actions; i++) {
448 const struct ofp_action *a = &ofm->actions[i];
450 if (a->type == htons(OFPAT_OUTPUT)
451 && (a->arg.output.port == htons(OFPP_TABLE)
452 || a->arg.output.port == htons(OFPP_NONE)
453 || a->arg.output.port == ofm->match.in_port)) {
454 /* xxx Send fancy new error message? */
459 /* Allocate memory. */
460 flow = flow_alloc(n_actions, GFP_ATOMIC);
/* Fill in flow fields; exact-match flows (no wildcards) get the
 * sentinel priority -1. */
465 flow_extract_match(&flow->key, &ofm->match);
466 flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
467 flow->idle_timeout = ntohs(ofm->idle_timeout);
468 flow->hard_timeout = ntohs(ofm->hard_timeout);
469 flow->used = jiffies;
470 flow->init_time = jiffies;
471 flow->byte_count = 0;
472 flow->packet_count = 0;
473 spin_lock_init(&flow->lock);
474 memcpy(flow->sf_acts->actions, ofm->actions,
475 n_actions * sizeof *flow->sf_acts->actions);
478 error = chain_insert(chain, flow);
480 goto error_free_flow;
/* A buffered packet was waiting on this flow: account it and apply
 * the freshly installed actions. */
482 if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
483 struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
485 struct sw_flow_key key;
486 flow_used(flow, skb);
487 flow_extract(skb, ntohs(ofm->match.in_port), &key);
488 execute_actions(chain->dp, skb, &key, ofm->actions, n_actions, 0);
/* Error path: drop the referenced buffered packet, if any. */
498 if (ntohl(ofm->buffer_id) != (uint32_t) -1)
499 discard_skb(ntohl(ofm->buffer_id));
/* OFPFC_MODIFY / OFPFC_MODIFY_STRICT: validate 'ofm', update the
 * actions of matching flows in 'chain', and, if the message names a
 * buffered packet, run the new actions on it. */
504 mod_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
509 struct sw_flow_key key;
513 /* To prevent loops, make sure there's no action to send to the
514 * OFP_TABLE virtual port.
516 n_actions = (ntohs(ofm->header.length) - sizeof *ofm)
517 / sizeof *ofm->actions;
518 for (i=0; i<n_actions; i++) {
519 const struct ofp_action *a = &ofm->actions[i];
521 if (a->type == htons(OFPAT_OUTPUT)
522 && (a->arg.output.port == htons(OFPP_TABLE)
523 || a->arg.output.port == htons(OFPP_NONE)
524 || a->arg.output.port == ofm->match.in_port)) {
525 /* xxx Send fancy new error message? */
/* Strict mode also matches on priority and exact wildcards. */
530 flow_extract_match(&key, &ofm->match);
531 priority = key.wildcards ? ntohs(ofm->priority) : -1;
532 strict = (ofm->command == htons(OFPFC_MODIFY_STRICT)) ? 1 : 0;
533 chain_modify(chain, &key, priority, strict, ofm->actions, n_actions);
/* Apply the new actions to the referenced buffered packet, if any. */
535 if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
536 struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
538 struct sw_flow_key skb_key;
539 flow_extract(skb, ntohs(ofm->match.in_port), &skb_key);
540 execute_actions(chain->dp, skb, &skb_key,
541 ofm->actions, n_actions, 0);
/* Error path: drop the referenced buffered packet, if any. */
549 if (ntohl(ofm->buffer_id) != (uint32_t) -1)
550 discard_skb(ntohl(ofm->buffer_id));
/* OFPT_FLOW_MOD handler: dispatch on the flow-mod command
 * (add / modify / delete, with strict variants). */
555 recv_flow(struct sw_chain *chain, const struct sender *sender, const void *msg)
557 const struct ofp_flow_mod *ofm = msg;
558 uint16_t command = ntohs(ofm->command);
560 if (command == OFPFC_ADD) {
561 return add_flow(chain, ofm);
562 } else if ((command == OFPFC_MODIFY) || (command == OFPFC_MODIFY_STRICT)) {
563 return mod_flow(chain, ofm);
564 } else if (command == OFPFC_DELETE) {
565 struct sw_flow_key key;
566 flow_extract_match(&key, &ofm->match);
567 return chain_delete(chain, &key, 0, 0) ? 0 : -ESRCH;
568 } else if (command == OFPFC_DELETE_STRICT) {
569 struct sw_flow_key key;
/* Strict delete must also match the flow's priority exactly. */
571 flow_extract_match(&key, &ofm->match);
572 priority = key.wildcards ? ntohs(ofm->priority) : -1;
573 return chain_delete(chain, &key, priority, 1) ? 0 : -ESRCH;
579 /* 'msg', which is 'length' bytes long, was received across Netlink from
580 * 'sender'. Apply it to 'chain'. */
582 fwd_control_input(struct sw_chain *chain, const struct sender *sender,
583 const void *msg, size_t length)
/* Dispatch table entry: minimum message size plus handler. */
586 struct openflow_packet {
588 int (*handler)(struct sw_chain *, const struct sender *,
/* Table is indexed directly by the OpenFlow message type. */
592 static const struct openflow_packet packets[] = {
594 sizeof (struct ofp_header),
597 [OFPT_FEATURES_REQUEST] = {
598 sizeof (struct ofp_header),
599 recv_features_request,
601 [OFPT_GET_CONFIG_REQUEST] = {
602 sizeof (struct ofp_header),
603 recv_get_config_request,
605 [OFPT_SET_CONFIG] = {
606 sizeof (struct ofp_switch_config),
609 [OFPT_PACKET_OUT] = {
610 sizeof (struct ofp_packet_out),
614 sizeof (struct ofp_flow_mod),
618 sizeof (struct ofp_port_mod),
621 [OFPT_ECHO_REQUEST] = {
622 sizeof (struct ofp_header),
625 [OFPT_ECHO_REPLY] = {
626 sizeof (struct ofp_header),
631 struct ofp_header *oh;
633 oh = (struct ofp_header *) msg;
/* Version check; hello/error/echo/vendor are exempt so that version
 * negotiation and diagnostics still work across versions. */
634 if (oh->version != OFP_VERSION
635 && oh->type != OFPT_HELLO
636 && oh->type != OFPT_ERROR
637 && oh->type != OFPT_ECHO_REQUEST
638 && oh->type != OFPT_ECHO_REPLY
639 && oh->type != OFPT_VENDOR)
641 dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
642 OFPBRC_BAD_VERSION, msg, length);
/* The self-declared length must not exceed what was received. */
645 if (ntohs(oh->length) > length)
648 if (oh->type < ARRAY_SIZE(packets)) {
649 const struct openflow_packet *pkt = &packets[oh->type];
651 if (length < pkt->min_size)
653 return pkt->handler(chain, sender, msg);
/* Unknown message type: report it back to the controller. */
656 dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
657 OFPBRC_BAD_TYPE, msg, length);
661 /* Packet buffering. */
/* An entry may be overwritten once it is at least OVERWRITE_SECS old. */
663 #define OVERWRITE_SECS 1
664 #define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)
666 struct packet_buffer {
669 unsigned long exp_jiffies;
/* Ring of buffered packets, filled round-robin via buffer_idx;
 * buffer_lock guards both. */
672 static struct packet_buffer buffers[N_PKT_BUFFERS];
673 static unsigned int buffer_idx;
674 static DEFINE_SPINLOCK(buffer_lock);
/* Store 'skb' in the packet-buffer ring and return an opaque id that
 * encodes the slot index (low bits) and a per-slot cookie (high bits),
 * for later lookup by retrieve_skb()/discard_skb(). */
676 uint32_t fwd_save_skb(struct sk_buff *skb)
678 struct sk_buff *old_skb = NULL;
679 struct packet_buffer *p;
680 unsigned long int flags;
683 spin_lock_irqsave(&buffer_lock, flags);
684 buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
685 p = &buffers[buffer_idx];
687 /* Don't buffer packet if existing entry is less than
688 * OVERWRITE_SECS old. */
689 if (time_before(jiffies, p->exp_jiffies)) {
690 spin_unlock_irqrestore(&buffer_lock, flags);
693 /* Defer kfree_skb() until interrupts re-enabled. */
/* The all-bits-1 id is reserved (it means "no buffer" on the wire),
 * so skip the maximum cookie value. */
697 /* Don't use maximum cookie value since the all-bits-1 id is
699 if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
703 p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
704 id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
705 spin_unlock_irqrestore(&buffer_lock, flags);
/* Look up and take ownership of the buffered packet named by 'id';
 * returns NULL when the cookie does not match (slot was reused or the
 * id is stale). */
713 static struct sk_buff *retrieve_skb(uint32_t id)
715 unsigned long int flags;
716 struct sk_buff *skb = NULL;
717 struct packet_buffer *p;
719 spin_lock_irqsave(&buffer_lock, flags);
/* Low bits select the ring slot; high bits must match its cookie. */
720 p = &buffers[id & PKT_BUFFER_MASK];
721 if (p->cookie == id >> PKT_BUFFER_BITS) {
725 printk("cookie mismatch: %x != %x\n",
726 id >> PKT_BUFFER_BITS, p->cookie);
728 spin_unlock_irqrestore(&buffer_lock, flags);
/* Drop every buffered packet, e.g. on shutdown.  Each slot is cleared
 * under the lock; the skb itself is freed after the lock is released. */
733 void fwd_discard_all(void)
737 for (i = 0; i < N_PKT_BUFFERS; i++) {
739 unsigned long int flags;
741 /* Defer kfree_skb() until interrupts re-enabled. */
742 spin_lock_irqsave(&buffer_lock, flags);
743 skb = buffers[i].skb;
744 buffers[i].skb = NULL;
745 spin_unlock_irqrestore(&buffer_lock, flags);
/* Free the buffered packet named by 'id', if its cookie still matches
 * (i.e. the slot has not been reused since the id was handed out). */
751 static void discard_skb(uint32_t id)
753 struct sk_buff *old_skb = NULL;
754 unsigned long int flags;
755 struct packet_buffer *p;
757 spin_lock_irqsave(&buffer_lock, flags);
758 p = &buffers[id & PKT_BUFFER_MASK];
759 if (p->cookie == id >> PKT_BUFFER_BITS) {
760 /* Defer kfree_skb() until interrupts re-enabled. */
764 spin_unlock_irqrestore(&buffer_lock, flags);
775 /* Utility functions. */
777 /* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
779 * Returns 1 if successful, 0 on failure. */
781 make_writable(struct sk_buff **pskb)
783 /* Based on skb_make_writable() in net/netfilter/core.c. */
784 struct sk_buff *nskb;
786 /* Not exclusive use of packet? Must copy. */
787 if (skb_shared(*pskb) || skb_cloned(*pskb))
790 return pskb_may_pull(*pskb, 40); /* FIXME? */
793 nskb = skb_copy(*pskb, GFP_ATOMIC);
796 BUG_ON(skb_is_nonlinear(nskb));
798 /* Rest of kernel will get very unhappy if we pass it a
799 suddenly-orphaned skbuff */
801 skb_set_owner_w(nskb, (*pskb)->sk);