2 * Distributed under the terms of the GNU GPL version 2.
3 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
4 * Stanford Junior University
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_ether.h>
10 #include <linux/if_vlan.h>
11 #include <asm/uaccess.h>
12 #include <linux/types.h>
15 #include "openflow/nicira-ext.h"
21 /* FIXME: do we need to use GFP_ATOMIC everywhere here? */
24 static struct sk_buff *retrieve_skb(uint32_t id);
25 static void discard_skb(uint32_t id);
/* 'skb' was received on port 'p', which may be a physical switch port, the
 * local port, or a null pointer.  Process it according to 'chain'.  Returns 0
 * if successful, in which case 'skb' is destroyed, or -ESRCH if there is no
 * matching flow, in which case 'skb' still belongs to the caller. */
int run_flow_through_tables(struct sw_chain *chain, struct sk_buff *skb,
			    struct net_bridge_port *p)
	/* Ethernet address used as the destination for STP frames. */
	static const uint8_t stp_eth_addr[ETH_ALEN]
		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
	struct sw_flow_key key;

	/* flow_extract() reports whether the packet is an IP fragment; when
	 * the datapath is configured with OFPC_FRAG_DROP, fragments are
	 * dropped here rather than matched against the flow table. */
	if (flow_extract(skb, p ? p->port_no : OFPP_NONE, &key)
	    && (chain->dp->flags & OFPC_FRAG_MASK) == OFPC_FRAG_DROP) {

	/* Honor the port's OFPPC_NO_RECV / OFPPC_NO_RECV_STP config bits.
	 * Which bit applies depends on whether the frame is addressed to the
	 * STP multicast address: compare_ether_addr() returns 0 on a match,
	 * so a non-zero result (non-STP frame) selects OFPPC_NO_RECV. */
	if (p && p->config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
	    p->config & (compare_ether_addr(key.dl_dst, stp_eth_addr)
			 ? OFPPC_NO_RECV : OFPPC_NO_RECV_STP)) {

	/* Flow-table lookup; on a hit, run the flow's actions on the skb.
	 * sf_acts is read under RCU since flow actions may be replaced. */
	flow = chain_lookup(chain, &key);
	if (likely(flow != NULL)) {
		struct sw_flow_actions *sf_acts = rcu_dereference(flow->sf_acts);
		execute_actions(chain->dp, skb, &key,
				sf_acts->actions, sf_acts->actions_len, 0);
/* 'skb' was received on port 'p', which may be a physical switch port, the
 * local port, or a null pointer.  Process it according to 'chain', sending it
 * up to the controller if no flow matches.  Takes ownership of 'skb'. */
void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb,
		    struct net_bridge_port *p)
	/* The datapath must own the skb exclusively at this point. */
	WARN_ON_ONCE(skb_shared(skb));
	WARN_ON_ONCE(skb->destructor);
	/* No matching flow (-ESRCH): buffer the packet via fwd_save_skb()
	 * and send up to miss_send_len bytes of it to the controller as a
	 * packet-in message. */
	if (run_flow_through_tables(chain, skb, p))
		dp_output_control(chain->dp, skb, fwd_save_skb(skb),
				  chain->dp->miss_send_len,
/* OFPT_HELLO handler: replies with this datapath's own hello message. */
recv_hello(struct sw_chain *chain, const struct sender *sender,
	return dp_send_hello(chain->dp, sender, msg);
/* OFPT_FEATURES_REQUEST handler: replies with the switch's features. */
recv_features_request(struct sw_chain *chain, const struct sender *sender,
	return dp_send_features_reply(chain->dp, sender);
/* OFPT_GET_CONFIG_REQUEST handler: replies with the current switch config. */
recv_get_config_request(struct sw_chain *chain, const struct sender *sender,
	return dp_send_config_reply(chain->dp, sender);
/* OFPT_SET_CONFIG handler: updates the datapath's configurable flags and
 * miss_send_len from the controller-supplied switch configuration. */
recv_set_config(struct sw_chain *chain, const struct sender *sender,
	const struct ofp_switch_config *osc = msg;

	/* Only the flow-expiry bit and the fragment-handling field are
	 * accepted; any unsupported fragment mode is coerced to DROP. */
	flags = ntohs(osc->flags) & (OFPC_SEND_FLOW_EXP | OFPC_FRAG_MASK);
	if ((flags & OFPC_FRAG_MASK) != OFPC_FRAG_NORMAL
	    && (flags & OFPC_FRAG_MASK) != OFPC_FRAG_DROP) {
		flags = (flags & ~OFPC_FRAG_MASK) | OFPC_FRAG_DROP;

	chain->dp->flags = flags;

	/* How many bytes of a table-miss packet to forward to the controller. */
	chain->dp->miss_send_len = ntohs(osc->miss_send_len);
/* OFPT_PACKET_OUT handler: injects a packet -- either carried inline after
 * the action list in 'msg', or previously buffered (looked up by buffer_id)
 * -- and executes the message's actions on it. */
recv_packet_out(struct sw_chain *chain, const struct sender *sender,
	const struct ofp_packet_out *opo = msg;

	struct vlan_ethhdr *mac;

	struct sw_flow_key key;

	size_t actions_len = ntohs(opo->actions_len);

	/* The declared actions must fit within the message body. */
	if (actions_len > (ntohs(opo->header.length) - sizeof *opo)) {

		printk("message too short for number of actions\n");

	/* buffer_id of all-ones means the packet data follows the actions
	 * inline in the message. */
	if (ntohl(opo->buffer_id) == (uint32_t) -1) {
		int data_len = ntohs(opo->header.length) - sizeof *opo - actions_len;

		/* FIXME: there is likely a way to reuse the data in msg. */
		skb = alloc_skb(data_len, GFP_ATOMIC);

		/* FIXME? We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
		 * we're just transmitting this raw without examining anything
		 * at those layers. */
		memcpy(skb_put(skb, data_len), (uint8_t *)opo->actions + actions_len,

		/* Set up header offsets so later parsing works; account for
		 * an 802.1Q VLAN tag when one is present. */
		skb_set_mac_header(skb, 0);
		mac = vlan_eth_hdr(skb);
		if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
			nh_ofs = sizeof(struct ethhdr);

			nh_ofs = sizeof(struct vlan_ethhdr);
		skb_set_network_header(skb, nh_ofs);

		/* Otherwise the packet was buffered earlier; fetch it. */
		skb = retrieve_skb(ntohl(opo->buffer_id));

	dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

	flow_extract(skb, ntohs(opo->in_port), &key);

	/* Validate the action list before running it; on failure, report
	 * OFPET_BAD_ACTION back to the controller with the offending code. */
	v_code = validate_actions(chain->dp, &key, opo->actions, actions_len);
	if (v_code != ACT_VALIDATION_OK) {
		dp_send_error_msg(chain->dp, sender, OFPET_BAD_ACTION, v_code,
				  msg, ntohs(opo->header.length));

	execute_actions(chain->dp, skb, &key, opo->actions, actions_len, 1);
/* OFPT_PORT_MOD handler: applies controller-requested port flag changes. */
recv_port_mod(struct sw_chain *chain, const struct sender *sender,

	const struct ofp_port_mod *opm = msg;

	dp_update_port_flags(chain->dp, opm);
/* OFPT_ECHO_REQUEST handler: echoes the message payload back to the sender. */
recv_echo_request(struct sw_chain *chain, const struct sender *sender,
	return dp_send_echo_reply(chain->dp, sender, msg);
/* OFPT_ECHO_REPLY handler: nothing to do beyond accepting the message. */
recv_echo_reply(struct sw_chain *chain, const struct sender *sender,
/* OFPFC_ADD handler: builds a new flow from 'ofm', validates its actions,
 * inserts it into 'chain', and -- if the controller referenced a buffered
 * packet -- immediately runs the new flow's actions on that packet. */
add_flow(struct sw_chain *chain, const struct sender *sender,
	 const struct ofp_flow_mod *ofm)

	struct sw_flow *flow;
	/* The action list is whatever follows the fixed flow_mod header. */
	size_t actions_len = ntohs(ofm->header.length) - sizeof *ofm;

	/* Allocate memory. */
	flow = flow_alloc(actions_len, GFP_ATOMIC);

	flow_extract_match(&flow->key, &ofm->match);

	/* Invalid actions are rejected with an OFPET_BAD_ACTION error. */
	v_code = validate_actions(chain->dp, &flow->key, ofm->actions, actions_len);
	if (v_code != ACT_VALIDATION_OK) {
		dp_send_error_msg(chain->dp, sender, OFPET_BAD_ACTION, v_code,
				  ofm, ntohs(ofm->header.length));
		goto error_free_flow;

	/* Exact-match flows (no wildcards) get maximum priority: -1 stored
	 * in an unsigned field is all-ones. */
	flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
	flow->idle_timeout = ntohs(ofm->idle_timeout);
	flow->hard_timeout = ntohs(ofm->hard_timeout);
	flow->used = jiffies;
	flow->init_time = jiffies;
	flow->byte_count = 0;
	flow->packet_count = 0;
	spin_lock_init(&flow->lock);
	memcpy(flow->sf_acts->actions, ofm->actions, actions_len);

	/* Insert into the table chain; -ENOBUFS means every table is full,
	 * which is reported to the controller as a flow-mod failure. */
	error = chain_insert(chain, flow);
	if (error == -ENOBUFS) {
		dp_send_error_msg(chain->dp, sender, OFPET_FLOW_MOD_FAILED,
				  OFPFMFC_ALL_TABLES_FULL, ofm, ntohs(ofm->header.length));
		goto error_free_flow;

		goto error_free_flow;

	/* buffer_id != -1: the controller wants the new flow's actions
	 * applied to the previously buffered packet. */
	if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
		struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));

		struct sw_flow_key key;
		flow_used(flow, skb);
		dp_set_origin(chain->dp, ntohs(ofm->match.in_port), skb);
		flow_extract(skb, ntohs(ofm->match.in_port), &key);
		execute_actions(chain->dp, skb, &key, ofm->actions, actions_len, 0);

	/* Error path: also release any referenced buffered packet. */
	if (ntohl(ofm->buffer_id) != (uint32_t) -1)
		discard_skb(ntohl(ofm->buffer_id));
/* OFPFC_MODIFY / OFPFC_MODIFY_STRICT handler: replaces the actions of the
 * flows matching 'ofm', then optionally applies the new actions to a
 * buffered packet referenced by buffer_id. */
mod_flow(struct sw_chain *chain, const struct sender *sender,
	 const struct ofp_flow_mod *ofm)

	struct sw_flow_key key;

	flow_extract_match(&key, &ofm->match);

	/* The action list follows the fixed flow_mod header. */
	actions_len = ntohs(ofm->header.length) - sizeof *ofm;

	/* Invalid actions are rejected with an OFPET_BAD_ACTION error. */
	v_code = validate_actions(chain->dp, &key, ofm->actions, actions_len);
	if (v_code != ACT_VALIDATION_OK) {
		dp_send_error_msg(chain->dp, sender, OFPET_BAD_ACTION, v_code,
				  ofm, ntohs(ofm->header.length));

	/* Exact-match flows implicitly use maximum priority (all-ones).
	 * Strict modify requires the priority to match as well. */
	priority = key.wildcards ? ntohs(ofm->priority) : -1;
	strict = (ofm->command == htons(OFPFC_MODIFY_STRICT)) ? 1 : 0;
	chain_modify(chain, &key, priority, strict, ofm->actions, actions_len);

	/* buffer_id != -1: run the new actions on the buffered packet. */
	if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
		struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));

		struct sw_flow_key skb_key;
		flow_extract(skb, ntohs(ofm->match.in_port), &skb_key);
		execute_actions(chain->dp, skb, &skb_key,
				ofm->actions, actions_len, 0);

	/* Error path: also release any referenced buffered packet. */
	if (ntohl(ofm->buffer_id) != (uint32_t) -1)
		discard_skb(ntohl(ofm->buffer_id));
/* OFPT_FLOW_MOD dispatcher: routes the message to add/modify/delete
 * handling according to the flow_mod 'command' field. */
recv_flow(struct sw_chain *chain, const struct sender *sender, const void *msg)

	const struct ofp_flow_mod *ofm = msg;
	uint16_t command = ntohs(ofm->command);

	if (command == OFPFC_ADD) {
		return add_flow(chain, sender, ofm);
	} else if ((command == OFPFC_MODIFY) || (command == OFPFC_MODIFY_STRICT)) {
		return mod_flow(chain, sender, ofm);
	} else if (command == OFPFC_DELETE) {
		/* Non-strict delete: priority is ignored (passed as 0/0). */
		struct sw_flow_key key;
		flow_extract_match(&key, &ofm->match);
		return chain_delete(chain, &key, 0, 0) ? 0 : -ESRCH;
	} else if (command == OFPFC_DELETE_STRICT) {
		struct sw_flow_key key;

		/* Strict delete: priority must match too; exact-match flows
		 * implicitly use maximum priority (all-ones). */
		flow_extract_match(&key, &ofm->match);
		priority = key.wildcards ? ntohs(ofm->priority) : -1;
		return chain_delete(chain, &key, priority, 1) ? 0 : -ESRCH;
345 recv_vendor(struct sw_chain *chain, const struct sender *sender,
348 const struct ofp_vendor_header *ovh = msg;
350 switch(ntohl(ovh->vendor))
353 return nx_recv_msg(chain, sender, msg);
356 printk("Uknown vendor: %#x\n", ntohl(ovh->vendor));
357 dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
358 OFPBRC_BAD_VENDOR, msg, ntohs(ovh->header.length));
/* 'msg', which is 'length' bytes long, was received across Netlink from
 * 'sender'.  Apply it to 'chain'. */
fwd_control_input(struct sw_chain *chain, const struct sender *sender,
		  const void *msg, size_t length)

	/* One dispatch entry per OpenFlow message type: the minimum valid
	 * message size and the handler function to invoke. */
	struct openflow_packet {

		int (*handler)(struct sw_chain *, const struct sender *,

	/* Dispatch table indexed by OFPT_* message type. */
	static const struct openflow_packet packets[] = {

			sizeof (struct ofp_header),

		[OFPT_ECHO_REQUEST] = {
			sizeof (struct ofp_header),

		[OFPT_ECHO_REPLY] = {
			sizeof (struct ofp_header),

			sizeof (struct ofp_vendor_header),

		[OFPT_FEATURES_REQUEST] = {
			sizeof (struct ofp_header),
			recv_features_request,

		[OFPT_GET_CONFIG_REQUEST] = {
			sizeof (struct ofp_header),
			recv_get_config_request,

		[OFPT_SET_CONFIG] = {
			sizeof (struct ofp_switch_config),

		[OFPT_PACKET_OUT] = {
			sizeof (struct ofp_packet_out),

			sizeof (struct ofp_flow_mod),

			sizeof (struct ofp_port_mod),

	struct ofp_header *oh;

	oh = (struct ofp_header *) msg;
	/* Version checking is skipped for the message types that must work
	 * across protocol versions (hello/error/echo/vendor); everything
	 * else with a mismatched version gets OFPBRC_BAD_VERSION. */
	if (oh->version != OFP_VERSION
	    && oh->type != OFPT_HELLO
	    && oh->type != OFPT_ERROR
	    && oh->type != OFPT_ECHO_REQUEST
	    && oh->type != OFPT_ECHO_REPLY
	    && oh->type != OFPT_VENDOR)

		dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
				  OFPBRC_BAD_VERSION, msg, length);

	/* The header's self-declared length must match the bytes received. */
	if (ntohs(oh->length) != length) {

		printk("received message length wrong: %d/%d\n",
		       ntohs(oh->length), length);

	if (oh->type < ARRAY_SIZE(packets)) {
		const struct openflow_packet *pkt = &packets[oh->type];

		/* Reject messages shorter than the type's fixed size. */
		if (length < pkt->min_size)

		return pkt->handler(chain, sender, msg);

	/* Message type is unknown or has no handler registered. */
	dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
			  OFPBRC_BAD_TYPE, msg, length);
/* Packet buffering. */

/* A buffered packet's slot may be overwritten once the entry is older
 * than OVERWRITE_SECS. */
#define OVERWRITE_SECS 1
#define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)

/* One slot in the fixed-size packet-buffer table. */
struct packet_buffer {

	unsigned long exp_jiffies;	/* Time after which the slot may be reused. */

static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;		/* Most recently used slot (round-robin). */
static DEFINE_SPINLOCK(buffer_lock);	/* Protects buffers[] and buffer_idx. */
/* Stores 'skb' in the packet-buffer table and returns an opaque id that
 * encodes the slot index (low bits) plus a per-slot cookie (high bits),
 * suitable for later retrieve_skb()/discard_skb() calls. */
uint32_t fwd_save_skb(struct sk_buff *skb)

	struct sk_buff *old_skb = NULL;
	struct packet_buffer *p;
	unsigned long int flags;

	spin_lock_irqsave(&buffer_lock, flags);
	/* Advance round-robin through the slot array. */
	buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
	p = &buffers[buffer_idx];

	/* Don't buffer packet if existing entry is less than
	 * OVERWRITE_SECS old. */
	if (time_before(jiffies, p->exp_jiffies)) {
		spin_unlock_irqrestore(&buffer_lock, flags);

	/* Defer kfree_skb() until interrupts re-enabled. */

	/* Don't use maximum cookie value since the all-bits-1 id is
	 * reserved to mean "no buffered packet". */
	if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)

	p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
	/* Pack slot index in the low bits, cookie above PKT_BUFFER_BITS. */
	id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
	spin_unlock_irqrestore(&buffer_lock, flags);
/* Looks up the buffered packet identified by 'id' and returns it, or NULL
 * when the id's cookie no longer matches (slot expired or reused). */
static struct sk_buff *retrieve_skb(uint32_t id)

	unsigned long int flags;
	struct sk_buff *skb = NULL;
	struct packet_buffer *p;

	spin_lock_irqsave(&buffer_lock, flags);
	/* Low bits of 'id' select the slot; the high bits must match the
	 * slot's current cookie or the entry is stale. */
	p = &buffers[id & PKT_BUFFER_MASK];
	if (p->cookie == id >> PKT_BUFFER_BITS) {

		printk("cookie mismatch: %x != %x\n",
		       id >> PKT_BUFFER_BITS, p->cookie);

	spin_unlock_irqrestore(&buffer_lock, flags);
/* Releases every packet in the buffer table (e.g. at shutdown).  Each slot
 * is detached under the lock; the actual kfree_skb() happens afterward. */
void fwd_discard_all(void)

	for (i = 0; i < N_PKT_BUFFERS; i++) {

		unsigned long int flags;

		/* Defer kfree_skb() until interrupts re-enabled. */
		spin_lock_irqsave(&buffer_lock, flags);
		skb = buffers[i].skb;
		buffers[i].skb = NULL;
		spin_unlock_irqrestore(&buffer_lock, flags);
/* Drops the buffered packet identified by 'id', if the id is still valid
 * (i.e. the slot's cookie has not changed since fwd_save_skb()). */
static void discard_skb(uint32_t id)

	struct sk_buff *old_skb = NULL;
	unsigned long int flags;
	struct packet_buffer *p;

	spin_lock_irqsave(&buffer_lock, flags);
	p = &buffers[id & PKT_BUFFER_MASK];
	/* Only discard when the cookie matches; a stale id is a no-op. */
	if (p->cookie == id >> PKT_BUFFER_BITS) {
		/* Defer kfree_skb() until interrupts re-enabled. */

	spin_unlock_irqrestore(&buffer_lock, flags);