Add support for listing and deleting entries based on an output port.
[sliver-openvswitch.git] / datapath / forward.c
1 /*
2  * Distributed under the terms of the GNU GPL version 2.
3  * Copyright (c) 2007, 2008 The Board of Trustees of The Leland 
4  * Stanford Junior University
5  */
6
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_ether.h>
10 #include <linux/if_vlan.h>
11 #include <asm/uaccess.h>
12 #include <linux/types.h>
13 #include "forward.h"
14 #include "datapath.h"
15 #include "openflow/nicira-ext.h"
16 #include "dp_act.h"
17 #include "nx_msg.h"
18 #include "chain.h"
19 #include "flow.h"
20
21 /* FIXME: do we need to use GFP_ATOMIC everywhere here? */
22
23
/* Packet-buffer accessors; definitions are under "Packet buffering" below. */
static struct sk_buff *retrieve_skb(uint32_t id);
static void discard_skb(uint32_t id);
26
/* 'skb' was received on port 'p', which may be a physical switch port, the
 * local port, or a null pointer.  Process it according to 'chain'.  Returns 0
 * if successful, in which case 'skb' is destroyed, or -ESRCH if there is no
 * matching flow, in which case 'skb' still belongs to the caller. */
int run_flow_through_tables(struct sw_chain *chain, struct sk_buff *skb,
                            struct net_bridge_port *p)
{
        /* Ethernet address used as the destination for STP frames. */
        static const uint8_t stp_eth_addr[ETH_ALEN]
                = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
        struct sw_flow_key key;
        struct sw_flow *flow;

        /* A nonzero return from flow_extract() marks the packet as a
         * fragment; drop it here if the datapath is configured to. */
        if (flow_extract(skb, p ? p->port_no : OFPP_NONE, &key)
            && (chain->dp->flags & OFPC_FRAG_MASK) == OFPC_FRAG_DROP) {
                /* Drop fragment. */
                kfree_skb(skb);
                return 0;
        }
        /* Honor the port's receive-suppression flags: frames destined to the
         * STP multicast address are governed by OFPPC_NO_RECV_STP, all other
         * frames by OFPPC_NO_RECV.  (compare_ether_addr() returns nonzero
         * when the addresses differ.) */
        if (p && p->config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
            p->config & (compare_ether_addr(key.dl_dst, stp_eth_addr)
                        ? OFPPC_NO_RECV : OFPPC_NO_RECV_STP)) {
                kfree_skb(skb);
                return 0;
        }

        flow = chain_lookup(chain, &key);
        if (likely(flow != NULL)) {
                /* NOTE(review): rcu_dereference() implies the caller must be
                 * in an RCU read-side critical section — confirm at callers. */
                struct sw_flow_actions *sf_acts = rcu_dereference(flow->sf_acts);
                flow_used(flow, skb);
                execute_actions(chain->dp, skb, &key,
                                sf_acts->actions, sf_acts->actions_len, 0);
                return 0;
        } else {
                return -ESRCH;
        }
}
64
65 /* 'skb' was received on port 'p', which may be a physical switch port, the
66  * local port, or a null pointer.  Process it according to 'chain', sending it
67  * up to the controller if no flow matches.  Takes ownership of 'skb'. */
68 void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb,
69                     struct net_bridge_port *p)
70 {
71         WARN_ON_ONCE(skb_shared(skb));
72         WARN_ON_ONCE(skb->destructor);
73         if (run_flow_through_tables(chain, skb, p))
74                 dp_output_control(chain->dp, skb, fwd_save_skb(skb), 
75                                   chain->dp->miss_send_len,
76                                   OFPR_NO_MATCH);
77 }
78
79 static int
80 recv_hello(struct sw_chain *chain, const struct sender *sender,
81            const void *msg)
82 {
83         return dp_send_hello(chain->dp, sender, msg);
84 }
85
86 static int
87 recv_features_request(struct sw_chain *chain, const struct sender *sender,
88                       const void *msg) 
89 {
90         return dp_send_features_reply(chain->dp, sender);
91 }
92
93 static int
94 recv_get_config_request(struct sw_chain *chain, const struct sender *sender,
95                         const void *msg)
96 {
97         return dp_send_config_reply(chain->dp, sender);
98 }
99
100 static int
101 recv_set_config(struct sw_chain *chain, const struct sender *sender,
102                 const void *msg)
103 {
104         const struct ofp_switch_config *osc = msg;
105         int flags;
106
107         flags = ntohs(osc->flags) & (OFPC_SEND_FLOW_EXP | OFPC_FRAG_MASK);
108         if ((flags & OFPC_FRAG_MASK) != OFPC_FRAG_NORMAL
109             && (flags & OFPC_FRAG_MASK) != OFPC_FRAG_DROP) {
110                 flags = (flags & ~OFPC_FRAG_MASK) | OFPC_FRAG_DROP;
111         }
112         chain->dp->flags = flags;
113
114         chain->dp->miss_send_len = ntohs(osc->miss_send_len);
115
116         return 0;
117 }
118
/* OFPT_PACKET_OUT handler: transmit a packet supplied by the controller,
 * either carried inline in the message (buffer_id == -1) or previously
 * buffered on the datapath, after validating and running the requested
 * actions.  Takes ownership of the packet on all paths. */
static int
recv_packet_out(struct sw_chain *chain, const struct sender *sender,
                const void *msg)
{
        const struct ofp_packet_out *opo = msg;
        struct sk_buff *skb;
        uint16_t v_code;
        struct sw_flow_key key;
        size_t actions_len = ntohs(opo->actions_len);

        /* The claimed actions array must fit inside the message. */
        if (actions_len > (ntohs(opo->header.length) - sizeof *opo)) {
                if (net_ratelimit())
                        printk("message too short for number of actions\n");
                return -EINVAL;
        }

        if (ntohl(opo->buffer_id) == (uint32_t) -1) {
                /* Packet data follows the actions inline in the message;
                 * copy it into a freshly allocated skb. */
                int data_len = ntohs(opo->header.length) - sizeof *opo - actions_len;

                /* FIXME: there is likely a way to reuse the data in msg. */
                skb = alloc_skb(data_len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                /* FIXME?  We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
                 * we're just transmitting this raw without examining anything
                 * at those layers. */
                skb_put(skb, data_len);
                skb_copy_to_linear_data(skb,
                                        (uint8_t *)opo->actions + actions_len,
                                        data_len);
                skb_reset_mac_header(skb);
        } else {
                /* Reclaim a packet previously stored by fwd_save_skb(). */
                skb = retrieve_skb(ntohl(opo->buffer_id));
                if (!skb)
                        return -ESRCH;
        }

        dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

        flow_extract(skb, ntohs(opo->in_port), &key);

        /* Reject invalid actions, telling the controller which one failed. */
        v_code = validate_actions(chain->dp, &key, opo->actions, actions_len);
        if (v_code != ACT_VALIDATION_OK) {
                dp_send_error_msg(chain->dp, sender, OFPET_BAD_ACTION, v_code,
                                  msg, ntohs(opo->header.length));
                goto error;
        }

        execute_actions(chain->dp, skb, &key, opo->actions, actions_len, 1);

        return 0;

error:
        kfree_skb(skb);
        return -EINVAL;
}
176
177 static int
178 recv_port_mod(struct sw_chain *chain, const struct sender *sender,
179               const void *msg)
180 {
181         const struct ofp_port_mod *opm = msg;
182
183         dp_update_port_flags(chain->dp, opm);
184
185         return 0;
186 }
187
188 static int
189 recv_echo_request(struct sw_chain *chain, const struct sender *sender,
190                   const void *msg) 
191 {
192         return dp_send_echo_reply(chain->dp, sender, msg);
193 }
194
/* OFPT_ECHO_REPLY handler: nothing to do; the reply is simply consumed. */
static int
recv_echo_reply(struct sw_chain *chain, const struct sender *sender,
                  const void *msg)
{
        return 0;
}
201
/* OFPFC_ADD handler: build a new flow from 'ofm', insert it into 'chain',
 * and, if the controller referenced a buffered packet, run that packet
 * through the new flow's actions.  Sends an OpenFlow error message to the
 * controller on action-validation or table-full failures. */
static int
add_flow(struct sw_chain *chain, const struct sender *sender,
                const struct ofp_flow_mod *ofm)
{
        int error = -ENOMEM;
        uint16_t v_code;
        struct sw_flow *flow;
        size_t actions_len = ntohs(ofm->header.length) - sizeof *ofm;

        /* Allocate memory. */
        flow = flow_alloc(actions_len, GFP_ATOMIC);
        if (flow == NULL)
                goto error;

        flow_extract_match(&flow->key, &ofm->match);

        v_code = validate_actions(chain->dp, &flow->key, ofm->actions, actions_len);
        if (v_code != ACT_VALIDATION_OK) {
                dp_send_error_msg(chain->dp, sender, OFPET_BAD_ACTION, v_code,
                                  ofm, ntohs(ofm->header.length));
                goto error_free_flow;
        }

        /* Fill out flow.  Exact-match flows get maximum priority (-1 cast
         * to uint16_t); only wildcarded flows use the requested priority. */
        flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
        flow->idle_timeout = ntohs(ofm->idle_timeout);
        flow->hard_timeout = ntohs(ofm->hard_timeout);
        flow->used = jiffies;
        flow->init_time = jiffies;
        flow->byte_count = 0;
        flow->packet_count = 0;
        spin_lock_init(&flow->lock);
        memcpy(flow->sf_acts->actions, ofm->actions, actions_len);

        /* Act. */
        error = chain_insert(chain, flow);
        if (error == -ENOBUFS) {
                dp_send_error_msg(chain->dp, sender, OFPET_FLOW_MOD_FAILED,
                                OFPFMFC_ALL_TABLES_FULL, ofm, ntohs(ofm->header.length));
                goto error_free_flow;
        } else if (error)
                goto error_free_flow;
        error = 0;
        /* Apply the new flow to the controller-referenced buffered packet,
         * if one was given and has not expired. */
        if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
                struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
                if (skb) {
                        struct sw_flow_key key;
                        flow_used(flow, skb);
                        dp_set_origin(chain->dp, ntohs(ofm->match.in_port), skb);
                        flow_extract(skb, ntohs(ofm->match.in_port), &key);
                        execute_actions(chain->dp, skb, &key, ofm->actions, actions_len, 0);
                }
                else
                        error = -ESRCH;
        }
        return error;

error_free_flow:
        flow_free(flow);
error:
        /* Drop any referenced buffered packet so its slot is freed. */
        if (ntohl(ofm->buffer_id) != (uint32_t) -1)
                discard_skb(ntohl(ofm->buffer_id));
        return error;
}
266
267 static int
268 mod_flow(struct sw_chain *chain, const struct sender *sender,
269                 const struct ofp_flow_mod *ofm)
270 {
271         int error = -ENOMEM;
272         uint16_t v_code;
273         size_t actions_len;
274         struct sw_flow_key key;
275         uint16_t priority;
276         int strict;
277
278         flow_extract_match(&key, &ofm->match);
279
280         actions_len = ntohs(ofm->header.length) - sizeof *ofm;
281
282         v_code = validate_actions(chain->dp, &key, ofm->actions, actions_len);
283         if (v_code != ACT_VALIDATION_OK) {
284                 dp_send_error_msg(chain->dp, sender, OFPET_BAD_ACTION, v_code,
285                                   ofm, ntohs(ofm->header.length));
286                 goto error;
287         }
288
289         priority = key.wildcards ? ntohs(ofm->priority) : -1;
290         strict = (ofm->command == htons(OFPFC_MODIFY_STRICT)) ? 1 : 0;
291         chain_modify(chain, &key, priority, strict, ofm->actions, actions_len);
292
293         if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
294                 struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
295                 if (skb) {
296                         struct sw_flow_key skb_key;
297                         flow_extract(skb, ntohs(ofm->match.in_port), &skb_key);
298                         execute_actions(chain->dp, skb, &skb_key, 
299                                         ofm->actions, actions_len, 0);
300                 }
301                 else
302                         error = -ESRCH;
303         }
304         return error;
305
306 error:
307         if (ntohl(ofm->buffer_id) != (uint32_t) -1)
308                 discard_skb(ntohl(ofm->buffer_id));
309         return error;
310 }
311
312 static int
313 recv_flow(struct sw_chain *chain, const struct sender *sender, const void *msg)
314 {
315         const struct ofp_flow_mod *ofm = msg;
316         uint16_t command = ntohs(ofm->command);
317
318         if (command == OFPFC_ADD) {
319                 return add_flow(chain, sender, ofm);
320         } else if ((command == OFPFC_MODIFY) || (command == OFPFC_MODIFY_STRICT)) {
321                 return mod_flow(chain, sender, ofm);
322         }  else if (command == OFPFC_DELETE) {
323                 struct sw_flow_key key;
324                 flow_extract_match(&key, &ofm->match);
325                 return chain_delete(chain, &key, ofm->out_port, 0, 0) ? 0 : -ESRCH;
326         } else if (command == OFPFC_DELETE_STRICT) {
327                 struct sw_flow_key key;
328                 uint16_t priority;
329                 flow_extract_match(&key, &ofm->match);
330                 priority = key.wildcards ? ntohs(ofm->priority) : -1;
331                 return chain_delete(chain, &key, ofm->out_port, 
332                                 priority, 1) ? 0 : -ESRCH;
333         } else {
334                 return -ENOTSUPP;
335         }
336 }
337
338 static int
339 recv_vendor(struct sw_chain *chain, const struct sender *sender, 
340                 const void *msg)
341 {
342         const struct ofp_vendor_header *ovh = msg;
343
344         switch(ntohl(ovh->vendor))
345         {
346         case NX_VENDOR_ID:
347                 return nx_recv_msg(chain, sender, msg);
348         default:
349                 if (net_ratelimit())
350                         printk("Uknown vendor: %#x\n", ntohl(ovh->vendor));
351                 dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
352                                   OFPBRC_BAD_VENDOR, msg, ntohs(ovh->header.length));
353                 return -EINVAL;
354         }
355 }
356
357 /* 'msg', which is 'length' bytes long, was received across Netlink from
358  * 'sender'.  Apply it to 'chain'. */
359 int
360 fwd_control_input(struct sw_chain *chain, const struct sender *sender,
361                   const void *msg, size_t length)
362 {
363
364         struct openflow_packet {
365                 size_t min_size;
366                 int (*handler)(struct sw_chain *, const struct sender *,
367                                const void *);
368         };
369
370         static const struct openflow_packet packets[] = {
371                 [OFPT_HELLO] = {
372                         sizeof (struct ofp_header),
373                         recv_hello,
374                 },
375                 [OFPT_ECHO_REQUEST] = {
376                         sizeof (struct ofp_header),
377                         recv_echo_request,
378                 },
379                 [OFPT_ECHO_REPLY] = {
380                         sizeof (struct ofp_header),
381                         recv_echo_reply,
382                 },
383                 [OFPT_VENDOR] = {
384                         sizeof (struct ofp_vendor_header),
385                         recv_vendor,
386                 },
387                 [OFPT_FEATURES_REQUEST] = {
388                         sizeof (struct ofp_header),
389                         recv_features_request,
390                 },
391                 [OFPT_GET_CONFIG_REQUEST] = {
392                         sizeof (struct ofp_header),
393                         recv_get_config_request,
394                 },
395                 [OFPT_SET_CONFIG] = {
396                         sizeof (struct ofp_switch_config),
397                         recv_set_config,
398                 },
399                 [OFPT_PACKET_OUT] = {
400                         sizeof (struct ofp_packet_out),
401                         recv_packet_out,
402                 },
403                 [OFPT_FLOW_MOD] = {
404                         sizeof (struct ofp_flow_mod),
405                         recv_flow,
406                 },
407                 [OFPT_PORT_MOD] = {
408                         sizeof (struct ofp_port_mod),
409                         recv_port_mod,
410                 }
411         };
412
413         struct ofp_header *oh;
414
415         oh = (struct ofp_header *) msg;
416         if (oh->version != OFP_VERSION
417             && oh->type != OFPT_HELLO
418             && oh->type != OFPT_ERROR
419             && oh->type != OFPT_ECHO_REQUEST
420             && oh->type != OFPT_ECHO_REPLY
421             && oh->type != OFPT_VENDOR)
422         {
423                 dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
424                                   OFPBRC_BAD_VERSION, msg, length);
425                 return -EINVAL;
426         }
427         if (ntohs(oh->length) != length) {
428                 if (net_ratelimit())
429                         printk("received message length wrong: %d/%d\n", 
430                                 ntohs(oh->length), length);
431                 return -EINVAL;
432         }
433
434         if (oh->type < ARRAY_SIZE(packets)) {
435                 const struct openflow_packet *pkt = &packets[oh->type];
436                 if (pkt->handler) {
437                         if (length < pkt->min_size)
438                                 return -EFAULT;
439                         return pkt->handler(chain, sender, msg);
440                 }
441         }
442         dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
443                           OFPBRC_BAD_TYPE, msg, length);
444         return -EINVAL;
445 }
446
/* Packet buffering. */

/* A buffered packet becomes eligible for overwrite after this long. */
#define OVERWRITE_SECS  1
#define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)

/* One slot in the buffered-packet table. */
struct packet_buffer {
        struct sk_buff *skb;            /* Buffered packet, or NULL if free. */
        uint32_t cookie;                /* Detects stale/reused buffer ids. */
        unsigned long exp_jiffies;      /* When the slot may be overwritten. */
};

static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;         /* Most recently assigned slot. */
static DEFINE_SPINLOCK(buffer_lock);    /* Protects buffers[] and buffer_idx. */
461
/* Stores a copy of 'skb' in the packet-buffer table and returns an opaque
 * id for retrieve_skb()/discard_skb(), or (uint32_t) -1 if the packet
 * could not be buffered (copy failure, or the next slot is still too
 * fresh to overwrite).  The id packs the slot index into the low
 * PKT_BUFFER_BITS bits and the slot's cookie above them. */
uint32_t fwd_save_skb(struct sk_buff *skb)
{
        struct sk_buff *old_skb = NULL;
        struct packet_buffer *p;
        unsigned long int flags;
        uint32_t id;

        /* FIXME: Probably just need a skb_clone() here. */
        skb = skb_copy(skb, GFP_ATOMIC);
        if (!skb)
                return -1;

        spin_lock_irqsave(&buffer_lock, flags);
        buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
        p = &buffers[buffer_idx];
        if (p->skb) {
                /* Don't buffer packet if existing entry is less than
                 * OVERWRITE_SECS old. */
                if (time_before(jiffies, p->exp_jiffies)) {
                        spin_unlock_irqrestore(&buffer_lock, flags);
                        kfree_skb(skb);
                        return -1;
                } else {
                        /* Defer kfree_skb() until interrupts re-enabled.
                         * FIXME: we only need to do that if it has a
                         * destructor, but it never should since we orphan
                         * sk_buffs on entry. */
                        old_skb = p->skb;
                }
        }
        /* Don't use maximum cookie value since the all-bits-1 id is
         * special. */
        if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
                p->cookie = 0;
        p->skb = skb;
        p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
        id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
        spin_unlock_irqrestore(&buffer_lock, flags);

        if (old_skb)
                kfree_skb(old_skb);

        return id;
}
506
507 static struct sk_buff *retrieve_skb(uint32_t id)
508 {
509         unsigned long int flags;
510         struct sk_buff *skb = NULL;
511         struct packet_buffer *p;
512
513         spin_lock_irqsave(&buffer_lock, flags);
514         p = &buffers[id & PKT_BUFFER_MASK];
515         if (p->cookie == id >> PKT_BUFFER_BITS) {
516                 skb = p->skb;
517                 p->skb = NULL;
518         } else {
519                 printk("cookie mismatch: %x != %x\n",
520                                 id >> PKT_BUFFER_BITS, p->cookie);
521         }
522         spin_unlock_irqrestore(&buffer_lock, flags);
523
524         return skb;
525 }
526
527 void fwd_discard_all(void) 
528 {
529         int i;
530
531         for (i = 0; i < N_PKT_BUFFERS; i++) {
532                 struct sk_buff *skb;
533                 unsigned long int flags;
534
535                 /* Defer kfree_skb() until interrupts re-enabled. */
536                 spin_lock_irqsave(&buffer_lock, flags);
537                 skb = buffers[i].skb;
538                 buffers[i].skb = NULL;
539                 spin_unlock_irqrestore(&buffer_lock, flags);
540
541                 kfree_skb(skb);
542         }
543 }
544
545 static void discard_skb(uint32_t id)
546 {
547         struct sk_buff *old_skb = NULL;
548         unsigned long int flags;
549         struct packet_buffer *p;
550
551         spin_lock_irqsave(&buffer_lock, flags);
552         p = &buffers[id & PKT_BUFFER_MASK];
553         if (p->cookie == id >> PKT_BUFFER_BITS) {
554                 /* Defer kfree_skb() until interrupts re-enabled. */
555                 old_skb = p->skb;
556                 p->skb = NULL;
557         }
558         spin_unlock_irqrestore(&buffer_lock, flags);
559
560         if (old_skb)
561                 kfree_skb(old_skb);
562 }
563
/* Module teardown hook for the forwarding layer: releases all buffered
 * packets. */
void fwd_exit(void)
{
        fwd_discard_all();
}