Add support for OFPP_TABLE virtual port.
[sliver-openvswitch.git] / datapath / forward.c
1 /*
2  * Distributed under the terms of the GNU GPL version 2.
3  * Copyright (c) 2007, 2008 The Board of Trustees of The Leland 
4  * Stanford Junior University
5  */
6
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/in.h>
10 #include <linux/ip.h>
11 #include <linux/tcp.h>
12 #include <linux/udp.h>
13 #include <linux/in6.h>
14 #include <asm/uaccess.h>
15 #include <linux/types.h>
16 #include <net/checksum.h>
17 #include "forward.h"
18 #include "datapath.h"
19 #include "chain.h"
20 #include "flow.h"
21
22 /* FIXME: do we need to use GFP_ATOMIC everywhere here? */
23
24 static int make_writable(struct sk_buff **);
25
26 static struct sk_buff *retrieve_skb(uint32_t id);
27 static void discard_skb(uint32_t id);
28
29 /* 'skb' was received on 'in_port', a physical switch port between 0 and
30  * OFPP_MAX.  Process it according to 'chain'. */
31 void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb, int in_port)
32 {
33         struct sw_flow_key key;
34         struct sw_flow *flow;
35
36         flow_extract(skb, in_port, &key);
37         flow = chain_lookup(chain, &key);
38         if (likely(flow != NULL)) {
39                 flow_used(flow, skb);
40                 execute_actions(chain->dp, skb, &key,
41                                 flow->actions, flow->n_actions);
42         } else {
43                 dp_output_control(chain->dp, skb, fwd_save_skb(skb), 
44                                 chain->dp->miss_send_len, OFPR_NO_MATCH);
45         }
46 }
47
48 static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
49                         int out_port)
50 {
51         if (!skb)
52                 return -ENOMEM;
53         return (likely(out_port != OFPP_CONTROLLER)
54                 ? dp_output_port(dp, skb, out_port)
55                 : dp_output_control(dp, skb, fwd_save_skb(skb),
56                                          max_len, OFPR_ACTION));
57 }
58
59 void execute_actions(struct datapath *dp, struct sk_buff *skb,
60                                 const struct sw_flow_key *key,
61                                 const struct ofp_action *actions, int n_actions)
62 {
63         /* Every output action needs a separate clone of 'skb', but the common
64          * case is just a single output action, so that doing a clone and
65          * then freeing the original skbuff is wasteful.  So the following code
66          * is slightly obscure just to avoid that. */
67         int prev_port;
68         size_t max_len=0;        /* Initialze to make compiler happy */
69         uint16_t eth_proto;
70         int i;
71
72         prev_port = -1;
73         eth_proto = ntohs(key->dl_type);
74
75         for (i = 0; i < n_actions; i++) {
76                 const struct ofp_action *a = &actions[i];
77
78                 if (prev_port != -1) {
79                         do_output(dp, skb_clone(skb, GFP_ATOMIC),
80                                   max_len, prev_port);
81                         prev_port = -1;
82                 }
83
84                 if (likely(a->type == htons(OFPAT_OUTPUT))) {
85                         prev_port = ntohs(a->arg.output.port);
86                         max_len = ntohs(a->arg.output.max_len);
87                 } else {
88                         if (!make_writable(&skb)) {
89                                 printk("make_writable failed\n");
90                                 break;
91                         }
92                         skb = execute_setter(skb, eth_proto, key, a);
93                 }
94         }
95         if (prev_port != -1)
96                 do_output(dp, skb, max_len, prev_port);
97         else
98                 kfree_skb(skb);
99 }
100
/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
 * covered by the sum has been changed from 'from' to 'to'.  If set,
 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
 * Based on nf_proto_csum_replace4. */
static void update_csum(__sum16 *sum, struct sk_buff *skb,
                        __be32 from, __be32 to, int pseudohdr)
{
        /* In one's-complement arithmetic, folding {~from, to} into the
         * existing sum is equivalent to recomputing it with the field
         * changed. */
        __be32 diff[] = { ~from, to };
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                /* Checksum in the packet is complete: patch it directly. */
                *sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
                                ~csum_unfold(*sum)));
                /* For CHECKSUM_COMPLETE, skb->csum also covers the changed
                 * bytes, so keep it consistent too. */
                if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
                        skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                                ~skb->csum);
        } else if (pseudohdr)
                /* CHECKSUM_PARTIAL: hardware will finish the data checksum,
                 * but the pseudo-header portion must be fixed up here. */
                *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
                                csum_unfold(*sum)));
}
119
/* Applies an OFPAT_SET_NW_SRC/OFPAT_SET_NW_DST action 'a' to 'skb',
 * rewriting the IPv4 source or destination address and fixing up the IP
 * header checksum plus any TCP/UDP checksum (which covers the addresses
 * via the pseudo-header).  No-op for non-IPv4 packets. */
static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                struct iphdr *nh = ip_hdr(skb);
                uint32_t new, *field;

                /* No byte-order conversion applied: nw_addr is presumably
                 * already in network byte order, matching saddr/daddr --
                 * TODO confirm against the action encoding. */
                new = a->arg.nw_addr;

                if (a->type == htons(OFPAT_SET_NW_SRC))
                        field = &nh->saddr;
                else
                        field = &nh->daddr;

                /* NOTE: every checksum update below reads the OLD value via
                 * *field; the field itself must be overwritten last. */
                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                }
                update_csum(&nh->check, skb, *field, new, 0);
                *field = new;
        }
}
145
/* Applies an OFPAT_SET_TP_SRC/OFPAT_SET_TP_DST action 'a' to 'skb',
 * rewriting the TCP or UDP source or destination port and fixing up the
 * transport checksum.  No-op for non-IPv4 packets and for transports
 * other than TCP/UDP. */
static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                uint16_t new, *field;

                /* Used as-is -- presumably already network byte order;
                 * TODO confirm against the action encoding. */
                new = a->arg.tp;

                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);

                        if (a->type == htons(OFPAT_SET_TP_SRC))
                                field = &th->source;
                        else
                                field = &th->dest;

                        /* Checksum is patched from the old *field value
                         * before the field itself is overwritten. */
                        update_csum(&th->check, skb, *field, new, 1);
                        *field = new;
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);

                        if (a->type == htons(OFPAT_SET_TP_SRC))
                                field = &th->source;
                        else
                                field = &th->dest;

                        update_csum(&th->check, skb, *field, new, 1);
                        *field = new;
                }
        }
}
177
/* Removes the outermost 802.1Q tag from 'skb', if present, and returns
 * 'skb'.  The Ethernet addresses are slid over the 4-byte tag and
 * skb->protocol / the MAC header offset are fixed up to match.  Untagged
 * packets are returned unchanged. */
static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
{
        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
        struct ethhdr *eh;


        /* Verify we were given a vlan packet */
        if (vh->h_vlan_proto != htons(ETH_P_8021Q))
                return skb;

        /* Shift the two MAC addresses up over the tag; must happen before
         * skb_pull() discards the leading 4 bytes. */
        memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);

        eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

        /* The encapsulated ethertype becomes the packet's protocol. */
        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return skb;
}
197
198 static struct sk_buff *modify_vlan(struct sk_buff *skb, 
199                 const struct sw_flow_key *key, const struct ofp_action *a)
200 {
201         uint16_t new_id = a->arg.vlan_id;
202
203         if (new_id != OFP_VLAN_NONE) {
204                 if (key->dl_vlan != htons(OFP_VLAN_NONE)) {
205                         /* Modify vlan id, but maintain other TCI values */
206                         struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
207                         vh->h_vlan_TCI = (vh->h_vlan_TCI 
208                                         & ~(htons(VLAN_VID_MASK))) | htons(new_id);
209                 } else  {
210                         /* Add vlan header */
211                         skb = vlan_put_tag(skb, new_id);
212                 }
213         } else  {
214                 /* Remove an existing vlan header if it exists */
215                 vlan_pull_tag(skb);
216         }
217
218         return skb;
219 }
220
/* Dispatches the field-modifying action 'a' on 'skb' (already made
 * writable by the caller) and returns the possibly-reallocated skb.
 * NOTE(review): the VLAN case may return NULL if tag insertion fails
 * inside modify_vlan() -- callers should check. */
struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
                        const struct sw_flow_key *key, const struct ofp_action *a)
{
        switch (ntohs(a->type)) {
        case OFPAT_SET_DL_VLAN:
                /* May strip, rewrite, or insert a tag; can reallocate skb. */
                skb = modify_vlan(skb, key, a);
                break;

        case OFPAT_SET_DL_SRC: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
                break;
        }
        case OFPAT_SET_DL_DST: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
                break;
        }

        case OFPAT_SET_NW_SRC:
        case OFPAT_SET_NW_DST:
                /* IPv4 address rewrite with checksum fix-up. */
                modify_nh(skb, eth_proto, key->nw_proto, a);
                break;

        case OFPAT_SET_TP_SRC:
        case OFPAT_SET_TP_DST:
                /* TCP/UDP port rewrite with checksum fix-up. */
                modify_th(skb, eth_proto, key->nw_proto, a);
                break;

        default:
                /* Unknown setter: log (rate-limited) and leave skb as-is. */
                if (net_ratelimit())
                        printk("execute_setter: unknown action: %d\n", ntohs(a->type));
        }

        return skb;
}
257
258 static int
259 recv_control_hello(struct sw_chain *chain, const void *msg)
260 {
261         const struct ofp_control_hello *och = msg;
262
263         printk("control_hello(version=%d)\n", ntohl(och->version));
264
265         if (ntohs(och->miss_send_len) != OFP_MISS_SEND_LEN_UNCHANGED) {
266                 chain->dp->miss_send_len = ntohs(och->miss_send_len);
267         }
268
269         chain->dp->hello_flags = ntohs(och->flags);
270
271         dp_send_hello(chain->dp);
272
273         return 0;
274 }
275
276 static int
277 recv_packet_out(struct sw_chain *chain, const void *msg)
278 {
279         const struct ofp_packet_out *opo = msg;
280         struct sk_buff *skb;
281         struct vlan_ethhdr *mac;
282         int nh_ofs;
283
284         if (ntohl(opo->buffer_id) == (uint32_t) -1) {
285                 int data_len = ntohs(opo->header.length) - sizeof *opo;
286
287                 /* FIXME: there is likely a way to reuse the data in msg. */
288                 skb = alloc_skb(data_len, GFP_ATOMIC);
289                 if (!skb)
290                         return -ENOMEM;
291
292                 /* FIXME?  We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
293                  * we're just transmitting this raw without examining anything
294                  * at those layers. */
295                 memcpy(skb_put(skb, data_len), opo->u.data, data_len);
296                 dp_set_origin(chain->dp, ntohs(opo->in_port), skb);
297
298                 skb_set_mac_header(skb, 0);
299                 mac = vlan_eth_hdr(skb);
300                 if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
301                         nh_ofs = sizeof(struct ethhdr);
302                 else
303                         nh_ofs = sizeof(struct vlan_ethhdr);
304                 skb_set_network_header(skb, nh_ofs);
305
306                 dp_output_port(chain->dp, skb, ntohs(opo->out_port));
307         } else {
308                 struct sw_flow_key key;
309                 int n_acts;
310
311                 skb = retrieve_skb(ntohl(opo->buffer_id));
312                 if (!skb)
313                         return -ESRCH;
314                 dp_set_origin(chain->dp, ntohs(opo->in_port), skb);
315
316                 n_acts = (ntohs(opo->header.length) - sizeof *opo) 
317                                 / sizeof *opo->u.actions;
318                 flow_extract(skb, ntohs(opo->in_port), &key);
319                 execute_actions(chain->dp, skb, &key, opo->u.actions, n_acts);
320         }
321         return 0;
322 }
323
324 static int
325 recv_port_mod(struct sw_chain *chain, const void *msg)
326 {
327         const struct ofp_port_mod *opm = msg;
328
329         dp_update_port_flags(chain->dp, &opm->desc);
330
331         return 0;
332 }
333
334 static int
335 add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
336 {
337         int error = -ENOMEM;
338         int i;
339         int n_acts;
340         struct sw_flow *flow;
341
342
343         /* Check number of actions. */
344         n_acts = (ntohs(ofm->header.length) - sizeof *ofm) / sizeof *ofm->actions;
345         if (n_acts > MAX_ACTIONS) {
346                 error = -E2BIG;
347                 goto error;
348         }
349
350         /* To prevent loops, make sure there's no action to send to the
351          * OFP_TABLE virtual port.
352          */
353         for (i=0; i<n_acts; i++) {
354                 const struct ofp_action *a = &ofm->actions[i];
355
356                 if (a->type == htons(OFPAT_OUTPUT) 
357                                         && a->arg.output.port == htons(OFPP_TABLE)) {
358                         /* xxx Send fancy new error message? */
359                         goto error;
360                 }
361         }
362
363         /* Allocate memory. */
364         flow = flow_alloc(n_acts, GFP_ATOMIC);
365         if (flow == NULL)
366                 goto error;
367
368         /* Fill out flow. */
369         flow_extract_match(&flow->key, &ofm->match);
370         flow->group_id = ntohl(ofm->group_id);
371         flow->max_idle = ntohs(ofm->max_idle);
372         flow->timeout = jiffies + flow->max_idle * HZ;
373         flow->n_actions = n_acts;
374         flow->init_time = jiffies;
375         flow->byte_count = 0;
376         flow->packet_count = 0;
377         atomic_set(&flow->deleted, 0);
378         spin_lock_init(&flow->lock);
379         memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);
380
381         /* Act. */
382         error = chain_insert(chain, flow);
383         if (error)
384                 goto error_free_flow;
385         error = 0;
386         if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
387                 struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
388                 if (skb) {
389                         struct sw_flow_key key;
390                         flow_used(flow, skb);
391                         flow_extract(skb, ntohs(ofm->match.in_port), &key);
392                         execute_actions(chain->dp, skb, &key,
393                                         ofm->actions, n_acts);
394                 }
395                 else
396                         error = -ESRCH;
397         }
398         return error;
399
400 error_free_flow:
401         flow_free(flow);
402 error:
403         if (ntohl(ofm->buffer_id) != (uint32_t) -1)
404                 discard_skb(ntohl(ofm->buffer_id));
405         return error;
406 }
407
408 static int
409 recv_flow(struct sw_chain *chain, const void *msg)
410 {
411         const struct ofp_flow_mod *ofm = msg;
412         uint16_t command = ntohs(ofm->command);
413
414         if (command == OFPFC_ADD) {
415                 return add_flow(chain, ofm);
416         }  else if (command == OFPFC_DELETE) {
417                 struct sw_flow_key key;
418                 flow_extract_match(&key, &ofm->match);
419                 return chain_delete(chain, &key, 0) ? 0 : -ESRCH;
420         } else if (command == OFPFC_DELETE_STRICT) {
421                 struct sw_flow_key key;
422                 flow_extract_match(&key, &ofm->match);
423                 return chain_delete(chain, &key, 1) ? 0 : -ESRCH;
424         } else {
425                 return -ENOTSUPP;
426         }
427 }
428
429 /* 'msg', which is 'length' bytes long, was received from the control path.
430  * Apply it to 'chain'. */
431 int
432 fwd_control_input(struct sw_chain *chain, const void *msg, size_t length)
433 {
434
435         struct openflow_packet {
436                 size_t min_size;
437                 int (*handler)(struct sw_chain *, const void *);
438         };
439
440         static const struct openflow_packet packets[] = {
441                 [OFPT_CONTROL_HELLO] = {
442                         sizeof (struct ofp_control_hello),
443                         recv_control_hello,
444                 },
445                 [OFPT_PACKET_OUT] = {
446                         sizeof (struct ofp_packet_out),
447                         recv_packet_out,
448                 },
449                 [OFPT_FLOW_MOD] = {
450                         sizeof (struct ofp_flow_mod),
451                         recv_flow,
452                 },
453                 [OFPT_PORT_MOD] = {
454                         sizeof (struct ofp_port_mod),
455                         recv_port_mod,
456                 },
457         };
458
459         const struct openflow_packet *pkt;
460         struct ofp_header *oh;
461
462         if (length < sizeof(struct ofp_header))
463                 return -EINVAL;
464
465         oh = (struct ofp_header *) msg;
466         if (oh->version != 1 || oh->type >= ARRAY_SIZE(packets)
467                 || ntohs(oh->length) > length)
468                 return -EINVAL;
469
470         pkt = &packets[oh->type];
471         if (!pkt->handler)
472                 return -ENOSYS;
473         if (length < pkt->min_size)
474                 return -EFAULT;
475
476         return pkt->handler(chain, msg);
477 }
478
/* Packet buffering. */

/* Minimum age before a buffered packet may be overwritten by a newer one. */
#define OVERWRITE_SECS  1
#define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)

/* One slot in the circular buffered-packet table. */
struct packet_buffer {
        struct sk_buff *skb;       /* Held packet, or NULL when the slot is empty. */
        uint32_t cookie;           /* Incremented on reuse; detects stale ids. */
        unsigned long exp_jiffies; /* Time after which the slot may be reused. */
};

/* Table state; all three are protected by 'buffer_lock'. */
static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;
static DEFINE_SPINLOCK(buffer_lock);
493
494 uint32_t fwd_save_skb(struct sk_buff *skb)
495 {
496         struct packet_buffer *p;
497         unsigned long int flags;
498         uint32_t id;
499
500         spin_lock_irqsave(&buffer_lock, flags);
501         buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
502         p = &buffers[buffer_idx];
503         if (p->skb) {
504                 /* Don't buffer packet if existing entry is less than
505                  * OVERWRITE_SECS old. */
506                 if (time_before(jiffies, p->exp_jiffies)) {
507                         spin_unlock_irqrestore(&buffer_lock, flags);
508                         return -1;
509                 } else 
510                         kfree_skb(p->skb);
511         }
512         /* Don't use maximum cookie value since the all-bits-1 id is
513          * special. */
514         if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
515                 p->cookie = 0;
516         skb_get(skb);
517         p->skb = skb;
518         p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
519         id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
520         spin_unlock_irqrestore(&buffer_lock, flags);
521
522         return id;
523 }
524
525 static struct sk_buff *retrieve_skb(uint32_t id)
526 {
527         unsigned long int flags;
528         struct sk_buff *skb = NULL;
529         struct packet_buffer *p;
530
531         spin_lock_irqsave(&buffer_lock, flags);
532         p = &buffers[id & PKT_BUFFER_MASK];
533         if (p->cookie == id >> PKT_BUFFER_BITS) {
534                 skb = p->skb;
535                 p->skb = NULL;
536         } else {
537                 printk("cookie mismatch: %x != %x\n",
538                                 id >> PKT_BUFFER_BITS, p->cookie);
539         }
540         spin_unlock_irqrestore(&buffer_lock, flags);
541
542         return skb;
543 }
544
545 static void discard_skb(uint32_t id)
546 {
547         unsigned long int flags;
548         struct packet_buffer *p;
549
550         spin_lock_irqsave(&buffer_lock, flags);
551         p = &buffers[id & PKT_BUFFER_MASK];
552         if (p->cookie == id >> PKT_BUFFER_BITS) {
553                 kfree_skb(p->skb);
554                 p->skb = NULL;
555         }
556         spin_unlock_irqrestore(&buffer_lock, flags);
557 }
558
559 void fwd_exit(void)
560 {
561         int i;
562
563         for (i = 0; i < N_PKT_BUFFERS; i++)
564                 kfree_skb(buffers[i].skb);
565 }
566
567 /* Utility functions. */
568
569 /* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
570  * the copy.
571  * Returns 1 if successful, 0 on failure. */
572 static int
573 make_writable(struct sk_buff **pskb)
574 {
575         /* Based on skb_make_writable() in net/netfilter/core.c. */
576         struct sk_buff *nskb;
577
578         /* Not exclusive use of packet?  Must copy. */
579         if (skb_shared(*pskb) || skb_cloned(*pskb))
580                 goto copy_skb;
581
582         return pskb_may_pull(*pskb, 64); /* FIXME? */
583
584 copy_skb:
585         nskb = skb_copy(*pskb, GFP_ATOMIC);
586         if (!nskb)
587                 return 0;
588         BUG_ON(skb_is_nonlinear(nskb));
589
590         /* Rest of kernel will get very unhappy if we pass it a
591            suddenly-orphaned skbuff */
592         if ((*pskb)->sk)
593                 skb_set_owner_w(nskb, (*pskb)->sk);
594         kfree_skb(*pskb);
595         *pskb = nskb;
596         return 1;
597 }