/* datapath/forward.c (sliver-openvswitch.git, commit 8e2bc112ef34806b2d14c45666fdf8e61d2352c3) */
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <net/checksum.h>
#include "forward.h"
#include "datapath.h"
#include "chain.h"
#include "flow.h"

/* FIXME: do we need to use GFP_ATOMIC everywhere here? */

static int make_writable(struct sk_buff **);

static struct sk_buff *retrieve_skb(uint32_t id);
static void discard_skb(uint32_t id);

/* 'skb' was received on 'in_port', a physical switch port between 0 and
 * OFPP_MAX.  Process it according to 'chain'.  Returns 0 if successful, in
 * which case 'skb' is destroyed, or -ESRCH if there is no matching flow, in
 * which case 'skb' still belongs to the caller. */
int run_flow_through_tables(struct sw_chain *chain, struct sk_buff *skb,
                            int in_port)
{
        struct sw_flow_key key;
        struct sw_flow *flow;

        flow_extract(skb, in_port, &key);
        flow = chain_lookup(chain, &key);
        if (likely(flow != NULL)) {
                flow_used(flow, skb);
                execute_actions(chain->dp, skb, &key,
                                flow->actions, flow->n_actions);
                return 0;
        } else {
                return -ESRCH;
        }
}

/* 'skb' was received on 'in_port', a physical switch port between 0 and
 * OFPP_MAX.  Process it according to 'chain', sending it up to the controller
 * if no flow matches.  Takes ownership of 'skb'. */
void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb, int in_port)
{
        if (run_flow_through_tables(chain, skb, in_port))
                dp_output_control(chain->dp, skb, fwd_save_skb(skb),
                                  chain->dp->miss_send_len,
                                  OFPR_NO_MATCH);
}

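/* Sends 'skb' out 'out_port' on 'dp'.  If 'out_port' is OFPP_CONTROLLER,
 * instead sends up to 'max_len' bytes of the packet to the controller as an
 * OFPR_ACTION message.  Returns -ENOMEM if 'skb' is null, which lets callers
 * pass the result of a possibly failed skb_clone() straight through. */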
static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
                        int out_port)
{
        if (!skb)
                return -ENOMEM;
        return (likely(out_port != OFPP_CONTROLLER)
                ? dp_output_port(dp, skb, out_port)
                : dp_output_control(dp, skb, fwd_save_skb(skb),
                                         max_len, OFPR_ACTION));
}

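/* Applies each of the 'n_actions' actions in 'actions' to 'skb', whose
 * extracted flow is described by 'key', emitting a packet for every output
 * action.  Consumes 'skb'. */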
void execute_actions(struct datapath *dp, struct sk_buff *skb,
                                const struct sw_flow_key *key,
                                const struct ofp_action *actions, int n_actions)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so doing a clone and then
         * freeing the original skbuff would be wasteful.  The following code
         * is slightly obscure just to avoid that. */
        int prev_port;
        size_t max_len = 0;        /* Initialize to make compiler happy */
        uint16_t eth_proto;
        int i;

        prev_port = -1;
        eth_proto = ntohs(key->dl_type);

        for (i = 0; i < n_actions; i++) {
                const struct ofp_action *a = &actions[i];

                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC),
                                  max_len, prev_port);
                        prev_port = -1;
                }

                if (likely(a->type == htons(OFPAT_OUTPUT))) {
                        prev_port = ntohs(a->arg.output.port);
                        max_len = ntohs(a->arg.output.max_len);
                } else {
                        if (!make_writable(&skb)) {
                                if (net_ratelimit())
                                        printk("make_writable failed\n");
                                break;
                        }
                        skb = execute_setter(skb, eth_proto, key, a);
                        if (!skb) {
                                if (net_ratelimit())
                                        printk("execute_setter lost skb\n");
                                return;
                        }
                }
        }
        if (prev_port != -1)
                do_output(dp, skb, max_len, prev_port);
        else
                kfree_skb(skb);
}

/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
 * covered by the sum has been changed from 'from' to 'to'.  If set,
 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
 * Based on nf_proto_csum_replace4. */
static void update_csum(__sum16 *sum, struct sk_buff *skb,
                        __be32 from, __be32 to, int pseudohdr)
{
        __be32 diff[] = { ~from, to };
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                *sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
                                ~csum_unfold(*sum)));
                if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
                        skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                                ~skb->csum);
        } else if (pseudohdr)
                *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
                                csum_unfold(*sum)));
}

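/* Implements OFPAT_SET_NW_SRC and OFPAT_SET_NW_DST: rewrites the IPv4 source
 * or destination address in 'skb' to a->arg.nw_addr and updates the IP header
 * checksum plus the TCP or UDP checksum, whose pseudo-header covers the
 * address.  Does nothing for non-IP packets. */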
static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                struct iphdr *nh = ip_hdr(skb);
                uint32_t new, *field;

                new = a->arg.nw_addr;

                if (a->type == htons(OFPAT_SET_NW_SRC))
                        field = &nh->saddr;
                else
                        field = &nh->daddr;

                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                }
                update_csum(&nh->check, skb, *field, new, 0);
                *field = new;
        }
}

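/* Implements OFPAT_SET_TP_SRC and OFPAT_SET_TP_DST: rewrites the TCP or UDP
 * source or destination port in 'skb' to a->arg.tp and updates the transport
 * checksum.  Does nothing unless the packet is IPv4 carrying TCP or UDP. */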
static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                uint16_t new, *field;

                new = a->arg.tp;

                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);

                        if (a->type == htons(OFPAT_SET_TP_SRC))
                                field = &th->source;
                        else
                                field = &th->dest;

                        update_csum(&th->check, skb, *field, new, 1);
                        *field = new;
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);

                        if (a->type == htons(OFPAT_SET_TP_SRC))
                                field = &th->source;
                        else
                                field = &th->dest;

                        update_csum(&th->check, skb, *field, new, 1);
                        *field = new;
                }
        }
}

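/* Removes the outermost 802.1Q tag from 'skb', if one is present, by sliding
 * the Ethernet addresses over the tag and adjusting skb->protocol and the MAC
 * header offset.  Returns 'skb' unchanged if it is not a VLAN packet. */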
static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
{
        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
        struct ethhdr *eh;

        /* Verify we were given a vlan packet */
        if (vh->h_vlan_proto != htons(ETH_P_8021Q))
                return skb;

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);

        eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return skb;
}

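/* Implements OFPAT_SET_DL_VLAN: rewrites the VLAN ID in an existing tag,
 * pushes a new tag onto an untagged packet, or strips the tag entirely when
 * the action specifies OFP_VLAN_NONE.  May return a reallocated skb (or NULL
 * if __vlan_put_tag() fails), so callers must use the return value. */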
static struct sk_buff *modify_vlan(struct sk_buff *skb,
                const struct sw_flow_key *key, const struct ofp_action *a)
{
        uint16_t new_id = ntohs(a->arg.vlan_id);

        if (new_id != OFP_VLAN_NONE) {
                if (key->dl_vlan != htons(OFP_VLAN_NONE)) {
                        /* Modify vlan id, but maintain other TCI values */
                        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
                        vh->h_vlan_TCI = (vh->h_vlan_TCI
                                        & ~(htons(VLAN_VID_MASK))) | a->arg.vlan_id;
                } else {
                        /* Add vlan header */

                        /* xxx The vlan_put_tag function doesn't seem to work
                         * xxx reliably when it attempts to use the hardware-accelerated
                         * xxx version.  We'll use the software version directly
                         * xxx until the problem can be diagnosed.
                         */
                        skb = __vlan_put_tag(skb, new_id);
                }
        } else {
                /* Remove an existing vlan header if it exists */
                vlan_pull_tag(skb);
        }

        return skb;
}

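/* Applies the single field-modifying action 'a' to 'skb'.  Returns the
 * (possibly reallocated) skb on success, or NULL if a VLAN operation lost
 * the buffer; unknown action types are logged and ignored. */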
struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
                        const struct sw_flow_key *key, const struct ofp_action *a)
{
        switch (ntohs(a->type)) {
        case OFPAT_SET_DL_VLAN:
                skb = modify_vlan(skb, key, a);
                break;

        case OFPAT_SET_DL_SRC: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
                break;
        }
        case OFPAT_SET_DL_DST: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
                break;
        }

        case OFPAT_SET_NW_SRC:
        case OFPAT_SET_NW_DST:
                modify_nh(skb, eth_proto, key->nw_proto, a);
                break;

        case OFPAT_SET_TP_SRC:
        case OFPAT_SET_TP_DST:
                modify_th(skb, eth_proto, key->nw_proto, a);
                break;

        default:
                if (net_ratelimit())
                        printk("execute_setter: unknown action: %d\n", ntohs(a->type));
        }

        return skb;
}

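/* Handlers for OpenFlow control messages, one per OFPT_* message type.  Each
 * takes the chain, the message sender, and the raw message; they are
 * dispatched from the table in fwd_control_input() below. */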
static int
recv_features_request(struct sw_chain *chain, const struct sender *sender,
                      const void *msg)
{
        return dp_send_features_reply(chain->dp, sender);
}

static int
recv_get_config_request(struct sw_chain *chain, const struct sender *sender,
                        const void *msg)
{
        return dp_send_config_reply(chain->dp, sender);
}

static int
recv_set_config(struct sw_chain *chain, const struct sender *sender,
                const void *msg)
{
        const struct ofp_switch_config *osc = msg;

        chain->dp->flags = ntohs(osc->flags);
        chain->dp->miss_send_len = ntohs(osc->miss_send_len);

        return 0;
}

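/* Handles OFPT_PACKET_OUT.  If the message carries raw packet data
 * (buffer_id of -1), copies it into a fresh skb and transmits it on
 * 'out_port'; otherwise looks up the buffered packet by id and runs the
 * attached action list on it. */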
static int
recv_packet_out(struct sw_chain *chain, const struct sender *sender,
                const void *msg)
{
        const struct ofp_packet_out *opo = msg;
        struct sk_buff *skb;
        struct vlan_ethhdr *mac;
        int nh_ofs;

        if (ntohl(opo->buffer_id) == (uint32_t) -1) {
                int data_len = ntohs(opo->header.length) - sizeof *opo;

                /* FIXME: there is likely a way to reuse the data in msg. */
                skb = alloc_skb(data_len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                /* FIXME?  We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
                 * we're just transmitting this raw without examining anything
                 * at those layers. */
                memcpy(skb_put(skb, data_len), opo->u.data, data_len);
                dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

                skb_set_mac_header(skb, 0);
                mac = vlan_eth_hdr(skb);
                if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
                        nh_ofs = sizeof(struct ethhdr);
                else
                        nh_ofs = sizeof(struct vlan_ethhdr);
                skb_set_network_header(skb, nh_ofs);

                dp_output_port(chain->dp, skb, ntohs(opo->out_port));
        } else {
                struct sw_flow_key key;
                int n_acts;

                skb = retrieve_skb(ntohl(opo->buffer_id));
                if (!skb)
                        return -ESRCH;
                dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

                n_acts = (ntohs(opo->header.length) - sizeof *opo)
                                / sizeof *opo->u.actions;
                flow_extract(skb, ntohs(opo->in_port), &key);
                execute_actions(chain->dp, skb, &key, opo->u.actions, n_acts);
        }
        return 0;
}

static int
recv_port_mod(struct sw_chain *chain, const struct sender *sender,
              const void *msg)
{
        const struct ofp_port_mod *opm = msg;

        dp_update_port_flags(chain->dp, &opm->desc);

        return 0;
}

static int
recv_echo_request(struct sw_chain *chain, const struct sender *sender,
                  const void *msg)
{
        return dp_send_echo_reply(chain->dp, sender, msg);
}

static int
recv_echo_reply(struct sw_chain *chain, const struct sender *sender,
                  const void *msg)
{
        return 0;
}

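/* Handles OFPFC_ADD for recv_flow(): validates the action list (to prevent
 * loops, no action may output to the OFPP_TABLE virtual port; OFPP_NONE is
 * rejected as well), allocates and fills in a struct sw_flow, inserts it
 * into 'chain', and, if the controller referenced a buffered packet, applies
 * the new actions to that packet.  Returns 0 or a negative errno value. */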
static int
add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
{
        int error = -ENOMEM;
        int i;
        int n_acts;
        struct sw_flow *flow;

        /* To prevent loops, make sure there's no action to send to the
         * OFPP_TABLE virtual port.
         */
        n_acts = (ntohs(ofm->header.length) - sizeof *ofm) / sizeof *ofm->actions;
        for (i = 0; i < n_acts; i++) {
                const struct ofp_action *a = &ofm->actions[i];

                if (a->type == htons(OFPAT_OUTPUT)
                                        && (a->arg.output.port == htons(OFPP_TABLE)
                                                || a->arg.output.port == htons(OFPP_NONE))) {
                        /* xxx Send fancy new error message? */
                        goto error;
                }
        }

        /* Allocate memory. */
        flow = flow_alloc(n_acts, GFP_ATOMIC);
        if (flow == NULL)
                goto error;

        /* Fill out flow. */
        flow_extract_match(&flow->key, &ofm->match);
        flow->max_idle = ntohs(ofm->max_idle);
        flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
        flow->timeout = jiffies + flow->max_idle * HZ;
        flow->n_actions = n_acts;
        flow->init_time = jiffies;
        flow->byte_count = 0;
        flow->packet_count = 0;
        spin_lock_init(&flow->lock);
        memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);

        /* Act. */
        error = chain_insert(chain, flow);
        if (error)
                goto error_free_flow;
        error = 0;
        if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
                struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
                if (skb) {
                        struct sw_flow_key key;
                        flow_used(flow, skb);
                        flow_extract(skb, ntohs(ofm->match.in_port), &key);
                        execute_actions(chain->dp, skb, &key,
                                        ofm->actions, n_acts);
                } else
                        error = -ESRCH;
        }
        return error;

error_free_flow:
        flow_free(flow);
error:
        if (ntohl(ofm->buffer_id) != (uint32_t) -1)
                discard_skb(ntohl(ofm->buffer_id));
        return error;
}

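/* Handles OFPT_FLOW_MOD by dispatching on the command: OFPFC_ADD installs a
 * new flow, OFPFC_DELETE removes every matching flow, and
 * OFPFC_DELETE_STRICT removes only flows that also match on priority. */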
static int
recv_flow(struct sw_chain *chain, const struct sender *sender, const void *msg)
{
        const struct ofp_flow_mod *ofm = msg;
        uint16_t command = ntohs(ofm->command);

        if (command == OFPFC_ADD) {
                return add_flow(chain, ofm);
        } else if (command == OFPFC_DELETE) {
                struct sw_flow_key key;
                flow_extract_match(&key, &ofm->match);
                return chain_delete(chain, &key, 0, 0) ? 0 : -ESRCH;
        } else if (command == OFPFC_DELETE_STRICT) {
                struct sw_flow_key key;
                uint16_t priority;
                flow_extract_match(&key, &ofm->match);
                priority = key.wildcards ? ntohs(ofm->priority) : -1;
                return chain_delete(chain, &key, priority, 1) ? 0 : -ESRCH;
        } else {
                return -ENOTSUPP;
        }
}

/* 'msg', which is 'length' bytes long, was received across Netlink from
 * 'sender'.  Apply it to 'chain'. */
int
fwd_control_input(struct sw_chain *chain, const struct sender *sender,
                  const void *msg, size_t length)
{
        struct openflow_packet {
                size_t min_size;
                int (*handler)(struct sw_chain *, const struct sender *,
                               const void *);
        };

        static const struct openflow_packet packets[] = {
                [OFPT_FEATURES_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_features_request,
                },
                [OFPT_GET_CONFIG_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_get_config_request,
                },
                [OFPT_SET_CONFIG] = {
                        sizeof (struct ofp_switch_config),
                        recv_set_config,
                },
                [OFPT_PACKET_OUT] = {
                        sizeof (struct ofp_packet_out),
                        recv_packet_out,
                },
                [OFPT_FLOW_MOD] = {
                        sizeof (struct ofp_flow_mod),
                        recv_flow,
                },
                [OFPT_PORT_MOD] = {
                        sizeof (struct ofp_port_mod),
                        recv_port_mod,
                },
                [OFPT_ECHO_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_echo_request,
                },
                [OFPT_ECHO_REPLY] = {
                        sizeof (struct ofp_header),
                        recv_echo_reply,
                },
        };

        const struct openflow_packet *pkt;
        struct ofp_header *oh;

        oh = (struct ofp_header *) msg;
        if (oh->version != OFP_VERSION || oh->type >= ARRAY_SIZE(packets)
                || ntohs(oh->length) > length)
                return -EINVAL;

        pkt = &packets[oh->type];
        if (!pkt->handler)
                return -ENOSYS;
        if (length < pkt->min_size)
                return -EFAULT;

        return pkt->handler(chain, sender, msg);
}

/* Packet buffering. */

#define OVERWRITE_SECS  1
#define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)

struct packet_buffer {
        struct sk_buff *skb;
        uint32_t cookie;
        unsigned long exp_jiffies;
};

static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;
static DEFINE_SPINLOCK(buffer_lock);

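/* Stashes a reference to 'skb' in the next slot of the circular 'buffers'
 * table and returns an id that encodes both the slot index and that slot's
 * cookie: id = slot | (cookie << PKT_BUFFER_BITS).  Returns (uint32_t) -1
 * without buffering if the slot's current occupant is newer than
 * OVERWRITE_SECS.  As an illustration only (PKT_BUFFER_BITS is defined in
 * the datapath headers, not here): if PKT_BUFFER_BITS were 8, slot 5 with
 * cookie 3 would yield id 0x305. */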
uint32_t fwd_save_skb(struct sk_buff *skb)
{
        struct packet_buffer *p;
        unsigned long int flags;
        uint32_t id;

        spin_lock_irqsave(&buffer_lock, flags);
        buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
        p = &buffers[buffer_idx];
        if (p->skb) {
                /* Don't buffer packet if existing entry is less than
                 * OVERWRITE_SECS old. */
                if (time_before(jiffies, p->exp_jiffies)) {
                        spin_unlock_irqrestore(&buffer_lock, flags);
                        return -1;
                } else
                        kfree_skb(p->skb);
        }
        /* Don't use maximum cookie value since the all-bits-1 id is
         * special. */
        if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
                p->cookie = 0;
        skb_get(skb);
        p->skb = skb;
        p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
        id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
        spin_unlock_irqrestore(&buffer_lock, flags);

        return id;
}

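/* Looks up the buffered packet for 'id'.  If the slot's cookie matches, the
 * skb is removed from the table and returned to the caller, which takes
 * ownership; otherwise the mismatch is logged and NULL is returned. */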
static struct sk_buff *retrieve_skb(uint32_t id)
{
        unsigned long int flags;
        struct sk_buff *skb = NULL;
        struct packet_buffer *p;

        spin_lock_irqsave(&buffer_lock, flags);
        p = &buffers[id & PKT_BUFFER_MASK];
        if (p->cookie == id >> PKT_BUFFER_BITS) {
                skb = p->skb;
                p->skb = NULL;
        } else {
                printk("cookie mismatch: %x != %x\n",
                                id >> PKT_BUFFER_BITS, p->cookie);
        }
        spin_unlock_irqrestore(&buffer_lock, flags);

        return skb;
}

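/* Releases every buffered packet and clears the table.  Called from
 * fwd_exit(). */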
void fwd_discard_all(void)
{
        unsigned long int flags;
        int i;

        spin_lock_irqsave(&buffer_lock, flags);
        for (i = 0; i < N_PKT_BUFFERS; i++) {
                kfree_skb(buffers[i].skb);
                buffers[i].skb = NULL;
        }
        spin_unlock_irqrestore(&buffer_lock, flags);
}

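/* Drops the buffered packet for 'id', if the slot's cookie still matches.
 * Used when a flow-mod that referenced a buffered packet fails. */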
static void discard_skb(uint32_t id)
{
        unsigned long int flags;
        struct packet_buffer *p;

        spin_lock_irqsave(&buffer_lock, flags);
        p = &buffers[id & PKT_BUFFER_MASK];
        if (p->cookie == id >> PKT_BUFFER_BITS) {
                kfree_skb(p->skb);
                p->skb = NULL;
        }
        spin_unlock_irqrestore(&buffer_lock, flags);
}

void fwd_exit(void)
{
        fwd_discard_all();
}

/* Utility functions. */

/* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
 * the copy.
 * Returns 1 if successful, 0 on failure. */
static int
make_writable(struct sk_buff **pskb)
{
        /* Based on skb_make_writable() in net/netfilter/core.c. */
        struct sk_buff *nskb;

        /* Not exclusive use of packet?  Must copy. */
        if (skb_shared(*pskb) || skb_cloned(*pskb))
                goto copy_skb;

        return pskb_may_pull(*pskb, 40); /* FIXME? */

copy_skb:
        nskb = skb_copy(*pskb, GFP_ATOMIC);
        if (!nskb)
                return 0;
        BUG_ON(skb_is_nonlinear(nskb));

        /* Rest of kernel will get very unhappy if we pass it a
           suddenly-orphaned skbuff */
        if ((*pskb)->sk)
                skb_set_owner_w(nskb, (*pskb)->sk);
        kfree_skb(*pskb);
        *pskb = nskb;
        return 1;
}