- Fixed endian issue with action type. Thanks, Ben!
[sliver-openvswitch.git] / datapath / forward.c
1 /*
2  * Distributed under the terms of the GNU GPL version 2.
3  * Copyright (c) 2007, 2008 The Board of Trustees of The Leland 
4  * Stanford Junior University
5  */
6
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/in.h>
10 #include <linux/ip.h>
11 #include <linux/tcp.h>
12 #include <linux/udp.h>
13 #include <linux/in6.h>
14 #include <asm/uaccess.h>
15 #include <linux/types.h>
16 #include <net/checksum.h>
17 #include "forward.h"
18 #include "datapath.h"
19 #include "chain.h"
20 #include "flow.h"
21
22 /* FIXME: do we need to use GFP_ATOMIC everywhere here? */
23
/* Applies 'n_actions' actions from 'actions' to 'skb'.  Consumes 'skb':
 * it is either transmitted or freed. */
static void execute_actions(struct datapath *, struct sk_buff *,
                                const struct sw_flow_key *,
                                const struct ofp_action *, int n_actions);
/* Ensures '*pskb' may be modified in place, copying it if shared. */
static int make_writable(struct sk_buff **);

/* Buffered-packet lookup helpers; see "Packet buffering" below. */
static struct sk_buff *retrieve_skb(uint32_t id);
static void discard_skb(uint32_t id);
31
32 /* 'skb' was received on 'in_port', a physical switch port between 0 and
33  * OFPP_MAX.  Process it according to 'chain'. */
34 void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb, int in_port)
35 {
36         struct sw_flow_key key;
37         struct sw_flow *flow;
38
39         flow_extract(skb, in_port, &key);
40         flow = chain_lookup(chain, &key);
41         if (likely(flow != NULL)) {
42                 flow_used(flow, skb);
43                 execute_actions(chain->dp, skb, &key,
44                                 flow->actions, flow->n_actions);
45         } else {
46                 dp_output_control(chain->dp, skb, fwd_save_skb(skb), 
47                                 chain->dp->miss_send_len, OFPR_NO_MATCH);
48         }
49 }
50
51 static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
52                         int out_port)
53 {
54         if (!skb)
55                 return -ENOMEM;
56         return (likely(out_port != OFPP_CONTROLLER)
57                 ? dp_output_port(dp, skb, out_port)
58                 : dp_output_control(dp, skb, fwd_save_skb(skb),
59                                          max_len, OFPR_ACTION));
60 }
61
62 static void execute_actions(struct datapath *dp, struct sk_buff *skb,
63                                 const struct sw_flow_key *key,
64                                 const struct ofp_action *actions, int n_actions)
65 {
66         /* Every output action needs a separate clone of 'skb', but the common
67          * case is just a single output action, so that doing a clone and
68          * then freeing the original skbuff is wasteful.  So the following code
69          * is slightly obscure just to avoid that. */
70         int prev_port;
71         size_t max_len=0;        /* Initialze to make compiler happy */
72         uint16_t eth_proto;
73         int i;
74
75         prev_port = -1;
76         eth_proto = ntohs(key->dl_type);
77
78         for (i = 0; i < n_actions; i++) {
79                 const struct ofp_action *a = &actions[i];
80
81                 if (prev_port != -1) {
82                         do_output(dp, skb_clone(skb, GFP_ATOMIC),
83                                   max_len, prev_port);
84                         prev_port = -1;
85                 }
86
87                 if (likely(a->type == htons(OFPAT_OUTPUT))) {
88                         prev_port = ntohs(a->arg.output.port);
89                         max_len = ntohs(a->arg.output.max_len);
90                 } else {
91                         if (!make_writable(&skb)) {
92                                 printk("make_writable failed\n");
93                                 break;
94                         }
95                         skb = execute_setter(skb, eth_proto, key, a);
96                 }
97         }
98         if (prev_port != -1)
99                 do_output(dp, skb, max_len, prev_port);
100         else
101                 kfree_skb(skb);
102 }
103
/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
 * covered by the sum has been changed from 'from' to 'to'.  If set,
 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
 * Based on nf_proto_csum_replace4. */
static void update_csum(__sum16 *sum, struct sk_buff *skb,
                        __be32 from, __be32 to, int pseudohdr)
{
        /* One's-complement incremental update: folding ~from and to into the
         * existing sum is equivalent to recomputing it over the new field. */
        __be32 diff[] = { ~from, to };
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                *sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
                                ~csum_unfold(*sum)));
                /* CHECKSUM_COMPLETE: skb->csum covers the packet data, so a
                 * pseudo-header change must be folded into it as well. */
                if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
                        skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                                ~skb->csum);
        } else if (pseudohdr)
                /* CHECKSUM_PARTIAL: hardware finishes the checksum later, but
                 * the pseudo-header seed stored in the packet must track the
                 * change now. */
                *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
                                csum_unfold(*sum)));
}
122
/* Implements OFPAT_SET_NW_SRC/DST: rewrites the IPv4 source or destination
 * address of 'skb' per action 'a', repairing the IP header checksum and,
 * for TCP/UDP, the transport checksum (which covers the pseudo-header).
 * Packets that are not IPv4 ('eth_proto') are left unchanged. */
static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                struct iphdr *nh = ip_hdr(skb);
                uint32_t new, *field;

                /* Stored without byte-order conversion; presumably the
                 * action carries the address in network order -- TODO confirm
                 * against the protocol definition. */
                new = a->arg.nw_addr;

                if (a->type == htons(OFPAT_SET_NW_SRC))
                        field = &nh->saddr;
                else
                        field = &nh->daddr;

                /* Checksums must be adjusted while '*field' still holds the
                 * old value; the field itself is written only afterwards. */
                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                }
                update_csum(&nh->check, skb, *field, new, 0);
                *field = new;
        }
}
148
149 static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
150                         uint8_t nw_proto, const struct ofp_action *a)
151 {
152         if (eth_proto == ETH_P_IP) {
153                 uint16_t new, *field;
154
155                 new = a->arg.tp;
156
157                 if (nw_proto == IPPROTO_TCP) {
158                         struct tcphdr *th = tcp_hdr(skb);
159
160                         if (a->type == htons(OFPAT_SET_TP_SRC))
161                                 field = &th->source;
162                         else
163                                 field = &th->dest;
164
165                         update_csum(&th->check, skb, *field, new, 1);
166                         *field = new;
167                 } else if (nw_proto == IPPROTO_UDP) {
168                         struct udphdr *th = udp_hdr(skb);
169
170                         if (a->type == htons(OFPAT_SET_TP_SRC))
171                                 field = &th->source;
172                         else
173                                 field = &th->dest;
174
175                         update_csum(&th->check, skb, *field, new, 1);
176                         *field = new;
177                 }
178         }
179 }
180
/* Strips the outermost 802.1Q tag from 'skb', if one is present, and returns
 * 'skb' (the buffer is never reallocated).  Untagged packets are returned
 * unchanged. */
static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
{
        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
        struct ethhdr *eh;


        /* Verify we were given a vlan packet */
        if (vh->h_vlan_proto != htons(ETH_P_8021Q))
                return skb;

        /* Slide the destination+source MAC addresses 4 bytes toward the
         * payload, over the tag, then pop the now-dead leading bytes. */
        memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);

        eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

        /* The encapsulated protocol becomes the packet's protocol again,
         * and the MAC header offset follows the data that moved. */
        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return skb;
}
200
/* Implements OFPAT_SET_DL_VLAN: sets 'skb''s VLAN ID to the action's
 * 'vlan_id', adding a tag if (per 'key') the packet has none, or stripping
 * the tag when the requested ID is OFP_VLAN_NONE.  May return a different
 * skb than it was given; NOTE(review): vlan_put_tag() can fail, in which
 * case the return value is presumably NULL -- callers must check. */
static struct sk_buff *modify_vlan(struct sk_buff *skb, 
                const struct sw_flow_key *key, const struct ofp_action *a)
{
        /* NOTE(review): 'vlan_id' is used without ntohs() but compared to
         * OFP_VLAN_NONE (0xffff, endian-symmetric) and passed through
         * htons() below -- confirm the wire byte order of this argument. */
        uint16_t new_id = a->arg.vlan_id;

        if (new_id != OFP_VLAN_NONE) {
                if (key->dl_vlan != htons(OFP_VLAN_NONE)) {
                        /* Modify vlan id, but maintain other TCI values */
                        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
                        vh->h_vlan_TCI = (vh->h_vlan_TCI 
                                        & ~(htons(VLAN_VID_MASK))) | htons(new_id);
                } else  {
                        /* Add vlan header */
                        skb = vlan_put_tag(skb, new_id);
                }
        } else  {
                /* Remove an existing vlan header if it exists */
                vlan_pull_tag(skb);
        }

        return skb;
}
223
/* Applies the non-output action 'a' to 'skb', whose Ethernet type is
 * 'eth_proto' and flow key is 'key'.  Returns the resulting skb, which the
 * VLAN case may replace -- callers must use the return value, not the
 * original pointer. */
struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
                        const struct sw_flow_key *key, const struct ofp_action *a)
{
        switch (ntohs(a->type)) {
        case OFPAT_SET_DL_VLAN:
                skb = modify_vlan(skb, key, a);
                break;

        case OFPAT_SET_DL_SRC: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
                break;
        }
        case OFPAT_SET_DL_DST: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
                break;
        }

        case OFPAT_SET_NW_SRC:
        case OFPAT_SET_NW_DST:
                modify_nh(skb, eth_proto, key->nw_proto, a);
                break;

        case OFPAT_SET_TP_SRC:
        case OFPAT_SET_TP_DST:
                modify_th(skb, eth_proto, key->nw_proto, a);
                break;
        
        default:
                /* Unknown actions are logged (rate-limited) and ignored. */
                if (net_ratelimit())
                        printk("execute_setter: unknown action: %d\n", ntohs(a->type));
        }

        return skb;
}
260
261 static int
262 recv_control_hello(struct sw_chain *chain, const void *msg)
263 {
264         const struct ofp_control_hello *och = msg;
265
266         printk("control_hello(version=%d)\n", ntohl(och->version));
267
268         if (ntohs(och->miss_send_len) != OFP_MISS_SEND_LEN_UNCHANGED) {
269                 chain->dp->miss_send_len = ntohs(och->miss_send_len);
270         }
271
272         chain->dp->hello_flags = ntohs(och->flags);
273
274         dp_send_hello(chain->dp);
275
276         return 0;
277 }
278
279 static int
280 recv_packet_out(struct sw_chain *chain, const void *msg)
281 {
282         const struct ofp_packet_out *opo = msg;
283         struct sk_buff *skb;
284         struct vlan_ethhdr *mac;
285         int nh_ofs;
286
287         if (ntohl(opo->buffer_id) == (uint32_t) -1) {
288                 int data_len = ntohs(opo->header.length) - sizeof *opo;
289
290                 /* FIXME: there is likely a way to reuse the data in msg. */
291                 skb = alloc_skb(data_len, GFP_ATOMIC);
292                 if (!skb)
293                         return -ENOMEM;
294
295                 /* FIXME?  We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
296                  * we're just transmitting this raw without examining anything
297                  * at those layers. */
298                 memcpy(skb_put(skb, data_len), opo->u.data, data_len);
299                 dp_set_origin(chain->dp, ntohs(opo->in_port), skb);
300
301                 skb_set_mac_header(skb, 0);
302                 mac = vlan_eth_hdr(skb);
303                 if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
304                         nh_ofs = sizeof(struct ethhdr);
305                 else
306                         nh_ofs = sizeof(struct vlan_ethhdr);
307                 skb_set_network_header(skb, nh_ofs);
308
309                 dp_output_port(chain->dp, skb, ntohs(opo->out_port));
310         } else {
311                 struct sw_flow_key key;
312                 int n_acts;
313
314                 skb = retrieve_skb(ntohl(opo->buffer_id));
315                 if (!skb)
316                         return -ESRCH;
317                 dp_set_origin(chain->dp, ntohs(opo->in_port), skb);
318
319                 n_acts = (ntohs(opo->header.length) - sizeof *opo) 
320                                 / sizeof *opo->u.actions;
321                 flow_extract(skb, ntohs(opo->in_port), &key);
322                 execute_actions(chain->dp, skb, &key, opo->u.actions, n_acts);
323         }
324         return 0;
325 }
326
/* Handles an OFPT_PORT_MOD message by applying the requested flag changes
 * to the port described in the message.  Always returns 0. */
static int
recv_port_mod(struct sw_chain *chain, const void *msg)
{
        const struct ofp_port_mod *opm = msg;

        dp_update_port_flags(chain->dp, &opm->desc);

        return 0;
}
336
337 static int
338 add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
339 {
340         int error = -ENOMEM;
341         int n_acts;
342         struct sw_flow *flow;
343
344
345         /* Check number of actions. */
346         n_acts = (ntohs(ofm->header.length) - sizeof *ofm) / sizeof *ofm->actions;
347         if (n_acts > MAX_ACTIONS) {
348                 error = -E2BIG;
349                 goto error;
350         }
351
352         /* Allocate memory. */
353         flow = flow_alloc(n_acts, GFP_ATOMIC);
354         if (flow == NULL)
355                 goto error;
356
357         /* Fill out flow. */
358         flow_extract_match(&flow->key, &ofm->match);
359         flow->group_id = ntohl(ofm->group_id);
360         flow->max_idle = ntohs(ofm->max_idle);
361         flow->timeout = jiffies + flow->max_idle * HZ;
362         flow->n_actions = n_acts;
363         flow->init_time = jiffies;
364         flow->byte_count = 0;
365         flow->packet_count = 0;
366         atomic_set(&flow->deleted, 0);
367         spin_lock_init(&flow->lock);
368         memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);
369
370         /* Act. */
371         error = chain_insert(chain, flow);
372         if (error)
373                 goto error_free_flow;
374         error = 0;
375         if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
376                 struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
377                 if (skb) {
378                         struct sw_flow_key key;
379                         flow_used(flow, skb);
380                         flow_extract(skb, ntohs(ofm->match.in_port), &key);
381                         execute_actions(chain->dp, skb, &key,
382                                         ofm->actions, n_acts);
383                 }
384                 else
385                         error = -ESRCH;
386         }
387         return error;
388
389 error_free_flow:
390         flow_free(flow);
391 error:
392         if (ntohl(ofm->buffer_id) != (uint32_t) -1)
393                 discard_skb(ntohl(ofm->buffer_id));
394         return error;
395 }
396
397 static int
398 recv_flow(struct sw_chain *chain, const void *msg)
399 {
400         const struct ofp_flow_mod *ofm = msg;
401         uint16_t command = ntohs(ofm->command);
402
403         if (command == OFPFC_ADD) {
404                 return add_flow(chain, ofm);
405         }  else if (command == OFPFC_DELETE) {
406                 struct sw_flow_key key;
407                 flow_extract_match(&key, &ofm->match);
408                 return chain_delete(chain, &key, 0) ? 0 : -ESRCH;
409         } else if (command == OFPFC_DELETE_STRICT) {
410                 struct sw_flow_key key;
411                 flow_extract_match(&key, &ofm->match);
412                 return chain_delete(chain, &key, 1) ? 0 : -ESRCH;
413         } else {
414                 return -ENOTSUPP;
415         }
416 }
417
/* 'msg', which is 'length' bytes long, was received from the control path.
 * Apply it to 'chain'.  Returns 0 on success, -EINVAL for malformed or
 * unsupported-version messages, -ENOSYS for unhandled message types,
 * -EFAULT for messages shorter than their type requires, or whatever the
 * type-specific handler returns. */
int
fwd_control_input(struct sw_chain *chain, const void *msg, size_t length)
{

        /* Per-message-type dispatch entry: minimum valid size and handler. */
        struct openflow_packet {
                size_t min_size;
                int (*handler)(struct sw_chain *, const void *);
        };

        /* Dispatch table indexed by OFPT_* message type; gaps are
         * zero-filled and thus have a NULL handler. */
        static const struct openflow_packet packets[] = {
                [OFPT_CONTROL_HELLO] = {
                        sizeof (struct ofp_control_hello),
                        recv_control_hello,
                },
                [OFPT_PACKET_OUT] = {
                        sizeof (struct ofp_packet_out),
                        recv_packet_out,
                },
                [OFPT_FLOW_MOD] = {
                        sizeof (struct ofp_flow_mod),
                        recv_flow,
                },
                [OFPT_PORT_MOD] = {
                        sizeof (struct ofp_port_mod),
                        recv_port_mod,
                },
        };

        const struct openflow_packet *pkt;
        struct ofp_header *oh;

        if (length < sizeof(struct ofp_header))
                return -EINVAL;

        /* Only protocol version 1 is understood, and the header's
         * self-declared length may not exceed what we actually received. */
        oh = (struct ofp_header *) msg;
        if (oh->version != 1 || oh->type >= ARRAY_SIZE(packets)
                || ntohs(oh->length) > length)
                return -EINVAL;

        pkt = &packets[oh->type];
        if (!pkt->handler)
                return -ENOSYS;
        if (length < pkt->min_size)
                return -EFAULT;

        return pkt->handler(chain, msg);
}
467
/* Packet buffering. */

/* Slots younger than OVERWRITE_SECS are protected from being reused for a
 * newly buffered packet. */
#define OVERWRITE_SECS  1
#define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)

/* One slot in the buffered-packet table. */
struct packet_buffer {
        struct sk_buff *skb;            /* Held packet, or NULL if free. */
        uint32_t cookie;                /* Detects stale buffer ids. */
        unsigned long exp_jiffies;      /* Time the slot may be reused. */
};

/* Fixed-size table of buffered packets, indexed by the low bits of the
 * buffer id; all state below is protected by 'buffer_lock'. */
static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;         /* Most recently used slot. */
static DEFINE_SPINLOCK(buffer_lock);
482
/* Stashes an extra reference to 'skb' in the packet-buffer table so the
 * controller can refer to it later, and returns the id to hand out.
 * Returns (uint32_t) -1 -- the reserved "no buffer" id -- if the candidate
 * slot is still too fresh to overwrite. */
uint32_t fwd_save_skb(struct sk_buff *skb)
{
        struct packet_buffer *p;
        unsigned long int flags;
        uint32_t id;

        spin_lock_irqsave(&buffer_lock, flags);
        buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
        p = &buffers[buffer_idx];
        if (p->skb) {
                /* Don't buffer packet if existing entry is less than
                 * OVERWRITE_SECS old. */
                if (time_before(jiffies, p->exp_jiffies)) {
                        spin_unlock_irqrestore(&buffer_lock, flags);
                        return -1;
                } else 
                        kfree_skb(p->skb);
        }
        /* Don't use maximum cookie value since the all-bits-1 id is
         * special. */
        if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
                p->cookie = 0;
        skb_get(skb);           /* The table holds its own reference. */
        p->skb = skb;
        p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
        /* The id encodes slot index and cookie, so a stale id whose slot
         * has been recycled is detected at lookup time. */
        id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
        spin_unlock_irqrestore(&buffer_lock, flags);

        return id;
}
513
/* Removes and returns the buffered packet with buffer id 'id', or NULL if
 * the id is stale (slot recycled, cookie mismatch) or already consumed.
 * The table's reference to the skb is transferred to the caller. */
static struct sk_buff *retrieve_skb(uint32_t id)
{
        unsigned long int flags;
        struct sk_buff *skb = NULL;
        struct packet_buffer *p;

        spin_lock_irqsave(&buffer_lock, flags);
        p = &buffers[id & PKT_BUFFER_MASK];
        if (p->cookie == id >> PKT_BUFFER_BITS) {
                skb = p->skb;
                p->skb = NULL;
        } else {
                printk("cookie mismatch: %x != %x\n",
                                id >> PKT_BUFFER_BITS, p->cookie);
        }
        spin_unlock_irqrestore(&buffer_lock, flags);

        return skb;
}
533
534 static void discard_skb(uint32_t id)
535 {
536         unsigned long int flags;
537         struct packet_buffer *p;
538
539         spin_lock_irqsave(&buffer_lock, flags);
540         p = &buffers[id & PKT_BUFFER_MASK];
541         if (p->cookie == id >> PKT_BUFFER_BITS) {
542                 kfree_skb(p->skb);
543                 p->skb = NULL;
544         }
545         spin_unlock_irqrestore(&buffer_lock, flags);
546 }
547
548 void fwd_exit(void)
549 {
550         int i;
551
552         for (i = 0; i < N_PKT_BUFFERS; i++)
553                 kfree_skb(buffers[i].skb);
554 }
555
556 /* Utility functions. */
557
/* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
 * the copy.
 * Returns 1 if successful, 0 on failure. */
static int
make_writable(struct sk_buff **pskb)
{
        /* Based on skb_make_writable() in net/netfilter/core.c. */
        struct sk_buff *nskb;

        /* Not exclusive use of packet?  Must copy. */
        if (skb_shared(*pskb) || skb_cloned(*pskb))
                goto copy_skb;

        /* Exclusive owner: just ensure the leading bytes we may rewrite are
         * in the linear data area (64 looks like a header-size guess). */
        return pskb_may_pull(*pskb, 64); /* FIXME? */

copy_skb:
        nskb = skb_copy(*pskb, GFP_ATOMIC);
        if (!nskb)
                return 0;
        BUG_ON(skb_is_nonlinear(nskb));

        /* Rest of kernel will get very unhappy if we pass it a
           suddenly-orphaned skbuff */
        if ((*pskb)->sk)
                skb_set_owner_w(nskb, (*pskb)->sk);
        kfree_skb(*pskb);       /* Caller's reference moves to the copy. */
        *pskb = nskb;
        return 1;
}