Properly track table match counts.
[sliver-openvswitch.git] / datapath / forward.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <net/checksum.h>
#include "forward.h"
#include "datapath.h"
#include "chain.h"
#include "flow.h"

/* FIXME: do we need to use GFP_ATOMIC everywhere here? */

static int make_writable(struct sk_buff **);

static struct sk_buff *retrieve_skb(uint32_t id);
static void discard_skb(uint32_t id);

/* 'skb' was received on 'in_port', a physical switch port between 0 and
 * OFPP_MAX.  Process it according to 'chain'. */
void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb, int in_port)
{
        struct sw_flow_key key;
        struct sw_flow *flow;

        flow_extract(skb, in_port, &key);
        flow = chain_lookup(chain, &key);
        if (likely(flow != NULL)) {
                flow_used(flow, skb);
                execute_actions(chain->dp, skb, &key,
                                flow->actions, flow->n_actions);
        } else {
                dp_output_control(chain->dp, skb, fwd_save_skb(skb),
                                  chain->dp->miss_send_len,
                                  OFPR_NO_MATCH);
        }
}
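
/* Illustrative sketch, not part of the build: how a receive hook might hand a
 * frame to fwd_port_input().  The function consumes 'skb' on every path: a
 * table hit ends in execute_actions(), a miss ends in dp_output_control()
 * with a buffer id from fwd_save_skb() so the controller can later refer
 * back to the queued packet.  'example_rx' and the way the port number is
 * obtained are assumptions for illustration only; the real caller lives in
 * datapath.c. */
#if 0
static void example_rx(struct datapath *dp, struct sk_buff *skb, int port_no)
{
        /* After this call the skb must not be touched again by the caller. */
        fwd_port_input(dp->chain, skb, port_no);
}
#endif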

static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
                        int out_port)
{
        if (!skb)
                return -ENOMEM;
        return (likely(out_port != OFPP_CONTROLLER)
                ? dp_output_port(dp, skb, out_port)
                : dp_output_control(dp, skb, fwd_save_skb(skb),
                                        max_len, OFPR_ACTION));
}

void execute_actions(struct datapath *dp, struct sk_buff *skb,
                                const struct sw_flow_key *key,
                                const struct ofp_action *actions, int n_actions)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, in which case doing a clone
         * and then freeing the original skbuff would be wasteful.  So the
         * following code is slightly obscure just to avoid that. */
        int prev_port;
        size_t max_len = 0;      /* Initialize to make compiler happy */
        uint16_t eth_proto;
        int i;

        prev_port = -1;
        eth_proto = ntohs(key->dl_type);

        for (i = 0; i < n_actions; i++) {
                const struct ofp_action *a = &actions[i];

                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC),
                                  max_len, prev_port);
                        prev_port = -1;
                }

                if (likely(a->type == htons(OFPAT_OUTPUT))) {
                        prev_port = ntohs(a->arg.output.port);
                        max_len = ntohs(a->arg.output.max_len);
                } else {
                        if (!make_writable(&skb)) {
                                if (net_ratelimit())
                                        printk("make_writable failed\n");
                                break;
                        }
                        skb = execute_setter(skb, eth_proto, key, a);
                        if (!skb) {
                                if (net_ratelimit())
                                        printk("execute_setter lost skb\n");
                                return;
                        }
                }
        }
        if (prev_port != -1)
                do_output(dp, skb, max_len, prev_port);
        else
                kfree_skb(skb);
}
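
/* Illustrative sketch, not part of the build: the clone-deferral pattern that
 * execute_actions() uses, shown on a bare list of output ports.  Every output
 * except the last sends a clone; the final output consumes the original skb,
 * so the common single-output case never clones at all and the skb is freed
 * only if no output action ran. */
#if 0
static void example_multi_output(struct datapath *dp, struct sk_buff *skb,
                                 const int *ports, int n_ports)
{
        int prev_port = -1;
        int i;

        for (i = 0; i < n_ports; i++) {
                if (prev_port != -1)
                        dp_output_port(dp, skb_clone(skb, GFP_ATOMIC),
                                       prev_port);
                prev_port = ports[i];
        }
        if (prev_port != -1)
                dp_output_port(dp, skb, prev_port);     /* original, no clone */
        else
                kfree_skb(skb);
}
#endif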

/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
 * covered by the sum has been changed from 'from' to 'to'.  If set,
 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
 * Based on nf_proto_csum_replace4. */
static void update_csum(__sum16 *sum, struct sk_buff *skb,
                        __be32 from, __be32 to, int pseudohdr)
{
        __be32 diff[] = { ~from, to };
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                *sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
                                ~csum_unfold(*sum)));
                if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
                        skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                                ~skb->csum);
        } else if (pseudohdr)
                *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
                                csum_unfold(*sum)));
}
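
/* Worked example, not part of the build: the arithmetic behind update_csum().
 * Feeding { ~from, to } into csum_partial() on top of the complemented old
 * sum is the incremental update of RFC 1624, HC' = ~(~HC + ~m + m'), so a
 * single 4-byte rewrite never requires re-summing the whole packet.  The
 * sketch below is a plain user-space version over 16-bit words; the names
 * are invented for illustration. */
#if 0
#include <stdint.h>

/* One's-complement 16-bit addition with end-around carry. */
static uint16_t ocsum_add(uint16_t a, uint16_t b)
{
        uint32_t s = (uint32_t) a + b;
        return (uint16_t) ((s & 0xffff) + (s >> 16));
}

/* Recompute 'check' after a 32-bit field changed from 'from' to 'to'. */
static uint16_t example_csum_replace4(uint16_t check, uint32_t from, uint32_t to)
{
        uint16_t sum = ~check;

        sum = ocsum_add(sum, ~(uint16_t) (from >> 16));
        sum = ocsum_add(sum, ~(uint16_t) (from & 0xffff));
        sum = ocsum_add(sum, (uint16_t) (to >> 16));
        sum = ocsum_add(sum, (uint16_t) (to & 0xffff));
        return ~sum;
}
#endif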

static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                struct iphdr *nh = ip_hdr(skb);
                uint32_t new, *field;

                new = a->arg.nw_addr;

                if (a->type == htons(OFPAT_SET_NW_SRC))
                        field = &nh->saddr;
                else
                        field = &nh->daddr;

                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                }
                update_csum(&nh->check, skb, *field, new, 0);
                *field = new;
        }
}

static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                uint16_t new, *field;

                new = a->arg.tp;

                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);

                        if (a->type == htons(OFPAT_SET_TP_SRC))
                                field = &th->source;
                        else
                                field = &th->dest;

                        update_csum(&th->check, skb, *field, new, 1);
                        *field = new;
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);

                        if (a->type == htons(OFPAT_SET_TP_SRC))
                                field = &th->source;
                        else
                                field = &th->dest;

                        update_csum(&th->check, skb, *field, new, 1);
                        *field = new;
                }
        }
}

static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
{
        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
        struct ethhdr *eh;

        /* Verify we were given a vlan packet */
        if (vh->h_vlan_proto != htons(ETH_P_8021Q))
                return skb;

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);

        eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return skb;
}
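
/* Illustrative note, not part of the build: vlan_pull_tag() strips the 4-byte
 * 802.1Q tag by sliding the two MAC addresses forward over it and then
 * pulling the now-dead bytes off the front of the skb, which is cheaper than
 * moving the whole payload down:
 *
 *   before:          | dst(6) | src(6) | TPID 0x8100(2) | TCI(2) | type(2) | payload
 *   after skb_pull:  | dst(6) | src(6) | type(2) | payload
 *
 * The same untagging on a plain byte buffer, moving the tail instead of the
 * head, would look like the sketch below; 'example_strip_vlan' is an
 * invented name. */
#if 0
#include <stdint.h>
#include <string.h>

static size_t example_strip_vlan(uint8_t *frame, size_t len)
{
        /* Bytes 12..13 hold the EtherType; 0x8100 marks an 802.1Q tag. */
        if (len < 18 || frame[12] != 0x81 || frame[13] != 0x00)
                return len;                      /* not tagged, unchanged */
        /* Drop the tag by sliding the real EtherType and payload down 4. */
        memmove(frame + 12, frame + 16, len - 16);
        return len - 4;
}
#endif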

static struct sk_buff *modify_vlan(struct sk_buff *skb,
                const struct sw_flow_key *key, const struct ofp_action *a)
{
        uint16_t new_id = ntohs(a->arg.vlan_id);

        if (new_id != OFP_VLAN_NONE) {
                if (key->dl_vlan != htons(OFP_VLAN_NONE)) {
                        /* Modify vlan id, but maintain other TCI values */
                        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
                        vh->h_vlan_TCI = (vh->h_vlan_TCI
                                        & ~(htons(VLAN_VID_MASK))) | a->arg.vlan_id;
                } else {
                        /* Add vlan header */

                        /* xxx The vlan_put_tag function doesn't seem to work
                         * xxx reliably when it attempts to use the hardware-accelerated
                         * xxx version.  We'll directly use the software version
                         * xxx until the problem can be diagnosed.
                         */
                        skb = __vlan_put_tag(skb, new_id);
                }
        } else {
                /* Remove an existing vlan header if it exists */
                vlan_pull_tag(skb);
        }

        return skb;
}
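
/* Worked example, not part of the build: the TCI rewrite above keeps the
 * 3-bit priority and the CFI bit and replaces only the 12-bit VLAN id.  In
 * host byte order, with VLAN_VID_MASK == 0x0fff:
 *
 *     old TCI = 0xa064   (priority 5, CFI 0, VID 100)
 *     new VID = 0x00c8   (200)
 *     new TCI = (0xa064 & ~0x0fff) | 0x00c8 = 0xa0c8
 *
 * In the code both operands are big-endian on the wire, which is why the
 * mask is wrapped in htons() and a->arg.vlan_id is OR'd in as-is. */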

struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
                        const struct sw_flow_key *key, const struct ofp_action *a)
{
        switch (ntohs(a->type)) {
        case OFPAT_SET_DL_VLAN:
                skb = modify_vlan(skb, key, a);
                break;

        case OFPAT_SET_DL_SRC: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
                break;
        }
        case OFPAT_SET_DL_DST: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
                break;
        }

        case OFPAT_SET_NW_SRC:
        case OFPAT_SET_NW_DST:
                modify_nh(skb, eth_proto, key->nw_proto, a);
                break;

        case OFPAT_SET_TP_SRC:
        case OFPAT_SET_TP_DST:
                modify_th(skb, eth_proto, key->nw_proto, a);
                break;

        default:
                if (net_ratelimit())
                        printk("execute_setter: unknown action: %d\n", ntohs(a->type));
        }

        return skb;
}

static int
recv_features_request(struct sw_chain *chain, const struct sender *sender,
                      const void *msg)
{
        return dp_send_features_reply(chain->dp, sender);
}

static int
recv_get_config_request(struct sw_chain *chain, const struct sender *sender,
                        const void *msg)
{
        return dp_send_config_reply(chain->dp, sender);
}

static int
recv_set_config(struct sw_chain *chain, const struct sender *sender,
                const void *msg)
{
        const struct ofp_switch_config *osc = msg;

        chain->dp->flags = ntohs(osc->flags);
        chain->dp->miss_send_len = ntohs(osc->miss_send_len);

        return 0;
}

static int
recv_packet_out(struct sw_chain *chain, const struct sender *sender,
                const void *msg)
{
        const struct ofp_packet_out *opo = msg;
        struct sk_buff *skb;
        struct vlan_ethhdr *mac;
        int nh_ofs;

        if (ntohl(opo->buffer_id) == (uint32_t) -1) {
                int data_len = ntohs(opo->header.length) - sizeof *opo;

                /* FIXME: there is likely a way to reuse the data in msg. */
                skb = alloc_skb(data_len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                /* FIXME?  We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
                 * we're just transmitting this raw without examining anything
                 * at those layers. */
                memcpy(skb_put(skb, data_len), opo->u.data, data_len);
                dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

                skb_set_mac_header(skb, 0);
                mac = vlan_eth_hdr(skb);
                if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
                        nh_ofs = sizeof(struct ethhdr);
                else
                        nh_ofs = sizeof(struct vlan_ethhdr);
                skb_set_network_header(skb, nh_ofs);

                dp_output_port(chain->dp, skb, ntohs(opo->out_port));
        } else {
                struct sw_flow_key key;
                int n_acts;

                skb = retrieve_skb(ntohl(opo->buffer_id));
                if (!skb)
                        return -ESRCH;
                dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

                n_acts = (ntohs(opo->header.length) - sizeof *opo)
                                / sizeof *opo->u.actions;
                flow_extract(skb, ntohs(opo->in_port), &key);
                execute_actions(chain->dp, skb, &key, opo->u.actions, n_acts);
        }
        return 0;
}
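
/* Illustrative note, not part of the build: ofp_packet_out carries either raw
 * frame data or an action list in its trailing union, and the only way to
 * size that tail is the total message length from the OpenFlow header,
 * exactly as the two branches above compute:
 *
 *     data_len = ntohs(opo->header.length) - sizeof *opo
 *     n_acts   = (ntohs(opo->header.length) - sizeof *opo)
 *                        / sizeof *opo->u.actions
 *
 * So a packet_out whose total length equals sizeof *opo carries no data and
 * no actions at all. */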

static int
recv_port_mod(struct sw_chain *chain, const struct sender *sender,
              const void *msg)
{
        const struct ofp_port_mod *opm = msg;

        dp_update_port_flags(chain->dp, &opm->desc);

        return 0;
}

static int
recv_echo_request(struct sw_chain *chain, const struct sender *sender,
                  const void *msg)
{
        return dp_send_echo_reply(chain->dp, sender, msg);
}

static int
recv_echo_reply(struct sw_chain *chain, const struct sender *sender,
                  const void *msg)
{
        return 0;
}

static int
add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
{
        int error = -ENOMEM;
        int i;
        int n_acts;
        struct sw_flow *flow;

        /* To prevent loops, make sure there's no action to send to the
         * OFP_TABLE virtual port.  (OFPP_NONE is not a valid output port
         * either, so reject it here too.)
         */
        n_acts = (ntohs(ofm->header.length) - sizeof *ofm) / sizeof *ofm->actions;
        for (i = 0; i < n_acts; i++) {
                const struct ofp_action *a = &ofm->actions[i];

                if (a->type == htons(OFPAT_OUTPUT)
                                        && (a->arg.output.port == htons(OFPP_TABLE)
                                                || a->arg.output.port == htons(OFPP_NONE))) {
                        /* xxx Send fancy new error message? */
                        goto error;
                }
        }

        /* Allocate memory. */
        flow = flow_alloc(n_acts, GFP_ATOMIC);
        if (flow == NULL)
                goto error;

        /* Fill out flow. */
        flow_extract_match(&flow->key, &ofm->match);
        flow->max_idle = ntohs(ofm->max_idle);
        flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
        flow->timeout = jiffies + flow->max_idle * HZ;
        flow->n_actions = n_acts;
        flow->init_time = jiffies;
        flow->byte_count = 0;
        flow->packet_count = 0;
        spin_lock_init(&flow->lock);
        memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);

        /* Act. */
        error = chain_insert(chain, flow);
        if (error)
                goto error_free_flow;
        error = 0;
        if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
                struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
                if (skb) {
                        struct sw_flow_key key;
                        flow_used(flow, skb);
                        flow_extract(skb, ntohs(ofm->match.in_port), &key);
                        execute_actions(chain->dp, skb, &key,
                                        ofm->actions, n_acts);
                } else
                        error = -ESRCH;
        }
        return error;

error_free_flow:
        flow_free(flow);
error:
        if (ntohl(ofm->buffer_id) != (uint32_t) -1)
                discard_skb(ntohl(ofm->buffer_id));
        return error;
}
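
/* Worked note, not part of the build: the priority assignment above gives
 * exact-match flows an automatic top priority.  Assuming the priority field
 * is 16 bits wide, storing -1 yields 0xffff, so a fully specified flow
 * (key.wildcards == 0) always outranks any wildcarded flow, whose priority
 * is taken from the flow_mod as sent.  The OFPFC_DELETE_STRICT handling
 * below applies the same rule when matching flows to delete. */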

static int
recv_flow(struct sw_chain *chain, const struct sender *sender, const void *msg)
{
        const struct ofp_flow_mod *ofm = msg;
        uint16_t command = ntohs(ofm->command);

        if (command == OFPFC_ADD) {
                return add_flow(chain, ofm);
        } else if (command == OFPFC_DELETE) {
                struct sw_flow_key key;
                flow_extract_match(&key, &ofm->match);
                return chain_delete(chain, &key, 0, 0) ? 0 : -ESRCH;
        } else if (command == OFPFC_DELETE_STRICT) {
                struct sw_flow_key key;
                uint16_t priority;
                flow_extract_match(&key, &ofm->match);
                priority = key.wildcards ? ntohs(ofm->priority) : -1;
                return chain_delete(chain, &key, priority, 1) ? 0 : -ESRCH;
        } else {
                return -ENOTSUPP;
        }
}

/* 'msg', which is 'length' bytes long, was received across Netlink from
 * 'sender'.  Apply it to 'chain'. */
int
fwd_control_input(struct sw_chain *chain, const struct sender *sender,
                  const void *msg, size_t length)
{
        struct openflow_packet {
                size_t min_size;
                int (*handler)(struct sw_chain *, const struct sender *,
                               const void *);
        };

        static const struct openflow_packet packets[] = {
                [OFPT_FEATURES_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_features_request,
                },
                [OFPT_GET_CONFIG_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_get_config_request,
                },
                [OFPT_SET_CONFIG] = {
                        sizeof (struct ofp_switch_config),
                        recv_set_config,
                },
                [OFPT_PACKET_OUT] = {
                        sizeof (struct ofp_packet_out),
                        recv_packet_out,
                },
                [OFPT_FLOW_MOD] = {
                        sizeof (struct ofp_flow_mod),
                        recv_flow,
                },
                [OFPT_PORT_MOD] = {
                        sizeof (struct ofp_port_mod),
                        recv_port_mod,
                },
                [OFPT_ECHO_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_echo_request,
                },
                [OFPT_ECHO_REPLY] = {
                        sizeof (struct ofp_header),
                        recv_echo_reply,
                },
        };

        const struct openflow_packet *pkt;
        struct ofp_header *oh;

        oh = (struct ofp_header *) msg;
        if (oh->version != OFP_VERSION || oh->type >= ARRAY_SIZE(packets)
                || ntohs(oh->length) > length)
                return -EINVAL;

        pkt = &packets[oh->type];
        if (!pkt->handler)
                return -ENOSYS;
        if (length < pkt->min_size)
                return -EFAULT;

        return pkt->handler(chain, sender, msg);
}
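
/* Illustrative sketch, not part of the build: fwd_control_input() validates
 * the OpenFlow header (version, type within packets[], declared length not
 * exceeding the received length, received length at least min_size) before
 * dispatching, so a handler can safely read its fixed-size struct.  Adding a
 * message type means adding one designated-initializer slot to packets[]
 * plus a handler with this shape; 'recv_example' is an invented name. */
#if 0
static int
recv_example(struct sw_chain *chain, const struct sender *sender,
             const void *msg)
{
        const struct ofp_header *oh = msg;      /* at least min_size bytes */

        (void) chain; (void) sender; (void) oh;
        return 0;                               /* 0 on success, -errno on error */
}
#endif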

/* Packet buffering. */

#define OVERWRITE_SECS  1
#define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)

struct packet_buffer {
        struct sk_buff *skb;
        uint32_t cookie;
        unsigned long exp_jiffies;
};

static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;
static DEFINE_SPINLOCK(buffer_lock);

uint32_t fwd_save_skb(struct sk_buff *skb)
{
        struct packet_buffer *p;
        unsigned long int flags;
        uint32_t id;

        spin_lock_irqsave(&buffer_lock, flags);
        buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
        p = &buffers[buffer_idx];
        if (p->skb) {
                /* Don't buffer packet if existing entry is less than
                 * OVERWRITE_SECS old. */
                if (time_before(jiffies, p->exp_jiffies)) {
                        spin_unlock_irqrestore(&buffer_lock, flags);
                        return -1;
                } else
                        kfree_skb(p->skb);
        }
        /* Don't use maximum cookie value since the all-bits-1 id is
         * special. */
        if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
                p->cookie = 0;
        skb_get(skb);
        p->skb = skb;
        p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
        id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
        spin_unlock_irqrestore(&buffer_lock, flags);

        return id;
}
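
/* Worked example, not part of the build: the id returned to the controller
 * packs the slot index into the low PKT_BUFFER_BITS bits and the slot's
 * reuse cookie into the bits above it.  Assuming, purely for illustration,
 * PKT_BUFFER_BITS == 8 (256 slots):
 *
 *     slot 0x2a, cookie 0x17  ->  id = 0x2a | (0x17 << 8) = 0x172a
 *
 * retrieve_skb() and discard_skb() below reverse this with
 * id & PKT_BUFFER_MASK and id >> PKT_BUFFER_BITS and refuse to act if the
 * cookie no longer matches, meaning the slot has been overwritten since.
 * The all-ones id is reserved to mean "not buffered", which is why
 * fwd_save_skb() returns -1 when it declines to overwrite a fresh entry and
 * why the cookie is never allowed to reach its maximum value. */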

static struct sk_buff *retrieve_skb(uint32_t id)
{
        unsigned long int flags;
        struct sk_buff *skb = NULL;
        struct packet_buffer *p;

        spin_lock_irqsave(&buffer_lock, flags);
        p = &buffers[id & PKT_BUFFER_MASK];
        if (p->cookie == id >> PKT_BUFFER_BITS) {
                skb = p->skb;
                p->skb = NULL;
        } else {
                printk("cookie mismatch: %x != %x\n",
                                id >> PKT_BUFFER_BITS, p->cookie);
        }
        spin_unlock_irqrestore(&buffer_lock, flags);

        return skb;
}

void fwd_discard_all(void)
{
        unsigned long int flags;
        int i;

        spin_lock_irqsave(&buffer_lock, flags);
        for (i = 0; i < N_PKT_BUFFERS; i++) {
                kfree_skb(buffers[i].skb);
                buffers[i].skb = NULL;
        }
        spin_unlock_irqrestore(&buffer_lock, flags);
}

static void discard_skb(uint32_t id)
{
        unsigned long int flags;
        struct packet_buffer *p;

        spin_lock_irqsave(&buffer_lock, flags);
        p = &buffers[id & PKT_BUFFER_MASK];
        if (p->cookie == id >> PKT_BUFFER_BITS) {
                kfree_skb(p->skb);
                p->skb = NULL;
        }
        spin_unlock_irqrestore(&buffer_lock, flags);
}

void fwd_exit(void)
{
        fwd_discard_all();
}

/* Utility functions. */

/* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
 * the copy.
 * Returns 1 if successful, 0 on failure. */
static int
make_writable(struct sk_buff **pskb)
{
        /* Based on skb_make_writable() in net/netfilter/core.c. */
        struct sk_buff *nskb;

        /* Not exclusive use of packet?  Must copy. */
        if (skb_shared(*pskb) || skb_cloned(*pskb))
                goto copy_skb;

        return pskb_may_pull(*pskb, 40); /* FIXME? */

copy_skb:
        nskb = skb_copy(*pskb, GFP_ATOMIC);
        if (!nskb)
                return 0;
        BUG_ON(skb_is_nonlinear(nskb));

        /* Rest of kernel will get very unhappy if we pass it a
           suddenly-orphaned skbuff */
        if ((*pskb)->sk)
                skb_set_owner_w(nskb, (*pskb)->sk);
        kfree_skb(*pskb);
        *pskb = nskb;
        return 1;
}
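
/* Illustrative sketch, not part of the build: make_writable() may replace the
 * caller's skb with a private copy, which is why it takes a struct sk_buff **
 * and why execute_actions() keeps using the possibly-updated 'skb' afterwards.
 * A field-rewriting helper would follow the same pattern; 'example_set_dl_dst'
 * is an invented name. */
#if 0
static int example_set_dl_dst(struct sk_buff **pskb, const uint8_t mac[ETH_ALEN])
{
        if (!make_writable(pskb))
                return -ENOMEM;         /* shared or cloned, and the copy failed */
        memcpy(eth_hdr(*pskb)->h_dest, mac, ETH_ALEN);
        return 0;
}
#endif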