Implement OpenFlow statistics in switches and in dpctl.
[sliver-openvswitch.git] / datapath / forward.c
1 /*
2  * Distributed under the terms of the GNU GPL version 2.
3  * Copyright (c) 2007, 2008 The Board of Trustees of The Leland 
4  * Stanford Junior University
5  */
6
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/in.h>
10 #include <linux/ip.h>
11 #include <linux/tcp.h>
12 #include <linux/udp.h>
13 #include <linux/in6.h>
14 #include <asm/uaccess.h>
15 #include <linux/types.h>
16 #include <net/checksum.h>
17 #include "forward.h"
18 #include "datapath.h"
19 #include "chain.h"
20 #include "flow.h"
21
22 /* FIXME: do we need to use GFP_ATOMIC everywhere here? */
23
24 static void execute_actions(struct datapath *, struct sk_buff *,
25                                 const struct sw_flow_key *,
26                                 const struct ofp_action *, int n_actions);
27 static int make_writable(struct sk_buff **);
28
29 static struct sk_buff *retrieve_skb(uint32_t id);
30 static void discard_skb(uint32_t id);
31
32 /* 'skb' was received on 'in_port', a physical switch port between 0 and
33  * OFPP_MAX.  Process it according to 'chain'. */
34 void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb, int in_port)
35 {
36         struct sw_flow_key key;
37         struct sw_flow *flow;
38
39         flow_extract(skb, in_port, &key);
40         flow = chain_lookup(chain, &key);
41         if (likely(flow != NULL)) {
42                 flow_used(flow, skb);
43                 execute_actions(chain->dp, skb, &key,
44                                 flow->actions, flow->n_actions);
45         } else {
46                 dp_output_control(chain->dp, skb, fwd_save_skb(skb), 
47                                   chain->dp->config.miss_send_len,
48                                   OFPR_NO_MATCH);
49         }
50 }
51
52 static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
53                         int out_port)
54 {
55         if (!skb)
56                 return -ENOMEM;
57         return (likely(out_port != OFPP_CONTROLLER)
58                 ? dp_output_port(dp, skb, out_port)
59                 : dp_output_control(dp, skb, fwd_save_skb(skb),
60                                          max_len, OFPR_ACTION));
61 }
62
63 static void execute_actions(struct datapath *dp, struct sk_buff *skb,
64                                 const struct sw_flow_key *key,
65                                 const struct ofp_action *actions, int n_actions)
66 {
67         /* Every output action needs a separate clone of 'skb', but the common
68          * case is just a single output action, so that doing a clone and
69          * then freeing the original skbuff is wasteful.  So the following code
70          * is slightly obscure just to avoid that. */
71         int prev_port;
72         size_t max_len=0;        /* Initialze to make compiler happy */
73         uint16_t eth_proto;
74         int i;
75
76         prev_port = -1;
77         eth_proto = ntohs(key->dl_type);
78
79         for (i = 0; i < n_actions; i++) {
80                 const struct ofp_action *a = &actions[i];
81
82                 if (prev_port != -1) {
83                         do_output(dp, skb_clone(skb, GFP_ATOMIC),
84                                   max_len, prev_port);
85                         prev_port = -1;
86                 }
87
88                 if (likely(a->type == htons(OFPAT_OUTPUT))) {
89                         prev_port = ntohs(a->arg.output.port);
90                         max_len = ntohs(a->arg.output.max_len);
91                 } else {
92                         if (!make_writable(&skb)) {
93                                 printk("make_writable failed\n");
94                                 break;
95                         }
96                         skb = execute_setter(skb, eth_proto, key, a);
97                 }
98         }
99         if (prev_port != -1)
100                 do_output(dp, skb, max_len, prev_port);
101         else
102                 kfree_skb(skb);
103 }
104
/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
 * covered by the sum has been changed from 'from' to 'to'.  If set,
 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
 * Based on nf_proto_csum_replace4. */
static void update_csum(__sum16 *sum, struct sk_buff *skb,
                        __be32 from, __be32 to, int pseudohdr)
{
        /* Folding {~from, to} into the existing value performs an
         * incremental ones-complement checksum update (RFC 1624 style). */
        __be32 diff[] = { ~from, to };
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                *sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
                                ~csum_unfold(*sum)));
                if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
                        /* Hardware gave us a full checksum in skb->csum; it
                         * must track the pseudo-header change too. */
                        skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                                ~skb->csum);
        } else if (pseudohdr)
                /* Checksum will be finished by hardware later; only the
                 * pseudo-header seed needs adjusting now. */
                *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
                                csum_unfold(*sum)));
}
123
124 static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
125                         uint8_t nw_proto, const struct ofp_action *a)
126 {
127         if (eth_proto == ETH_P_IP) {
128                 struct iphdr *nh = ip_hdr(skb);
129                 uint32_t new, *field;
130
131                 new = a->arg.nw_addr;
132
133                 if (a->type == htons(OFPAT_SET_NW_SRC))
134                         field = &nh->saddr;
135                 else
136                         field = &nh->daddr;
137
138                 if (nw_proto == IPPROTO_TCP) {
139                         struct tcphdr *th = tcp_hdr(skb);
140                         update_csum(&th->check, skb, *field, new, 1);
141                 } else if (nw_proto == IPPROTO_UDP) {
142                         struct udphdr *th = udp_hdr(skb);
143                         update_csum(&th->check, skb, *field, new, 1);
144                 }
145                 update_csum(&nh->check, skb, *field, new, 0);
146                 *field = new;
147         }
148 }
149
150 static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
151                         uint8_t nw_proto, const struct ofp_action *a)
152 {
153         if (eth_proto == ETH_P_IP) {
154                 uint16_t new, *field;
155
156                 new = a->arg.tp;
157
158                 if (nw_proto == IPPROTO_TCP) {
159                         struct tcphdr *th = tcp_hdr(skb);
160
161                         if (a->type == htons(OFPAT_SET_TP_SRC))
162                                 field = &th->source;
163                         else
164                                 field = &th->dest;
165
166                         update_csum(&th->check, skb, *field, new, 1);
167                         *field = new;
168                 } else if (nw_proto == IPPROTO_UDP) {
169                         struct udphdr *th = udp_hdr(skb);
170
171                         if (a->type == htons(OFPAT_SET_TP_SRC))
172                                 field = &th->source;
173                         else
174                                 field = &th->dest;
175
176                         update_csum(&th->check, skb, *field, new, 1);
177                         *field = new;
178                 }
179         }
180 }
181
182 static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
183 {
184         struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
185         struct ethhdr *eh;
186
187
188         /* Verify we were given a vlan packet */
189         if (vh->h_vlan_proto != htons(ETH_P_8021Q))
190                 return skb;
191
192         memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);
193
194         eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
195
196         skb->protocol = eh->h_proto;
197         skb->mac_header += VLAN_HLEN;
198
199         return skb;
200 }
201
/* Applies VLAN action 'a' to 'skb': rewrites the VLAN id of an already-tagged
 * packet, inserts a new tag, or strips the existing tag (OFP_VLAN_NONE).
 * Returns the possibly-reallocated skb.
 * NOTE(review): vlan_put_tag() can fail and return NULL; that NULL is
 * returned to the caller — verify callers handle it. */
static struct sk_buff *modify_vlan(struct sk_buff *skb, 
                const struct sw_flow_key *key, const struct ofp_action *a)
{
        /* NOTE(review): new_id is read without byte-order conversion, yet is
         * compared to host-order OFP_VLAN_NONE and wrapped in htons() below —
         * confirm the wire byte order of arg.vlan_id against the protocol
         * header definition. */
        uint16_t new_id = a->arg.vlan_id;

        if (new_id != OFP_VLAN_NONE) {
                if (key->dl_vlan != htons(OFP_VLAN_NONE)) {
                        /* Modify vlan id, but maintain other TCI values */
                        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
                        vh->h_vlan_TCI = (vh->h_vlan_TCI 
                                        & ~(htons(VLAN_VID_MASK))) | htons(new_id);
                } else  {
                        /* Add vlan header */
                        skb = vlan_put_tag(skb, new_id);
                }
        } else  {
                /* Remove an existing vlan header if it exists */
                vlan_pull_tag(skb);
        }

        return skb;
}
224
225 struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
226                         const struct sw_flow_key *key, const struct ofp_action *a)
227 {
228         switch (ntohs(a->type)) {
229         case OFPAT_SET_DL_VLAN:
230                 skb = modify_vlan(skb, key, a);
231                 break;
232
233         case OFPAT_SET_DL_SRC: {
234                 struct ethhdr *eh = eth_hdr(skb);
235                 memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
236                 break;
237         }
238         case OFPAT_SET_DL_DST: {
239                 struct ethhdr *eh = eth_hdr(skb);
240                 memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
241                 break;
242         }
243
244         case OFPAT_SET_NW_SRC:
245         case OFPAT_SET_NW_DST:
246                 modify_nh(skb, eth_proto, key->nw_proto, a);
247                 break;
248
249         case OFPAT_SET_TP_SRC:
250         case OFPAT_SET_TP_DST:
251                 modify_th(skb, eth_proto, key->nw_proto, a);
252                 break;
253         
254         default:
255                 if (net_ratelimit())
256                         printk("execute_setter: unknown action: %d\n", ntohs(a->type));
257         }
258
259         return skb;
260 }
261
/* OFPT_FEATURES_REQUEST handler: replies to 'sender' with the datapath's
 * feature description.  'msg' carries no payload beyond the header. */
static int
recv_features_request(struct sw_chain *chain, const struct sender *sender,
                      const void *msg) 
{
        return dp_send_features_reply(chain->dp, sender);
}
268
/* OFPT_GET_CONFIG_REQUEST handler: replies to 'sender' with the switch's
 * current configuration.  'msg' carries no payload beyond the header. */
static int
recv_get_config_request(struct sw_chain *chain, const struct sender *sender,
                        const void *msg)
{
        return dp_send_config_reply(chain->dp, sender);
}
275
276 static int
277 recv_set_config(struct sw_chain *chain, const struct sender *sender,
278                 const void *msg)
279 {
280         const struct ofp_switch_config *osc = msg;
281         chain->dp->config = *osc;
282         return 0;
283 }
284
/* OFPT_PACKET_OUT handler: injects a packet from the controller into the
 * datapath.  The packet either arrives inline in 'msg' (buffer_id all-ones)
 * and is transmitted on out_port, or names a previously buffered packet to
 * which the message's action list is applied. */
static int
recv_packet_out(struct sw_chain *chain, const struct sender *sender,
                const void *msg)
{
        const struct ofp_packet_out *opo = msg;
        struct sk_buff *skb;
        struct vlan_ethhdr *mac;
        int nh_ofs;

        if (ntohl(opo->buffer_id) == (uint32_t) -1) {
                /* Inline data case: the frame follows the fixed header. */
                int data_len = ntohs(opo->header.length) - sizeof *opo;

                /* FIXME: there is likely a way to reuse the data in msg. */
                skb = alloc_skb(data_len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                /* FIXME?  We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
                 * we're just transmitting this raw without examining anything
                 * at those layers. */
                memcpy(skb_put(skb, data_len), opo->u.data, data_len);
                dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

                /* Point the MAC/network header offsets at the copied frame,
                 * accounting for an 802.1Q tag if one is present. */
                skb_set_mac_header(skb, 0);
                mac = vlan_eth_hdr(skb);
                if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
                        nh_ofs = sizeof(struct ethhdr);
                else
                        nh_ofs = sizeof(struct vlan_ethhdr);
                skb_set_network_header(skb, nh_ofs);

                dp_output_port(chain->dp, skb, ntohs(opo->out_port));
        } else {
                /* Buffered-packet case: look up the saved skb by id and run
                 * the actions that fill the rest of the message. */
                struct sw_flow_key key;
                int n_acts;

                skb = retrieve_skb(ntohl(opo->buffer_id));
                if (!skb)
                        return -ESRCH;
                dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

                n_acts = (ntohs(opo->header.length) - sizeof *opo) 
                                / sizeof *opo->u.actions;
                flow_extract(skb, ntohs(opo->in_port), &key);
                execute_actions(chain->dp, skb, &key, opo->u.actions, n_acts);
        }
        return 0;
}
333
/* OFPT_PORT_MOD handler: applies the controller's updated flags for one
 * switch port described in 'msg'. */
static int
recv_port_mod(struct sw_chain *chain, const struct sender *sender,
              const void *msg)
{
        const struct ofp_port_mod *opm = msg;

        dp_update_port_flags(chain->dp, &opm->desc);

        return 0;
}
344
/* Handles an OFPFC_ADD flow_mod: allocates a flow from 'ofm', inserts it
 * into 'chain', and — if the message names a buffered packet — applies the
 * new flow's actions to that packet.  Returns 0 on success or a negative
 * errno; any referenced packet buffer is consumed either way. */
static int
add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
{
        int error = -ENOMEM;
        int n_acts;
        struct sw_flow *flow;


        /* Check number of actions. */
        n_acts = (ntohs(ofm->header.length) - sizeof *ofm) / sizeof *ofm->actions;
        if (n_acts > MAX_ACTIONS) {
                error = -E2BIG;
                goto error;
        }

        /* Allocate memory. */
        flow = flow_alloc(n_acts, GFP_ATOMIC);
        if (flow == NULL)
                goto error;

        /* Fill out flow. */
        flow_extract_match(&flow->key, &ofm->match);
        flow->group_id = ntohl(ofm->group_id);
        flow->max_idle = ntohs(ofm->max_idle);
        flow->timeout = jiffies + flow->max_idle * HZ;
        flow->n_actions = n_acts;
        flow->init_time = jiffies;
        flow->byte_count = 0;
        flow->packet_count = 0;
        atomic_set(&flow->deleted, 0);
        spin_lock_init(&flow->lock);
        memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);

        /* Act. */
        error = chain_insert(chain, flow);
        if (error)
                goto error_free_flow;
        error = 0;
        if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
                /* The controller buffered the packet that triggered this
                 * flow; release it through the new flow's actions now. */
                struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
                if (skb) {
                        struct sw_flow_key key;
                        flow_used(flow, skb);
                        flow_extract(skb, ntohs(ofm->match.in_port), &key);
                        execute_actions(chain->dp, skb, &key,
                                        ofm->actions, n_acts);
                }
                else
                        /* Stale or overwritten buffer id; the flow itself
                         * remains installed. */
                        error = -ESRCH;
        }
        return error;

error_free_flow:
        flow_free(flow);
error:
        /* Drop any buffered packet so the slot can be reused. */
        if (ntohl(ofm->buffer_id) != (uint32_t) -1)
                discard_skb(ntohl(ofm->buffer_id));
        return error;
}
404
405 static int
406 recv_flow(struct sw_chain *chain, const struct sender *sender, const void *msg)
407 {
408         const struct ofp_flow_mod *ofm = msg;
409         uint16_t command = ntohs(ofm->command);
410
411         if (command == OFPFC_ADD) {
412                 return add_flow(chain, ofm);
413         }  else if (command == OFPFC_DELETE) {
414                 struct sw_flow_key key;
415                 flow_extract_match(&key, &ofm->match);
416                 return chain_delete(chain, &key, 0) ? 0 : -ESRCH;
417         } else if (command == OFPFC_DELETE_STRICT) {
418                 struct sw_flow_key key;
419                 flow_extract_match(&key, &ofm->match);
420                 return chain_delete(chain, &key, 1) ? 0 : -ESRCH;
421         } else {
422                 return -ENOTSUPP;
423         }
424 }
425
426 static int
427 recv_flow_status_request(struct sw_chain *chain, const struct sender *sender,
428                          const void *msg)
429 {
430         const struct ofp_flow_stat_request *fsr = msg;
431         if (fsr->type == OFPFS_INDIV) {
432                 return dp_send_flow_stats(chain->dp, sender, &fsr->match); 
433         } else {
434                 /* FIXME */
435                 return -ENOTSUPP;
436         }
437 }
438
/* OFPT_PORT_STAT_REQUEST handler: replies with per-port statistics. */
static int
recv_port_status_request(struct sw_chain *chain, const struct sender *sender,
                         const void *msg)
{
        return dp_send_port_stats(chain->dp, sender);
}
445
/* OFPT_TABLE_STAT_REQUEST handler: replies with per-table statistics. */
static int
recv_table_status_request(struct sw_chain *chain, const struct sender *sender,
                          const void *msg)
{
        return dp_send_table_stats(chain->dp, sender);
}
452
/* 'msg', which is 'length' bytes long, was received across Netlink from
 * 'sender'.  Apply it to 'chain'. */
int
fwd_control_input(struct sw_chain *chain, const struct sender *sender,
                  const void *msg, size_t length)
{

        /* One dispatch entry per OpenFlow message type: a minimum valid
         * message size plus the handler to invoke. */
        struct openflow_packet {
                size_t min_size;
                int (*handler)(struct sw_chain *, const struct sender *,
                               const void *);
        };

        /* Indexed by OFPT_* message type; unlisted types have a NULL
         * handler and are rejected below. */
        static const struct openflow_packet packets[] = {
                [OFPT_FEATURES_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_features_request,
                },
                [OFPT_GET_CONFIG_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_get_config_request,
                },
                [OFPT_SET_CONFIG] = {
                        sizeof (struct ofp_switch_config),
                        recv_set_config,
                },
                [OFPT_PACKET_OUT] = {
                        sizeof (struct ofp_packet_out),
                        recv_packet_out,
                },
                [OFPT_FLOW_MOD] = {
                        sizeof (struct ofp_flow_mod),
                        recv_flow,
                },
                [OFPT_PORT_MOD] = {
                        sizeof (struct ofp_port_mod),
                        recv_port_mod,
                },
                [OFPT_FLOW_STAT_REQUEST] = {
                        sizeof (struct ofp_flow_stat_request),
                        recv_flow_status_request,
                },
                [OFPT_PORT_STAT_REQUEST] = {
                        sizeof (struct ofp_port_stat_request),
                        recv_port_status_request,
                },
                [OFPT_TABLE_STAT_REQUEST] = {
                        sizeof (struct ofp_table_stat_request),
                        recv_table_status_request,
                },
        };

        const struct openflow_packet *pkt;
        struct ofp_header *oh;

        /* Validate version, known type, and that the self-declared length
         * does not exceed what was actually received, before dispatching. */
        oh = (struct ofp_header *) msg;
        if (oh->version != 1 || oh->type >= ARRAY_SIZE(packets)
                || ntohs(oh->length) > length)
                return -EINVAL;

        pkt = &packets[oh->type];
        if (!pkt->handler)
                return -ENOSYS;
        if (length < pkt->min_size)
                return -EFAULT;

        return pkt->handler(chain, sender, msg);
}
521
/* Packet buffering. */

/* A buffered packet may be overwritten once it is at least this old. */
#define OVERWRITE_SECS  1
#define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)

/* One slot in the saved-packet table.  The slot index and cookie together
 * form the opaque buffer id handed to the controller, so a stale id can be
 * detected after the slot is reused. */
struct packet_buffer {
        struct sk_buff *skb;            /* Saved packet, or NULL if free. */
        uint32_t cookie;                /* Generation counter for this slot. */
        unsigned long exp_jiffies;      /* When the slot may be reclaimed. */
};

static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;         /* Most recently used slot. */
static DEFINE_SPINLOCK(buffer_lock);    /* Protects buffers[] and buffer_idx. */
536
/* Stores a reference to 'skb' in the packet-buffer table and returns an
 * opaque id (slot index | cookie << PKT_BUFFER_BITS) that the controller can
 * use to refer to it later.  Returns (uint32_t) -1, the reserved "no buffer"
 * id, when the next slot still holds a recent packet.  Takes its own
 * reference on 'skb' via skb_get(). */
uint32_t fwd_save_skb(struct sk_buff *skb)
{
        struct packet_buffer *p;
        unsigned long int flags;
        uint32_t id;

        spin_lock_irqsave(&buffer_lock, flags);
        buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
        p = &buffers[buffer_idx];
        if (p->skb) {
                /* Don't buffer packet if existing entry is less than
                 * OVERWRITE_SECS old. */
                if (time_before(jiffies, p->exp_jiffies)) {
                        spin_unlock_irqrestore(&buffer_lock, flags);
                        return -1;
                } else 
                        kfree_skb(p->skb);
        }
        /* Don't use maximum cookie value since the all-bits-1 id is
         * special. */
        if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
                p->cookie = 0;
        skb_get(skb);
        p->skb = skb;
        p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
        id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
        spin_unlock_irqrestore(&buffer_lock, flags);

        return id;
}
567
568 static struct sk_buff *retrieve_skb(uint32_t id)
569 {
570         unsigned long int flags;
571         struct sk_buff *skb = NULL;
572         struct packet_buffer *p;
573
574         spin_lock_irqsave(&buffer_lock, flags);
575         p = &buffers[id & PKT_BUFFER_MASK];
576         if (p->cookie == id >> PKT_BUFFER_BITS) {
577                 skb = p->skb;
578                 p->skb = NULL;
579         } else {
580                 printk("cookie mismatch: %x != %x\n",
581                                 id >> PKT_BUFFER_BITS, p->cookie);
582         }
583         spin_unlock_irqrestore(&buffer_lock, flags);
584
585         return skb;
586 }
587
588 static void discard_skb(uint32_t id)
589 {
590         unsigned long int flags;
591         struct packet_buffer *p;
592
593         spin_lock_irqsave(&buffer_lock, flags);
594         p = &buffers[id & PKT_BUFFER_MASK];
595         if (p->cookie == id >> PKT_BUFFER_BITS) {
596                 kfree_skb(p->skb);
597                 p->skb = NULL;
598         }
599         spin_unlock_irqrestore(&buffer_lock, flags);
600 }
601
602 void fwd_exit(void)
603 {
604         int i;
605
606         for (i = 0; i < N_PKT_BUFFERS; i++)
607                 kfree_skb(buffers[i].skb);
608 }
609
610 /* Utility functions. */
611
612 /* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
613  * the copy.
614  * Returns 1 if successful, 0 on failure. */
615 static int
616 make_writable(struct sk_buff **pskb)
617 {
618         /* Based on skb_make_writable() in net/netfilter/core.c. */
619         struct sk_buff *nskb;
620
621         /* Not exclusive use of packet?  Must copy. */
622         if (skb_shared(*pskb) || skb_cloned(*pskb))
623                 goto copy_skb;
624
625         return pskb_may_pull(*pskb, 64); /* FIXME? */
626
627 copy_skb:
628         nskb = skb_copy(*pskb, GFP_ATOMIC);
629         if (!nskb)
630                 return 0;
631         BUG_ON(skb_is_nonlinear(nskb));
632
633         /* Rest of kernel will get very unhappy if we pass it a
634            suddenly-orphaned skbuff */
635         if ((*pskb)->sk)
636                 skb_set_owner_w(nskb, (*pskb)->sk);
637         kfree_skb(*pskb);
638         *pskb = nskb;
639         return 1;
640 }