Make switches send error messages when they receive a bad request.
[sliver-openvswitch.git] / datapath / forward.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <net/checksum.h>
#include "forward.h"
#include "datapath.h"
#include "chain.h"
#include "flow.h"

/* FIXME: do we need to use GFP_ATOMIC everywhere here? */

static int make_writable(struct sk_buff **);

static struct sk_buff *retrieve_skb(uint32_t id);
static void discard_skb(uint32_t id);

/* 'skb' was received on port 'p', which may be a physical switch port, the
 * local port, or a null pointer.  Process it according to 'chain'.  Returns 0
 * if successful, in which case 'skb' is destroyed, or -ESRCH if there is no
 * matching flow, in which case 'skb' still belongs to the caller. */
int run_flow_through_tables(struct sw_chain *chain, struct sk_buff *skb,
			    struct net_bridge_port *p)
{
	/* Ethernet address used as the destination for STP frames. */
	static const uint8_t stp_eth_addr[ETH_ALEN]
		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
	struct sw_flow_key key;
	struct sw_flow *flow;

	if (flow_extract(skb, p ? p->port_no : OFPP_NONE, &key)
	    && (chain->dp->flags & OFPC_FRAG_MASK) == OFPC_FRAG_DROP) {
		/* Drop fragment. */
		kfree_skb(skb);
		return 0;
	}
	if (p && p->flags & (OFPPFL_NO_RECV | OFPPFL_NO_RECV_STP) &&
	    p->flags & (compare_ether_addr(key.dl_dst, stp_eth_addr)
			? OFPPFL_NO_RECV : OFPPFL_NO_RECV_STP)) {
		kfree_skb(skb);
		return 0;
	}

	flow = chain_lookup(chain, &key);
	if (likely(flow != NULL)) {
		flow_used(flow, skb);
		execute_actions(chain->dp, skb, &key,
				flow->actions, flow->n_actions, 0);
		return 0;
	} else {
		return -ESRCH;
	}
}

/* 'skb' was received on port 'p', which may be a physical switch port, the
 * local port, or a null pointer.  Process it according to 'chain', sending it
 * up to the controller if no flow matches.  Takes ownership of 'skb'. */
void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb,
		    struct net_bridge_port *p)
{
	if (run_flow_through_tables(chain, skb, p))
		dp_output_control(chain->dp, skb, fwd_save_skb(skb),
				  chain->dp->miss_send_len,
				  OFPR_NO_MATCH);
}

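/* Sends 'skb' out 'out_port', or up to the controller (truncated to at most
 * 'max_len' bytes) if 'out_port' is OFPP_CONTROLLER.  Returns -ENOMEM when
 * 'skb' is a null pointer, which lets callers pass the result of a possibly
 * failed skb_clone() straight through. */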
static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
		     int out_port, int ignore_no_fwd)
{
	if (!skb)
		return -ENOMEM;
	return (likely(out_port != OFPP_CONTROLLER)
		? dp_output_port(dp, skb, out_port, ignore_no_fwd)
		: dp_output_control(dp, skb, fwd_save_skb(skb),
				    max_len, OFPR_ACTION));
}

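/* Applies each of the 'n_actions' actions in 'actions' to 'skb', whose flow
 * key is 'key', outputting clones as needed.  Takes ownership of 'skb': it is
 * either handed to dp_output_port()/dp_output_control() or freed here. */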
void execute_actions(struct datapath *dp, struct sk_buff *skb,
		     const struct sw_flow_key *key,
		     const struct ofp_action *actions, int n_actions,
		     int ignore_no_fwd)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so doing a clone and then
	 * freeing the original skbuff would be wasteful.  The following code
	 * is slightly obscure just to avoid that. */
	int prev_port;
	size_t max_len = 0;	/* Initialize to make compiler happy */
	uint16_t eth_proto;
	int i;

	prev_port = -1;
	eth_proto = ntohs(key->dl_type);

	for (i = 0; i < n_actions; i++) {
		const struct ofp_action *a = &actions[i];

		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC),
				  max_len, prev_port, ignore_no_fwd);
			prev_port = -1;
		}

		if (likely(a->type == htons(OFPAT_OUTPUT))) {
			prev_port = ntohs(a->arg.output.port);
			max_len = ntohs(a->arg.output.max_len);
		} else {
			if (!make_writable(&skb)) {
				if (net_ratelimit())
					printk("make_writable failed\n");
				break;
			}
			skb = execute_setter(skb, eth_proto, key, a);
			if (!skb) {
				if (net_ratelimit())
					printk("execute_setter lost skb\n");
				return;
			}
		}
	}
	if (prev_port != -1)
		do_output(dp, skb, max_len, prev_port, ignore_no_fwd);
	else
		kfree_skb(skb);
}

/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
 * covered by the sum has been changed from 'from' to 'to'.  If set,
 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
 * Based on nf_proto_csum_replace4. */
static void update_csum(__sum16 *sum, struct sk_buff *skb,
			__be32 from, __be32 to, int pseudohdr)
{
	__be32 diff[] = { ~from, to };
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		*sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
				~csum_unfold(*sum)));
		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
			skb->csum = ~csum_partial((char *)diff, sizeof(diff),
						~skb->csum);
	} else if (pseudohdr)
		*sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
				csum_unfold(*sum)));
}

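/* Implements OFPAT_SET_NW_SRC and OFPAT_SET_NW_DST: rewrites the IPv4 source
 * or destination address in 'skb' and updates the IP header checksum, plus
 * the TCP or UDP checksum (which covers the pseudo-header) when applicable.
 * Non-IPv4 packets are left unchanged. */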
static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
			uint8_t nw_proto, const struct ofp_action *a)
{
	if (eth_proto == ETH_P_IP) {
		struct iphdr *nh = ip_hdr(skb);
		uint32_t new, *field;

		new = a->arg.nw_addr;

		if (a->type == htons(OFPAT_SET_NW_SRC))
			field = &nh->saddr;
		else
			field = &nh->daddr;

		if (nw_proto == IPPROTO_TCP) {
			struct tcphdr *th = tcp_hdr(skb);
			update_csum(&th->check, skb, *field, new, 1);
		} else if (nw_proto == IPPROTO_UDP) {
			struct udphdr *th = udp_hdr(skb);
			update_csum(&th->check, skb, *field, new, 1);
		}
		update_csum(&nh->check, skb, *field, new, 0);
		*field = new;
	}
}

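/* Implements OFPAT_SET_TP_SRC and OFPAT_SET_TP_DST: rewrites the TCP or UDP
 * source or destination port in 'skb' and updates the transport checksum.
 * Packets that are not IPv4 TCP or UDP are left unchanged. */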
static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
			uint8_t nw_proto, const struct ofp_action *a)
{
	if (eth_proto == ETH_P_IP) {
		uint16_t new, *field;

		new = a->arg.tp;

		if (nw_proto == IPPROTO_TCP) {
			struct tcphdr *th = tcp_hdr(skb);

			if (a->type == htons(OFPAT_SET_TP_SRC))
				field = &th->source;
			else
				field = &th->dest;

			update_csum(&th->check, skb, *field, new, 1);
			*field = new;
		} else if (nw_proto == IPPROTO_UDP) {
			struct udphdr *th = udp_hdr(skb);

			if (a->type == htons(OFPAT_SET_TP_SRC))
				field = &th->source;
			else
				field = &th->dest;

			update_csum(&th->check, skb, *field, new, 1);
			*field = new;
		}
	}
}

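/* Strips the outermost 802.1Q header from 'skb', if one is present, sliding
 * the Ethernet addresses up by VLAN_HLEN bytes and fixing up skb->protocol
 * and the MAC header offset.  The caller is expected to have made 'skb'
 * writable (see make_writable()). */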
static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
{
	struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
	struct ethhdr *eh;

	/* Verify we were given a vlan packet */
	if (vh->h_vlan_proto != htons(ETH_P_8021Q))
		return skb;

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);

	eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

	skb->protocol = eh->h_proto;
	skb->mac_header += VLAN_HLEN;

	return skb;
}

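/* Implements OFPAT_SET_DL_VLAN.  If the action's VLAN id is OFP_VLAN_NONE,
 * any existing 802.1Q header is removed; otherwise the VID field of an
 * existing tag is rewritten, or a new tag is pushed if the flow ('key') was
 * untagged.  May reallocate 'skb' when a tag is added, so callers must use
 * the return value. */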
static struct sk_buff *modify_vlan(struct sk_buff *skb,
		const struct sw_flow_key *key, const struct ofp_action *a)
{
	uint16_t new_id = ntohs(a->arg.vlan_id);

	if (new_id != OFP_VLAN_NONE) {
		if (key->dl_vlan != htons(OFP_VLAN_NONE)) {
			/* Modify vlan id, but maintain other TCI values */
			struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
			vh->h_vlan_TCI = (vh->h_vlan_TCI
					& ~(htons(VLAN_VID_MASK))) | a->arg.vlan_id;
		} else {
			/* Add vlan header */

			/* xxx The vlan_put_tag function doesn't seem to work
			 * xxx reliably when it attempts to use the
			 * xxx hardware-accelerated version.  We'll directly
			 * xxx use the software version until the problem can
			 * xxx be diagnosed.
			 */
			skb = __vlan_put_tag(skb, new_id);
		}
	} else {
		/* Remove an existing vlan header if it exists */
		vlan_pull_tag(skb);
	}

	return skb;
}

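/* Applies the single field-setting action 'a' to 'skb', whose flow key is
 * 'key' and whose Ethernet type is 'eth_proto' (in host byte order).
 * Returns the possibly reallocated skb, or a null pointer if adding a VLAN
 * tag failed and the packet was freed. */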
struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
			const struct sw_flow_key *key, const struct ofp_action *a)
{
	switch (ntohs(a->type)) {
	case OFPAT_SET_DL_VLAN:
		skb = modify_vlan(skb, key, a);
		break;

	case OFPAT_SET_DL_SRC: {
		struct ethhdr *eh = eth_hdr(skb);
		memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
		break;
	}
	case OFPAT_SET_DL_DST: {
		struct ethhdr *eh = eth_hdr(skb);
		memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
		break;
	}

	case OFPAT_SET_NW_SRC:
	case OFPAT_SET_NW_DST:
		modify_nh(skb, eth_proto, key->nw_proto, a);
		break;

	case OFPAT_SET_TP_SRC:
	case OFPAT_SET_TP_DST:
		modify_th(skb, eth_proto, key->nw_proto, a);
		break;

	default:
		if (net_ratelimit())
			printk("execute_setter: unknown action: %d\n", ntohs(a->type));
	}

	return skb;
}

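/* Handlers for OpenFlow messages received from the controller.  Each one is
 * given the chain, the message's sender, and the message itself (already
 * checked against the handler's minimum length in fwd_control_input()), and
 * returns 0 on success or a negative errno value. */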
static int
recv_features_request(struct sw_chain *chain, const struct sender *sender,
		      const void *msg)
{
	return dp_send_features_reply(chain->dp, sender);
}

static int
recv_get_config_request(struct sw_chain *chain, const struct sender *sender,
			const void *msg)
{
	return dp_send_config_reply(chain->dp, sender);
}

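/* Applies an OFPT_SET_CONFIG message: keeps only the flag bits we understand,
 * forces an unsupported fragment-handling mode back to OFPC_FRAG_DROP, and
 * records the new miss_send_len. */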
static int
recv_set_config(struct sw_chain *chain, const struct sender *sender,
		const void *msg)
{
	const struct ofp_switch_config *osc = msg;
	int flags;

	flags = ntohs(osc->flags) & (OFPC_SEND_FLOW_EXP | OFPC_FRAG_MASK);
	if ((flags & OFPC_FRAG_MASK) != OFPC_FRAG_NORMAL
	    && (flags & OFPC_FRAG_MASK) != OFPC_FRAG_DROP) {
		flags = (flags & ~OFPC_FRAG_MASK) | OFPC_FRAG_DROP;
	}
	chain->dp->flags = flags;

	chain->dp->miss_send_len = ntohs(osc->miss_send_len);

	return 0;
}

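/* Handles an OFPT_PACKET_OUT message: the packet data either follows the
 * actions in the message body (buffer_id of all-ones) or is fetched from the
 * packet buffer by buffer_id, and is then run through the given action
 * list. */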
static int
recv_packet_out(struct sw_chain *chain, const struct sender *sender,
		const void *msg)
{
	const struct ofp_packet_out *opo = msg;
	struct sk_buff *skb;
	struct vlan_ethhdr *mac;
	int nh_ofs;
	struct sw_flow_key key;
	int n_actions = ntohs(opo->n_actions);
	int act_len = n_actions * sizeof opo->actions[0];

	if (act_len > (ntohs(opo->header.length) - sizeof *opo)) {
		if (net_ratelimit())
			printk("message too short for number of actions\n");
		return -EINVAL;
	}

	if (ntohl(opo->buffer_id) == (uint32_t) -1) {
		int data_len = ntohs(opo->header.length) - sizeof *opo - act_len;

		/* FIXME: there is likely a way to reuse the data in msg. */
		skb = alloc_skb(data_len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* FIXME?  We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
		 * we're just transmitting this raw without examining anything
		 * at those layers. */
		memcpy(skb_put(skb, data_len), &opo->actions[n_actions], data_len);

		skb_set_mac_header(skb, 0);
		mac = vlan_eth_hdr(skb);
		if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
			nh_ofs = sizeof(struct ethhdr);
		else
			nh_ofs = sizeof(struct vlan_ethhdr);
		skb_set_network_header(skb, nh_ofs);
	} else {
		skb = retrieve_skb(ntohl(opo->buffer_id));
		if (!skb)
			return -ESRCH;
	}

	dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

	flow_extract(skb, ntohs(opo->in_port), &key);
	execute_actions(chain->dp, skb, &key, opo->actions, n_actions, 1);

	return 0;
}

static int
recv_port_mod(struct sw_chain *chain, const struct sender *sender,
	      const void *msg)
{
	const struct ofp_port_mod *opm = msg;

	dp_update_port_flags(chain->dp, opm);

	return 0;
}

static int
recv_echo_request(struct sw_chain *chain, const struct sender *sender,
		  const void *msg)
{
	return dp_send_echo_reply(chain->dp, sender, msg);
}

static int
recv_echo_reply(struct sw_chain *chain, const struct sender *sender,
		const void *msg)
{
	return 0;
}

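/* Handles an OFPFC_ADD flow_mod: validates the action list, builds a
 * struct sw_flow from 'ofm', inserts it into 'chain', and, if the message
 * names a buffered packet, runs that packet through the new actions. */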
static int
add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
{
	int error = -ENOMEM;
	int i;
	int n_actions;
	struct sw_flow *flow;

	/* To prevent loops, make sure there's no action to send to the
	 * OFP_TABLE virtual port.
	 */
	n_actions = (ntohs(ofm->header.length) - sizeof *ofm)
			/ sizeof *ofm->actions;
	for (i = 0; i < n_actions; i++) {
		const struct ofp_action *a = &ofm->actions[i];

		if (a->type == htons(OFPAT_OUTPUT)
		    && (a->arg.output.port == htons(OFPP_TABLE)
			|| a->arg.output.port == htons(OFPP_NONE)
			|| a->arg.output.port == ofm->match.in_port)) {
			/* xxx Send fancy new error message? */
			goto error;
		}
	}

	/* Allocate memory. */
	flow = flow_alloc(n_actions, GFP_ATOMIC);
	if (flow == NULL)
		goto error;

	/* Fill out flow. */
	flow_extract_match(&flow->key, &ofm->match);
	flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
	flow->idle_timeout = ntohs(ofm->idle_timeout);
	flow->hard_timeout = ntohs(ofm->hard_timeout);
	flow->used = jiffies;
	flow->n_actions = n_actions;
	flow->init_time = jiffies;
	flow->byte_count = 0;
	flow->packet_count = 0;
	spin_lock_init(&flow->lock);
	memcpy(flow->actions, ofm->actions, n_actions * sizeof *flow->actions);

	/* Act. */
	error = chain_insert(chain, flow);
	if (error)
		goto error_free_flow;
	error = 0;
	if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
		struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
		if (skb) {
			struct sw_flow_key key;
			flow_used(flow, skb);
			flow_extract(skb, ntohs(ofm->match.in_port), &key);
			execute_actions(chain->dp, skb, &key, ofm->actions, n_actions, 0);
		} else {
			error = -ESRCH;
		}
	}
	return error;

error_free_flow:
	flow_free(flow);
error:
	if (ntohl(ofm->buffer_id) != (uint32_t) -1)
		discard_skb(ntohl(ofm->buffer_id));
	return error;
}

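/* Dispatches an OFPT_FLOW_MOD message according to its command: add, delete,
 * or strict delete.  Other commands are rejected with -ENOTSUPP. */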
static int
recv_flow(struct sw_chain *chain, const struct sender *sender, const void *msg)
{
	const struct ofp_flow_mod *ofm = msg;
	uint16_t command = ntohs(ofm->command);

	if (command == OFPFC_ADD) {
		return add_flow(chain, ofm);
	} else if (command == OFPFC_DELETE) {
		struct sw_flow_key key;
		flow_extract_match(&key, &ofm->match);
		return chain_delete(chain, &key, 0, 0) ? 0 : -ESRCH;
	} else if (command == OFPFC_DELETE_STRICT) {
		struct sw_flow_key key;
		uint16_t priority;
		flow_extract_match(&key, &ofm->match);
		priority = key.wildcards ? ntohs(ofm->priority) : -1;
		return chain_delete(chain, &key, priority, 1) ? 0 : -ESRCH;
	} else {
		return -ENOTSUPP;
	}
}

/* 'msg', which is 'length' bytes long, was received across Netlink from
 * 'sender'.  Apply it to 'chain'. */
int
fwd_control_input(struct sw_chain *chain, const struct sender *sender,
		  const void *msg, size_t length)
{
	struct openflow_packet {
		size_t min_size;
		int (*handler)(struct sw_chain *, const struct sender *,
			       const void *);
	};

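	/* Dispatch table indexed by OFPT_* message type.  'min_size' is the
	 * smallest valid length for that message; types without a handler
	 * (or beyond the array) are answered below with an
	 * OFPET_BAD_REQUEST/OFPBRC_BAD_TYPE error. */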
	static const struct openflow_packet packets[] = {
		[OFPT_FEATURES_REQUEST] = {
			sizeof (struct ofp_header),
			recv_features_request,
		},
		[OFPT_GET_CONFIG_REQUEST] = {
			sizeof (struct ofp_header),
			recv_get_config_request,
		},
		[OFPT_SET_CONFIG] = {
			sizeof (struct ofp_switch_config),
			recv_set_config,
		},
		[OFPT_PACKET_OUT] = {
			sizeof (struct ofp_packet_out),
			recv_packet_out,
		},
		[OFPT_FLOW_MOD] = {
			sizeof (struct ofp_flow_mod),
			recv_flow,
		},
		[OFPT_PORT_MOD] = {
			sizeof (struct ofp_port_mod),
			recv_port_mod,
		},
		[OFPT_ECHO_REQUEST] = {
			sizeof (struct ofp_header),
			recv_echo_request,
		},
		[OFPT_ECHO_REPLY] = {
			sizeof (struct ofp_header),
			recv_echo_reply,
		},
	};

	struct ofp_header *oh;

	oh = (struct ofp_header *) msg;
	if (oh->version != OFP_VERSION) {
		dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
				  OFPBRC_BAD_VERSION, msg, length);
		return -EINVAL;
	}
	if (ntohs(oh->length) > length)
		return -EINVAL;

	if (oh->type < ARRAY_SIZE(packets)) {
		const struct openflow_packet *pkt = &packets[oh->type];
		if (pkt->handler) {
			if (length < pkt->min_size)
				return -EFAULT;
			return pkt->handler(chain, sender, msg);
		}
	}
	dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
			  OFPBRC_BAD_TYPE, msg, length);
	return -EINVAL;
}

/* Packet buffering. */

#define OVERWRITE_SECS  1
#define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)

struct packet_buffer {
	struct sk_buff *skb;
	uint32_t cookie;
	unsigned long exp_jiffies;
};

static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;
static DEFINE_SPINLOCK(buffer_lock);

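/* Saves a reference to 'skb' in the packet buffer and returns an id that
 * encodes the buffer slot in its low PKT_BUFFER_BITS bits and a per-slot
 * cookie in the bits above, i.e. id = buffer_idx | (cookie << PKT_BUFFER_BITS).
 * Returns (uint32_t) -1 without buffering if the chosen slot was refreshed
 * less than OVERWRITE_SECS ago. */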
uint32_t fwd_save_skb(struct sk_buff *skb)
{
	struct sk_buff *old_skb = NULL;
	struct packet_buffer *p;
	unsigned long int flags;
	uint32_t id;

	spin_lock_irqsave(&buffer_lock, flags);
	buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
	p = &buffers[buffer_idx];
	if (p->skb) {
		/* Don't buffer packet if existing entry is less than
		 * OVERWRITE_SECS old. */
		if (time_before(jiffies, p->exp_jiffies)) {
			spin_unlock_irqrestore(&buffer_lock, flags);
			return -1;
		} else {
			/* Defer kfree_skb() until interrupts re-enabled. */
			old_skb = p->skb;
		}
	}
	/* Don't use maximum cookie value since the all-bits-1 id is
	 * special. */
	if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
		p->cookie = 0;
	skb_get(skb);
	p->skb = skb;
	p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
	id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
	spin_unlock_irqrestore(&buffer_lock, flags);

	if (old_skb)
		kfree_skb(old_skb);

	return id;
}

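/* Looks up the buffered packet for 'id'.  On a cookie match the slot is
 * cleared and the skb (still holding the reference taken in fwd_save_skb())
 * is returned; otherwise returns a null pointer. */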
static struct sk_buff *retrieve_skb(uint32_t id)
{
	unsigned long int flags;
	struct sk_buff *skb = NULL;
	struct packet_buffer *p;

	spin_lock_irqsave(&buffer_lock, flags);
	p = &buffers[id & PKT_BUFFER_MASK];
	if (p->cookie == id >> PKT_BUFFER_BITS) {
		skb = p->skb;
		p->skb = NULL;
	} else {
		printk("cookie mismatch: %x != %x\n",
		       id >> PKT_BUFFER_BITS, p->cookie);
	}
	spin_unlock_irqrestore(&buffer_lock, flags);

	return skb;
}

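/* Releases every buffered packet; used by fwd_exit() during shutdown. */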
void fwd_discard_all(void)
{
	int i;

	for (i = 0; i < N_PKT_BUFFERS; i++) {
		struct sk_buff *skb;
		unsigned long int flags;

		/* Defer kfree_skb() until interrupts re-enabled. */
		spin_lock_irqsave(&buffer_lock, flags);
		skb = buffers[i].skb;
		buffers[i].skb = NULL;
		spin_unlock_irqrestore(&buffer_lock, flags);

		kfree_skb(skb);
	}
}

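/* Drops the buffered packet for 'id', if its cookie still matches, releasing
 * the reference held by the packet buffer. */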
static void discard_skb(uint32_t id)
{
	struct sk_buff *old_skb = NULL;
	unsigned long int flags;
	struct packet_buffer *p;

	spin_lock_irqsave(&buffer_lock, flags);
	p = &buffers[id & PKT_BUFFER_MASK];
	if (p->cookie == id >> PKT_BUFFER_BITS) {
		/* Defer kfree_skb() until interrupts re-enabled. */
		old_skb = p->skb;
		p->skb = NULL;
	}
	spin_unlock_irqrestore(&buffer_lock, flags);

	if (old_skb)
		kfree_skb(old_skb);
}

void fwd_exit(void)
{
	fwd_discard_all();
}

/* Utility functions. */

/* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
 * the copy.
 * Returns 1 if successful, 0 on failure. */
static int
make_writable(struct sk_buff **pskb)
{
	/* Based on skb_make_writable() in net/netfilter/core.c. */
	struct sk_buff *nskb;

	/* Not exclusive use of packet?  Must copy. */
	if (skb_shared(*pskb) || skb_cloned(*pskb))
		goto copy_skb;

	return pskb_may_pull(*pskb, 40); /* FIXME? */

copy_skb:
	nskb = skb_copy(*pskb, GFP_ATOMIC);
	if (!nskb)
		return 0;
	BUG_ON(skb_is_nonlinear(nskb));

	/* Rest of kernel will get very unhappy if we pass it a
	   suddenly-orphaned skbuff */
	if ((*pskb)->sk)
		skb_set_owner_w(nskb, (*pskb)->sk);
	kfree_skb(*pskb);
	*pskb = nskb;
	return 1;
}