/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <net/checksum.h>
#include "forward.h"
#include "datapath.h"
#include "chain.h"
#include "flow.h"

/* FIXME: do we need to use GFP_ATOMIC everywhere here? */

static int make_writable(struct sk_buff **);

static struct sk_buff *retrieve_skb(uint32_t id);
static void discard_skb(uint32_t id);

/* 'skb' was received on port 'p', which may be a physical switch port, the
 * local port, or a null pointer.  Process it according to 'chain'.  Returns 0
 * if successful, in which case 'skb' is destroyed, or -ESRCH if there is no
 * matching flow, in which case 'skb' still belongs to the caller. */
int run_flow_through_tables(struct sw_chain *chain, struct sk_buff *skb,
                            struct net_bridge_port *p)
{
        /* Ethernet address used as the destination for STP frames. */
        static const uint8_t stp_eth_addr[ETH_ALEN]
                = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
        struct sw_flow_key key;
        struct sw_flow *flow;

        if (flow_extract(skb, p ? p->port_no : OFPP_NONE, &key)
            && (chain->dp->flags & OFPC_FRAG_MASK) == OFPC_FRAG_DROP) {
                /* Drop fragment. */
                kfree_skb(skb);
                return 0;
        }
        if (p && p->flags & (OFPPFL_NO_RECV | OFPPFL_NO_RECV_STP) &&
            p->flags & (compare_ether_addr(key.dl_dst, stp_eth_addr)
                        ? OFPPFL_NO_RECV : OFPPFL_NO_RECV_STP)) {
                kfree_skb(skb);
                return 0;
        }

        flow = chain_lookup(chain, &key);
        if (likely(flow != NULL)) {
                flow_used(flow, skb);
                execute_actions(chain->dp, skb, &key,
                                flow->actions, flow->n_actions, 0);
                return 0;
        } else {
                return -ESRCH;
        }
}

/* 'skb' was received on port 'p', which may be a physical switch port, the
 * local port, or a null pointer.  Process it according to 'chain', sending it
 * up to the controller if no flow matches.  Takes ownership of 'skb'. */
void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb,
                    struct net_bridge_port *p)
{
        if (run_flow_through_tables(chain, skb, p))
                dp_output_control(chain->dp, skb, fwd_save_skb(skb),
                                  chain->dp->miss_send_len,
                                  OFPR_NO_MATCH);
}

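/* Sends 'skb' out 'out_port' on 'dp', or up to the controller (truncated to
 * 'max_len' bytes) if 'out_port' is OFPP_CONTROLLER.  A null 'skb' (e.g. a
 * failed skb_clone()) yields -ENOMEM; otherwise returns the result of the
 * transmit. */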
static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
                     int out_port, int ignore_no_fwd)
{
        if (!skb)
                return -ENOMEM;
        return (likely(out_port != OFPP_CONTROLLER)
                ? dp_output_port(dp, skb, out_port, ignore_no_fwd)
                : dp_output_control(dp, skb, fwd_save_skb(skb),
                                    max_len, OFPR_ACTION));
}

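/* Applies the 'n_actions' actions in 'actions' to 'skb', whose flow was
 * extracted into 'key', on datapath 'dp'.  Consumes 'skb'. */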
void execute_actions(struct datapath *dp, struct sk_buff *skb,
                     const struct sw_flow_key *key,
                     const struct ofp_action *actions, int n_actions,
                     int ignore_no_fwd)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so doing a clone and then
         * freeing the original skbuff would be wasteful.  The following code
         * is slightly obscure just to avoid that. */
        int prev_port;
        size_t max_len = 0;      /* Initialize to make the compiler happy. */
        uint16_t eth_proto;
        int i;

        prev_port = -1;
        eth_proto = ntohs(key->dl_type);

        for (i = 0; i < n_actions; i++) {
                const struct ofp_action *a = &actions[i];

                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC),
                                  max_len, prev_port, ignore_no_fwd);
                        prev_port = -1;
                }

                if (likely(a->type == htons(OFPAT_OUTPUT))) {
                        prev_port = ntohs(a->arg.output.port);
                        max_len = ntohs(a->arg.output.max_len);
                } else {
                        if (!make_writable(&skb)) {
                                if (net_ratelimit())
                                        printk("make_writable failed\n");
                                break;
                        }
                        skb = execute_setter(skb, eth_proto, key, a);
                        if (!skb) {
                                if (net_ratelimit())
                                        printk("execute_setter lost skb\n");
                                return;
                        }
                }
        }
        if (prev_port != -1)
                do_output(dp, skb, max_len, prev_port, ignore_no_fwd);
        else
                kfree_skb(skb);
}

/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
 * covered by the sum has been changed from 'from' to 'to'.  If set,
 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
 * Based on nf_proto_csum_replace4. */
static void update_csum(__sum16 *sum, struct sk_buff *skb,
                        __be32 from, __be32 to, int pseudohdr)
{
        __be32 diff[] = { ~from, to };
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                *sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
                                ~csum_unfold(*sum)));
                if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
                        skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                                ~skb->csum);
        } else if (pseudohdr)
                *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
                                csum_unfold(*sum)));
}

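/* Implements OFPAT_SET_NW_SRC and OFPAT_SET_NW_DST: rewrites the IPv4 source
 * or destination address in 'skb' and updates the IP and TCP/UDP checksums
 * accordingly.  Non-IP packets are left unchanged. */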
static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                struct iphdr *nh = ip_hdr(skb);
                uint32_t new, *field;

                new = a->arg.nw_addr;

                if (a->type == htons(OFPAT_SET_NW_SRC))
                        field = &nh->saddr;
                else
                        field = &nh->daddr;

                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                }
                update_csum(&nh->check, skb, *field, new, 0);
                *field = new;
        }
}

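/* Implements OFPAT_SET_TP_SRC and OFPAT_SET_TP_DST: rewrites the TCP or UDP
 * source or destination port in 'skb' and updates the transport checksum.
 * Packets that are not IPv4 TCP or UDP are left unchanged. */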
static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                uint16_t new, *field;

                new = a->arg.tp;

                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);

                        if (a->type == htons(OFPAT_SET_TP_SRC))
                                field = &th->source;
                        else
                                field = &th->dest;

                        update_csum(&th->check, skb, *field, new, 1);
                        *field = new;
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);

                        if (a->type == htons(OFPAT_SET_TP_SRC))
                                field = &th->source;
                        else
                                field = &th->dest;

                        update_csum(&th->check, skb, *field, new, 1);
                        *field = new;
                }
        }
}

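/* Removes the outermost 802.1Q header from 'skb', if one is present, and
 * adjusts the MAC header and skb->protocol to match.  Returns 'skb'. */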
static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
{
        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
        struct ethhdr *eh;

        /* Verify we were given a vlan packet */
        if (vh->h_vlan_proto != htons(ETH_P_8021Q))
                return skb;

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);

        eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return skb;
}

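/* Implements OFPAT_SET_DL_VLAN: sets the VLAN ID of 'skb' to the one given in
 * 'a', adding or stripping an 802.1Q header as needed.  Returns 'skb', which
 * may have been reallocated if a header was added. */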
static struct sk_buff *modify_vlan(struct sk_buff *skb,
                const struct sw_flow_key *key, const struct ofp_action *a)
{
        uint16_t new_id = ntohs(a->arg.vlan_id);

        if (new_id != OFP_VLAN_NONE) {
                if (key->dl_vlan != htons(OFP_VLAN_NONE)) {
                        /* Modify vlan id, but maintain other TCI values */
                        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
                        vh->h_vlan_TCI = (vh->h_vlan_TCI
                                        & ~(htons(VLAN_VID_MASK))) | a->arg.vlan_id;
                } else {
                        /* Add vlan header */

                        /* xxx The vlan_put_tag function doesn't seem to work
                         * xxx reliably when it attempts to use the
                         * xxx hardware-accelerated version.  We'll directly use
                         * xxx the software version until the problem can be
                         * xxx diagnosed.
                         */
                        skb = __vlan_put_tag(skb, new_id);
                }
        } else {
                /* Remove an existing vlan header if it exists */
                vlan_pull_tag(skb);
        }

        return skb;
}

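/* Applies the field-modifying action 'a' to 'skb', whose flow is described by
 * 'key' and whose Ethernet type is 'eth_proto' (in host byte order).  Returns
 * the possibly-reallocated skb. */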
struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
                        const struct sw_flow_key *key, const struct ofp_action *a)
{
        switch (ntohs(a->type)) {
        case OFPAT_SET_DL_VLAN:
                skb = modify_vlan(skb, key, a);
                break;

        case OFPAT_SET_DL_SRC: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
                break;
        }
        case OFPAT_SET_DL_DST: {
                struct ethhdr *eh = eth_hdr(skb);
                memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
                break;
        }

        case OFPAT_SET_NW_SRC:
        case OFPAT_SET_NW_DST:
                modify_nh(skb, eth_proto, key->nw_proto, a);
                break;

        case OFPAT_SET_TP_SRC:
        case OFPAT_SET_TP_DST:
                modify_th(skb, eth_proto, key->nw_proto, a);
                break;

        default:
                if (net_ratelimit())
                        printk("execute_setter: unknown action: %d\n", ntohs(a->type));
        }

        return skb;
}

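/* Handlers for received OpenFlow control messages.  Each takes the chain that
 * the message applies to, the sender to use for any reply, and the message
 * itself, and returns 0 on success or a negative errno value. */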
static int
recv_hello(struct sw_chain *chain, const struct sender *sender,
           const void *msg)
{
        return dp_send_hello(chain->dp, sender, msg);
}

static int
recv_features_request(struct sw_chain *chain, const struct sender *sender,
                      const void *msg)
{
        return dp_send_features_reply(chain->dp, sender);
}

static int
recv_get_config_request(struct sw_chain *chain, const struct sender *sender,
                        const void *msg)
{
        return dp_send_config_reply(chain->dp, sender);
}

static int
recv_set_config(struct sw_chain *chain, const struct sender *sender,
                const void *msg)
{
        const struct ofp_switch_config *osc = msg;
        int flags;

        flags = ntohs(osc->flags) & (OFPC_SEND_FLOW_EXP | OFPC_FRAG_MASK);
        if ((flags & OFPC_FRAG_MASK) != OFPC_FRAG_NORMAL
            && (flags & OFPC_FRAG_MASK) != OFPC_FRAG_DROP) {
                flags = (flags & ~OFPC_FRAG_MASK) | OFPC_FRAG_DROP;
        }
        chain->dp->flags = flags;

        chain->dp->miss_send_len = ntohs(osc->miss_send_len);

        return 0;
}

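/* Handles an OFPT_PACKET_OUT message: obtains the packet either from the
 * message body or from the buffer named by 'buffer_id', then runs the
 * message's actions on it. */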
static int
recv_packet_out(struct sw_chain *chain, const struct sender *sender,
                const void *msg)
{
        const struct ofp_packet_out *opo = msg;
        struct sk_buff *skb;
        struct vlan_ethhdr *mac;
        int nh_ofs;
        struct sw_flow_key key;
        int n_actions = ntohs(opo->n_actions);
        int act_len = n_actions * sizeof opo->actions[0];

        if (act_len > (ntohs(opo->header.length) - sizeof *opo)) {
                if (net_ratelimit())
                        printk("message too short for number of actions\n");
                return -EINVAL;
        }

        if (ntohl(opo->buffer_id) == (uint32_t) -1) {
                int data_len = ntohs(opo->header.length) - sizeof *opo - act_len;

                /* FIXME: there is likely a way to reuse the data in msg. */
                skb = alloc_skb(data_len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                /* FIXME?  We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
                 * we're just transmitting this raw without examining anything
                 * at those layers. */
                memcpy(skb_put(skb, data_len), &opo->actions[n_actions], data_len);

                skb_set_mac_header(skb, 0);
                mac = vlan_eth_hdr(skb);
                if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
                        nh_ofs = sizeof(struct ethhdr);
                else
                        nh_ofs = sizeof(struct vlan_ethhdr);
                skb_set_network_header(skb, nh_ofs);
        } else {
                skb = retrieve_skb(ntohl(opo->buffer_id));
                if (!skb)
                        return -ESRCH;
        }

        dp_set_origin(chain->dp, ntohs(opo->in_port), skb);

        flow_extract(skb, ntohs(opo->in_port), &key);
        execute_actions(chain->dp, skb, &key, opo->actions, n_actions, 1);

        return 0;
}

static int
recv_port_mod(struct sw_chain *chain, const struct sender *sender,
              const void *msg)
{
        const struct ofp_port_mod *opm = msg;

        dp_update_port_flags(chain->dp, opm);

        return 0;
}

static int
recv_echo_request(struct sw_chain *chain, const struct sender *sender,
                  const void *msg)
{
        return dp_send_echo_reply(chain->dp, sender, msg);
}

static int
recv_echo_reply(struct sw_chain *chain, const struct sender *sender,
                const void *msg)
{
        return 0;
}

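/* Handles an OFPFC_ADD flow_mod: validates the actions, inserts a new flow
 * into 'chain', and, if the message names a buffered packet, applies the new
 * flow's actions to it. */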
static int
add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
{
        int error = -ENOMEM;
        int i;
        int n_actions;
        struct sw_flow *flow;

        /* To prevent loops, make sure there's no action to send to the
         * OFP_TABLE virtual port.
         */
        n_actions = (ntohs(ofm->header.length) - sizeof *ofm)
                        / sizeof *ofm->actions;
        for (i = 0; i < n_actions; i++) {
                const struct ofp_action *a = &ofm->actions[i];

                if (a->type == htons(OFPAT_OUTPUT)
                                        && (a->arg.output.port == htons(OFPP_TABLE)
                                                || a->arg.output.port == htons(OFPP_NONE)
                                                || a->arg.output.port == ofm->match.in_port)) {
                        /* xxx Send fancy new error message? */
                        goto error;
                }
        }

        /* Allocate memory. */
        flow = flow_alloc(n_actions, GFP_ATOMIC);
        if (flow == NULL)
                goto error;

        /* Fill out flow. */
        flow_extract_match(&flow->key, &ofm->match);
        flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
        flow->idle_timeout = ntohs(ofm->idle_timeout);
        flow->hard_timeout = ntohs(ofm->hard_timeout);
        flow->used = jiffies;
        flow->n_actions = n_actions;
        flow->init_time = jiffies;
        flow->byte_count = 0;
        flow->packet_count = 0;
        spin_lock_init(&flow->lock);
        memcpy(flow->actions, ofm->actions, n_actions * sizeof *flow->actions);

        /* Act. */
        error = chain_insert(chain, flow);
        if (error)
                goto error_free_flow;
        error = 0;
        if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
                struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
                if (skb) {
                        struct sw_flow_key key;
                        flow_used(flow, skb);
                        flow_extract(skb, ntohs(ofm->match.in_port), &key);
                        execute_actions(chain->dp, skb, &key, ofm->actions, n_actions, 0);
                } else {
                        error = -ESRCH;
                }
        }
        return error;

error_free_flow:
        flow_free(flow);
error:
        if (ntohl(ofm->buffer_id) != (uint32_t) -1)
                discard_skb(ntohl(ofm->buffer_id));
        return error;
}

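/* Handles an OFPT_FLOW_MOD message by dispatching on its command: add,
 * delete, or strict delete.  Other commands are rejected with -ENOTSUPP. */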
static int
recv_flow(struct sw_chain *chain, const struct sender *sender, const void *msg)
{
        const struct ofp_flow_mod *ofm = msg;
        uint16_t command = ntohs(ofm->command);

        if (command == OFPFC_ADD) {
                return add_flow(chain, ofm);
        } else if (command == OFPFC_DELETE) {
                struct sw_flow_key key;
                flow_extract_match(&key, &ofm->match);
                return chain_delete(chain, &key, 0, 0) ? 0 : -ESRCH;
        } else if (command == OFPFC_DELETE_STRICT) {
                struct sw_flow_key key;
                uint16_t priority;
                flow_extract_match(&key, &ofm->match);
                priority = key.wildcards ? ntohs(ofm->priority) : -1;
                return chain_delete(chain, &key, priority, 1) ? 0 : -ESRCH;
        } else {
                return -ENOTSUPP;
        }
}

/* 'msg', which is 'length' bytes long, was received across Netlink from
 * 'sender'.  Apply it to 'chain'. */
int
fwd_control_input(struct sw_chain *chain, const struct sender *sender,
                  const void *msg, size_t length)
{
        struct openflow_packet {
                size_t min_size;
                int (*handler)(struct sw_chain *, const struct sender *,
                               const void *);
        };

        static const struct openflow_packet packets[] = {
                [OFPT_HELLO] = {
                        sizeof (struct ofp_header),
                        recv_hello,
                },
                [OFPT_FEATURES_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_features_request,
                },
                [OFPT_GET_CONFIG_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_get_config_request,
                },
                [OFPT_SET_CONFIG] = {
                        sizeof (struct ofp_switch_config),
                        recv_set_config,
                },
                [OFPT_PACKET_OUT] = {
                        sizeof (struct ofp_packet_out),
                        recv_packet_out,
                },
                [OFPT_FLOW_MOD] = {
                        sizeof (struct ofp_flow_mod),
                        recv_flow,
                },
                [OFPT_PORT_MOD] = {
                        sizeof (struct ofp_port_mod),
                        recv_port_mod,
                },
                [OFPT_ECHO_REQUEST] = {
                        sizeof (struct ofp_header),
                        recv_echo_request,
                },
                [OFPT_ECHO_REPLY] = {
                        sizeof (struct ofp_header),
                        recv_echo_reply,
                },
        };

        struct ofp_header *oh;

        oh = (struct ofp_header *) msg;
        if (oh->version != OFP_VERSION
            && oh->type != OFPT_HELLO
            && oh->type != OFPT_ERROR
            && oh->type != OFPT_ECHO_REQUEST
            && oh->type != OFPT_ECHO_REPLY
            && oh->type != OFPT_VENDOR)
        {
                dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
                                  OFPBRC_BAD_VERSION, msg, length);
                return -EINVAL;
        }
        if (ntohs(oh->length) > length)
                return -EINVAL;

        if (oh->type < ARRAY_SIZE(packets)) {
                const struct openflow_packet *pkt = &packets[oh->type];
                if (pkt->handler) {
                        if (length < pkt->min_size)
                                return -EFAULT;
                        return pkt->handler(chain, sender, msg);
                }
        }
        dp_send_error_msg(chain->dp, sender, OFPET_BAD_REQUEST,
                          OFPBRC_BAD_TYPE, msg, length);
        return -EINVAL;
}

/* Packet buffering. */

#define OVERWRITE_SECS  1
#define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)

struct packet_buffer {
        struct sk_buff *skb;
        uint32_t cookie;
        unsigned long exp_jiffies;
};

static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;
static DEFINE_SPINLOCK(buffer_lock);

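/* Stores a reference to 'skb' in the packet buffer ring and returns an id
 * that later messages can use to name it, or (uint32_t) -1 if the slot to be
 * reused still holds a packet less than OVERWRITE_SECS old. */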
uint32_t fwd_save_skb(struct sk_buff *skb)
{
        struct sk_buff *old_skb = NULL;
        struct packet_buffer *p;
        unsigned long int flags;
        uint32_t id;

        spin_lock_irqsave(&buffer_lock, flags);
        buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
        p = &buffers[buffer_idx];
        if (p->skb) {
                /* Don't buffer packet if existing entry is less than
                 * OVERWRITE_SECS old. */
                if (time_before(jiffies, p->exp_jiffies)) {
                        spin_unlock_irqrestore(&buffer_lock, flags);
                        return -1;
                } else {
                        /* Defer kfree_skb() until interrupts re-enabled. */
                        old_skb = p->skb;
                }
        }
        /* Don't use maximum cookie value since the all-bits-1 id is
         * special. */
        if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
                p->cookie = 0;
        skb_get(skb);
        p->skb = skb;
        p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
        id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
        spin_unlock_irqrestore(&buffer_lock, flags);

        if (old_skb)
                kfree_skb(old_skb);

        return id;
}

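/* Returns the buffered packet with the given 'id' and removes it from the
 * buffer, or a null pointer if the cookie in 'id' does not match. */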
static struct sk_buff *retrieve_skb(uint32_t id)
{
        unsigned long int flags;
        struct sk_buff *skb = NULL;
        struct packet_buffer *p;

        spin_lock_irqsave(&buffer_lock, flags);
        p = &buffers[id & PKT_BUFFER_MASK];
        if (p->cookie == id >> PKT_BUFFER_BITS) {
                skb = p->skb;
                p->skb = NULL;
        } else {
                printk("cookie mismatch: %x != %x\n",
                                id >> PKT_BUFFER_BITS, p->cookie);
        }
        spin_unlock_irqrestore(&buffer_lock, flags);

        return skb;
}

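/* Releases every packet currently held in the buffer. */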
void fwd_discard_all(void)
{
        int i;

        for (i = 0; i < N_PKT_BUFFERS; i++) {
                struct sk_buff *skb;
                unsigned long int flags;

                /* Defer kfree_skb() until interrupts re-enabled. */
                spin_lock_irqsave(&buffer_lock, flags);
                skb = buffers[i].skb;
                buffers[i].skb = NULL;
                spin_unlock_irqrestore(&buffer_lock, flags);

                kfree_skb(skb);
        }
}

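/* Drops the buffered packet with the given 'id', if it is still present. */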
static void discard_skb(uint32_t id)
{
        struct sk_buff *old_skb = NULL;
        unsigned long int flags;
        struct packet_buffer *p;

        spin_lock_irqsave(&buffer_lock, flags);
        p = &buffers[id & PKT_BUFFER_MASK];
        if (p->cookie == id >> PKT_BUFFER_BITS) {
                /* Defer kfree_skb() until interrupts re-enabled. */
                old_skb = p->skb;
                p->skb = NULL;
        }
        spin_unlock_irqrestore(&buffer_lock, flags);

        if (old_skb)
                kfree_skb(old_skb);
}

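/* Releases the resources held by the forwarding code, currently just the
 * buffered packets. */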
void fwd_exit(void)
{
        fwd_discard_all();
}

/* Utility functions. */

/* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
 * the copy.
 * Returns 1 if successful, 0 on failure. */
static int
make_writable(struct sk_buff **pskb)
{
        /* Based on skb_make_writable() in net/netfilter/core.c. */
        struct sk_buff *nskb;

        /* Not exclusive use of packet?  Must copy. */
        if (skb_shared(*pskb) || skb_cloned(*pskb))
                goto copy_skb;

        return pskb_may_pull(*pskb, 40); /* FIXME? */

copy_skb:
        nskb = skb_copy(*pskb, GFP_ATOMIC);
        if (!nskb)
                return 0;
        BUG_ON(skb_is_nonlinear(nskb));

        /* Rest of kernel will get very unhappy if we pass it a
           suddenly-orphaned skbuff */
        if ((*pskb)->sk)
                skb_set_owner_w(nskb, (*pskb)->sk);
        kfree_skb(*pskb);
        *pskb = nskb;
        return 1;
}