Switch to new packet-out format and add OFPP_IN_PORT.
[sliver-openvswitch.git] / datapath / forward.c
1 /*
2  * Distributed under the terms of the GNU GPL version 2.
3  * Copyright (c) 2007, 2008 The Board of Trustees of The Leland 
4  * Stanford Junior University
5  */
6
7 #include <linux/if_ether.h>
8 #include <linux/if_vlan.h>
9 #include <linux/in.h>
10 #include <linux/ip.h>
11 #include <linux/tcp.h>
12 #include <linux/udp.h>
13 #include <linux/in6.h>
14 #include <asm/uaccess.h>
15 #include <linux/types.h>
16 #include <net/checksum.h>
17 #include "forward.h"
18 #include "datapath.h"
19 #include "chain.h"
20 #include "flow.h"
21
22 /* FIXME: do we need to use GFP_ATOMIC everywhere here? */
23
24 static int make_writable(struct sk_buff **);
25
26 static struct sk_buff *retrieve_skb(uint32_t id);
27 static void discard_skb(uint32_t id);
28
29 /* 'skb' was received on 'in_port', a physical switch port between 0 and
30  * OFPP_MAX.  Process it according to 'chain'.  Returns 0 if successful, in
31  * which case 'skb' is destroyed, or -ESRCH if there is no matching flow, in
32  * which case 'skb' still belongs to the caller. */
33 int run_flow_through_tables(struct sw_chain *chain, struct sk_buff *skb,
34                             int in_port)
35 {
36         struct sw_flow_key key;
37         struct sw_flow *flow;
38
39         if (flow_extract(skb, in_port, &key)
40             && (chain->dp->flags & OFPC_FRAG_MASK) == OFPC_FRAG_DROP) {
41                 /* Drop fragment. */
42                 kfree_skb(skb);
43                 return 0;
44         }
45
46         flow = chain_lookup(chain, &key);
47         if (likely(flow != NULL)) {
48                 flow_used(flow, skb);
49                 execute_actions(chain->dp, skb, &key,
50                                 flow->actions, flow->n_actions);
51                 return 0;
52         } else {
53                 return -ESRCH;
54         }
55 }
56
57 /* 'skb' was received on 'in_port', a physical switch port between 0 and
58  * OFPP_MAX.  Process it according to 'chain', sending it up to the controller
59  * if no flow matches.  Takes ownership of 'skb'. */
60 void fwd_port_input(struct sw_chain *chain, struct sk_buff *skb, int in_port)
61 {
62         if (run_flow_through_tables(chain, skb, in_port))
63                 dp_output_control(chain->dp, skb, fwd_save_skb(skb), 
64                                   chain->dp->miss_send_len,
65                                   OFPR_NO_MATCH);
66 }
67
68 static int do_output(struct datapath *dp, struct sk_buff *skb, size_t max_len,
69                         int out_port)
70 {
71         if (!skb)
72                 return -ENOMEM;
73         return (likely(out_port != OFPP_CONTROLLER)
74                 ? dp_output_port(dp, skb, out_port)
75                 : dp_output_control(dp, skb, fwd_save_skb(skb),
76                                          max_len, OFPR_ACTION));
77 }
78
/* Applies the 'n_actions' actions in 'actions' to 'skb' (classified as
 * 'key') on behalf of datapath 'dp'.  Takes ownership of 'skb': every path
 * transmits it, hands it to the controller, or frees it. */
void execute_actions(struct datapath *dp, struct sk_buff *skb,
                                const struct sw_flow_key *key,
                                const struct ofp_action *actions, int n_actions)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so that doing a clone and
         * then freeing the original skbuff is wasteful.  So the following code
         * is slightly obscure just to avoid that. */
        int prev_port;
        size_t max_len=0;        /* Initialize to make compiler happy */
        uint16_t eth_proto;
        int i;

        prev_port = -1;
        eth_proto = ntohs(key->dl_type);

        for (i = 0; i < n_actions; i++) {
                const struct ofp_action *a = &actions[i];

                /* An output was deferred by the previous iteration; now that
                 * we know more actions follow, send a clone and keep the
                 * original 'skb' for the rest. */
                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC),
                                  max_len, prev_port);
                        prev_port = -1;
                }

                if (likely(a->type == htons(OFPAT_OUTPUT))) {
                        /* Defer: if this turns out to be the last action,
                         * 'skb' itself is sent without cloning. */
                        prev_port = ntohs(a->arg.output.port);
                        max_len = ntohs(a->arg.output.max_len);
                } else {
                        /* Setter actions modify packet data, so we need an
                         * exclusive, linear skb first. */
                        if (!make_writable(&skb)) {
                                if (net_ratelimit())
                                    printk("make_writable failed\n");
                                break;
                        }
                        /* May reallocate the skb (e.g. when adding a VLAN
                         * tag); NULL means the packet was lost. */
                        skb = execute_setter(skb, eth_proto, key, a);
                        if (!skb) {
                                if (net_ratelimit())
                                        printk("execute_setter lost skb\n");
                                return;
                        }
                }
        }
        /* Final deferred output consumes 'skb'; otherwise the action list
         * did not end in an output and the packet is dropped. */
        if (prev_port != -1)
                do_output(dp, skb, max_len, prev_port);
        else
                kfree_skb(skb);
}
126
/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
 * covered by the sum has been changed from 'from' to 'to'.  If set,
 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
 * Based on nf_proto_csum_replace4. */
static void update_csum(__sum16 *sum, struct sk_buff *skb,
                        __be32 from, __be32 to, int pseudohdr)
{
        /* Folding { ~from, to } into the old checksum is equivalent to
         * recomputing it with 'to' in place of 'from'. */
        __be32 diff[] = { ~from, to };
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                *sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
                                ~csum_unfold(*sum)));
                /* With CHECKSUM_COMPLETE, skb->csum covers the packet data
                 * too, so a pseudo-header change must be folded in there
                 * as well. */
                if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
                        skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                                ~skb->csum);
        } else if (pseudohdr)
                /* CHECKSUM_PARTIAL: hardware finishes the sum later, so only
                 * the pseudo-header seed stored in the packet is adjusted. */
                *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
                                csum_unfold(*sum)));
}
145
/* Applies an OFPAT_SET_NW_SRC/OFPAT_SET_NW_DST action 'a' to 'skb':
 * rewrites the IPv4 source or destination address and fixes up the IP
 * header checksum plus, for TCP/UDP, the transport checksum (the addresses
 * are part of the pseudo-header).  Non-IP packets are left untouched. */
static void modify_nh(struct sk_buff *skb, uint16_t eth_proto,
                        uint8_t nw_proto, const struct ofp_action *a)
{
        if (eth_proto == ETH_P_IP) {
                struct iphdr *nh = ip_hdr(skb);
                uint32_t new, *field;

                new = a->arg.nw_addr;    /* stored without conversion; presumably
                                          * already network byte order -- confirm
                                          * against the action encoding */

                if (a->type == htons(OFPAT_SET_NW_SRC))
                        field = &nh->saddr;
                else
                        field = &nh->daddr;

                if (nw_proto == IPPROTO_TCP) {
                        struct tcphdr *th = tcp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                } else if (nw_proto == IPPROTO_UDP) {
                        struct udphdr *th = udp_hdr(skb);
                        update_csum(&th->check, skb, *field, new, 1);
                }
                /* Checksums must be updated before the field is overwritten:
                 * update_csum() reads the old value through 'field'. */
                update_csum(&nh->check, skb, *field, new, 0);
                *field = new;
        }
}
171
172 static void modify_th(struct sk_buff *skb, uint16_t eth_proto,
173                         uint8_t nw_proto, const struct ofp_action *a)
174 {
175         if (eth_proto == ETH_P_IP) {
176                 uint16_t new, *field;
177
178                 new = a->arg.tp;
179
180                 if (nw_proto == IPPROTO_TCP) {
181                         struct tcphdr *th = tcp_hdr(skb);
182
183                         if (a->type == htons(OFPAT_SET_TP_SRC))
184                                 field = &th->source;
185                         else
186                                 field = &th->dest;
187
188                         update_csum(&th->check, skb, *field, new, 1);
189                         *field = new;
190                 } else if (nw_proto == IPPROTO_UDP) {
191                         struct udphdr *th = udp_hdr(skb);
192
193                         if (a->type == htons(OFPAT_SET_TP_SRC))
194                                 field = &th->source;
195                         else
196                                 field = &th->dest;
197
198                         update_csum(&th->check, skb, *field, new, 1);
199                         *field = new;
200                 }
201         }
202 }
203
/* Strips the outermost 802.1Q tag from 'skb' in place, if one is present,
 * and returns 'skb' (never reallocates).  Assumes the Ethernet header
 * starts at skb->data. */
static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
{
        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
        struct ethhdr *eh;


        /* Verify we were given a vlan packet */
        if (vh->h_vlan_proto != htons(ETH_P_8021Q))
                return skb;

        /* Slide the two MAC addresses 4 bytes toward the payload, over the
         * TPID/TCI, then pop those 4 bytes off the front of the skb. */
        memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);

        eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

        /* The encapsulated EtherType becomes the packet's protocol again. */
        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return skb;
}
223
/* Applies an OFPAT_SET_DL_VLAN action 'a' to 'skb': rewrites the VLAN id of
 * an already-tagged packet, adds a tag to an untagged one, or (when the id
 * is OFP_VLAN_NONE) strips any existing tag.  Returns the possibly
 * reallocated skb. */
static struct sk_buff *modify_vlan(struct sk_buff *skb,
                const struct sw_flow_key *key, const struct ofp_action *a)
{
        uint16_t new_id = ntohs(a->arg.vlan_id);

        if (new_id != OFP_VLAN_NONE) {
                if (key->dl_vlan != htons(OFP_VLAN_NONE)) {
                        /* Modify vlan id, but maintain other TCI values */
                        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
                        vh->h_vlan_TCI = (vh->h_vlan_TCI
                                        & ~(htons(VLAN_VID_MASK))) | a->arg.vlan_id;
                } else  {
                        /* Add vlan header */

                        /* xxx The vlan_put_tag function, doesn't seem to work
                         * xxx reliably when it attempts to use the hardware-accelerated
                         * xxx version.  We'll directly use the software version
                         * xxx until the problem can be diagnosed.
                         */
                        /* NOTE(review): presumably returns NULL on allocation
                         * failure -- the caller (execute_setter's caller)
                         * checks for a NULL skb. */
                        skb = __vlan_put_tag(skb, new_id);
                }
        } else  {
                /* Remove an existing vlan header if it exists */
                vlan_pull_tag(skb);
        }

        return skb;
}
252
253 struct sk_buff *execute_setter(struct sk_buff *skb, uint16_t eth_proto,
254                         const struct sw_flow_key *key, const struct ofp_action *a)
255 {
256         switch (ntohs(a->type)) {
257         case OFPAT_SET_DL_VLAN:
258                 skb = modify_vlan(skb, key, a);
259                 break;
260
261         case OFPAT_SET_DL_SRC: {
262                 struct ethhdr *eh = eth_hdr(skb);
263                 memcpy(eh->h_source, a->arg.dl_addr, sizeof eh->h_source);
264                 break;
265         }
266         case OFPAT_SET_DL_DST: {
267                 struct ethhdr *eh = eth_hdr(skb);
268                 memcpy(eh->h_dest, a->arg.dl_addr, sizeof eh->h_dest);
269                 break;
270         }
271
272         case OFPAT_SET_NW_SRC:
273         case OFPAT_SET_NW_DST:
274                 modify_nh(skb, eth_proto, key->nw_proto, a);
275                 break;
276
277         case OFPAT_SET_TP_SRC:
278         case OFPAT_SET_TP_DST:
279                 modify_th(skb, eth_proto, key->nw_proto, a);
280                 break;
281         
282         default:
283                 if (net_ratelimit())
284                         printk("execute_setter: unknown action: %d\n", ntohs(a->type));
285         }
286
287         return skb;
288 }
289
290 static int
291 recv_features_request(struct sw_chain *chain, const struct sender *sender,
292                       const void *msg) 
293 {
294         return dp_send_features_reply(chain->dp, sender);
295 }
296
297 static int
298 recv_get_config_request(struct sw_chain *chain, const struct sender *sender,
299                         const void *msg)
300 {
301         return dp_send_config_reply(chain->dp, sender);
302 }
303
304 static int
305 recv_set_config(struct sw_chain *chain, const struct sender *sender,
306                 const void *msg)
307 {
308         const struct ofp_switch_config *osc = msg;
309         int flags;
310
311         flags = ntohs(osc->flags) & (OFPC_SEND_FLOW_EXP | OFPC_FRAG_MASK);
312         if ((flags & OFPC_FRAG_MASK) != OFPC_FRAG_NORMAL
313             && (flags & OFPC_FRAG_MASK) != OFPC_FRAG_DROP) {
314                 flags = (flags & ~OFPC_FRAG_MASK) | OFPC_FRAG_DROP;
315         }
316         chain->dp->flags = flags;
317
318         chain->dp->miss_send_len = ntohs(osc->miss_send_len);
319
320         return 0;
321 }
322
323 static int
324 recv_packet_out(struct sw_chain *chain, const struct sender *sender,
325                 const void *msg)
326 {
327         const struct ofp_packet_out *opo = msg;
328         struct sk_buff *skb;
329         struct vlan_ethhdr *mac;
330         int nh_ofs;
331         struct sw_flow_key key;
332         int n_actions = ntohs(opo->n_actions);
333         int act_len = n_actions * sizeof opo->actions[0];
334
335         if (act_len > (ntohs(opo->header.length) - sizeof *opo)) {
336                 if (net_ratelimit()) 
337                         printk("message too short for number of actions\n");
338                 return -EINVAL;
339         }
340
341         if (ntohl(opo->buffer_id) == (uint32_t) -1) {
342                 int data_len = ntohs(opo->header.length) - sizeof *opo - act_len;
343
344                 /* FIXME: there is likely a way to reuse the data in msg. */
345                 skb = alloc_skb(data_len, GFP_ATOMIC);
346                 if (!skb)
347                         return -ENOMEM;
348
349                 /* FIXME?  We don't reserve NET_IP_ALIGN or NET_SKB_PAD since
350                  * we're just transmitting this raw without examining anything
351                  * at those layers. */
352                 memcpy(skb_put(skb, data_len), &opo->actions[n_actions], data_len);
353
354                 skb_set_mac_header(skb, 0);
355                 mac = vlan_eth_hdr(skb);
356                 if (likely(mac->h_vlan_proto != htons(ETH_P_8021Q)))
357                         nh_ofs = sizeof(struct ethhdr);
358                 else
359                         nh_ofs = sizeof(struct vlan_ethhdr);
360                 skb_set_network_header(skb, nh_ofs);
361         } else {
362                 skb = retrieve_skb(ntohl(opo->buffer_id));
363                 if (!skb)
364                         return -ESRCH;
365         }
366
367         dp_set_origin(chain->dp, ntohs(opo->in_port), skb);
368
369         flow_extract(skb, ntohs(opo->in_port), &key);
370         execute_actions(chain->dp, skb, &key, opo->actions, n_actions);
371
372         return 0;
373 }
374
375 static int
376 recv_port_mod(struct sw_chain *chain, const struct sender *sender,
377               const void *msg)
378 {
379         const struct ofp_port_mod *opm = msg;
380
381         dp_update_port_flags(chain->dp, &opm->desc);
382
383         return 0;
384 }
385
386 static int
387 recv_echo_request(struct sw_chain *chain, const struct sender *sender,
388                   const void *msg) 
389 {
390         return dp_send_echo_reply(chain->dp, sender, msg);
391 }
392
/* OFPT_ECHO_REPLY handler: nothing to do, replies are informational. */
static int
recv_echo_reply(struct sw_chain *chain, const struct sender *sender,
                  const void *msg)
{
        return 0;
}
399
/* Creates a flow table entry from flow-mod message 'ofm' and inserts it into
 * 'chain'.  If the message names a buffered packet, that packet is run
 * through the new flow's actions.  Returns 0 on success or a negative errno;
 * on failure any referenced packet buffer is discarded. */
static int
add_flow(struct sw_chain *chain, const struct ofp_flow_mod *ofm)
{
        int error = -ENOMEM;
        int i;
        int n_actions;
        struct sw_flow *flow;


        /* To prevent loops, make sure there's no action to send to the
         * OFP_TABLE virtual port.
         */
        /* The action count is derived from the message length: everything
         * after the fixed ofp_flow_mod header is an array of ofp_action. */
        n_actions = (ntohs(ofm->header.length) - sizeof *ofm)
                        / sizeof *ofm->actions;
        for (i=0; i<n_actions; i++) {
                const struct ofp_action *a = &ofm->actions[i];

                /* Both sides of each comparison are network byte order. */
                if (a->type == htons(OFPAT_OUTPUT)
                                        && (a->arg.output.port == htons(OFPP_TABLE)
                                                || a->arg.output.port == htons(OFPP_NONE)
                                                || a->arg.output.port == ofm->match.in_port)) {
                        /* xxx Send fancy new error message? */
                        goto error;
                }
        }

        /* Allocate memory. */
        flow = flow_alloc(n_actions, GFP_ATOMIC);
        if (flow == NULL)
                goto error;

        /* Fill out flow. */
        flow_extract_match(&flow->key, &ofm->match);
        /* Exact-match (no wildcards) flows get maximum priority (-1 wraps). */
        flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
        flow->idle_timeout = ntohs(ofm->idle_timeout);
        flow->hard_timeout = ntohs(ofm->hard_timeout);
        flow->used = jiffies;
        flow->n_actions = n_actions;
        flow->init_time = jiffies;
        flow->byte_count = 0;
        flow->packet_count = 0;
        spin_lock_init(&flow->lock);
        memcpy(flow->actions, ofm->actions, n_actions * sizeof *flow->actions);

        /* Act. */
        error = chain_insert(chain, flow);
        if (error)
                goto error_free_flow;
        error = 0;
        /* A buffer_id of all-ones means "no buffered packet attached". */
        if (ntohl(ofm->buffer_id) != (uint32_t) -1) {
                struct sk_buff *skb = retrieve_skb(ntohl(ofm->buffer_id));
                if (skb) {
                        struct sw_flow_key key;
                        flow_used(flow, skb);
                        flow_extract(skb, ntohs(ofm->match.in_port), &key);
                        execute_actions(chain->dp, skb, &key, ofm->actions, n_actions);
                }
                else
                        error = -ESRCH;
        }
        return error;

error_free_flow:
        flow_free(flow);
error:
        /* Don't leak the buffered packet when the flow was rejected. */
        if (ntohl(ofm->buffer_id) != (uint32_t) -1)
                discard_skb(ntohl(ofm->buffer_id));
        return error;
}
469
470 static int
471 recv_flow(struct sw_chain *chain, const struct sender *sender, const void *msg)
472 {
473         const struct ofp_flow_mod *ofm = msg;
474         uint16_t command = ntohs(ofm->command);
475
476         if (command == OFPFC_ADD) {
477                 return add_flow(chain, ofm);
478         }  else if (command == OFPFC_DELETE) {
479                 struct sw_flow_key key;
480                 flow_extract_match(&key, &ofm->match);
481                 return chain_delete(chain, &key, 0, 0) ? 0 : -ESRCH;
482         } else if (command == OFPFC_DELETE_STRICT) {
483                 struct sw_flow_key key;
484                 uint16_t priority;
485                 flow_extract_match(&key, &ofm->match);
486                 priority = key.wildcards ? ntohs(ofm->priority) : -1;
487                 return chain_delete(chain, &key, priority, 1) ? 0 : -ESRCH;
488         } else {
489                 return -ENOTSUPP;
490         }
491 }
492
493 /* 'msg', which is 'length' bytes long, was received across Netlink from
494  * 'sender'.  Apply it to 'chain'. */
495 int
496 fwd_control_input(struct sw_chain *chain, const struct sender *sender,
497                   const void *msg, size_t length)
498 {
499
500         struct openflow_packet {
501                 size_t min_size;
502                 int (*handler)(struct sw_chain *, const struct sender *,
503                                const void *);
504         };
505
506         static const struct openflow_packet packets[] = {
507                 [OFPT_FEATURES_REQUEST] = {
508                         sizeof (struct ofp_header),
509                         recv_features_request,
510                 },
511                 [OFPT_GET_CONFIG_REQUEST] = {
512                         sizeof (struct ofp_header),
513                         recv_get_config_request,
514                 },
515                 [OFPT_SET_CONFIG] = {
516                         sizeof (struct ofp_switch_config),
517                         recv_set_config,
518                 },
519                 [OFPT_PACKET_OUT] = {
520                         sizeof (struct ofp_packet_out),
521                         recv_packet_out,
522                 },
523                 [OFPT_FLOW_MOD] = {
524                         sizeof (struct ofp_flow_mod),
525                         recv_flow,
526                 },
527                 [OFPT_PORT_MOD] = {
528                         sizeof (struct ofp_port_mod),
529                         recv_port_mod,
530                 },
531                 [OFPT_ECHO_REQUEST] = {
532                         sizeof (struct ofp_header),
533                         recv_echo_request,
534                 },
535                 [OFPT_ECHO_REPLY] = {
536                         sizeof (struct ofp_header),
537                         recv_echo_reply,
538                 },
539         };
540
541         const struct openflow_packet *pkt;
542         struct ofp_header *oh;
543
544         oh = (struct ofp_header *) msg;
545         if (oh->version != OFP_VERSION || oh->type >= ARRAY_SIZE(packets)
546                 || ntohs(oh->length) > length)
547                 return -EINVAL;
548
549         pkt = &packets[oh->type];
550         if (!pkt->handler)
551                 return -ENOSYS;
552         if (length < pkt->min_size)
553                 return -EFAULT;
554
555         return pkt->handler(chain, sender, msg);
556 }
557
/* Packet buffering. */

/* Minimum age before a buffered packet may be overwritten by a newer one. */
#define OVERWRITE_SECS  1
#define OVERWRITE_JIFFIES (OVERWRITE_SECS * HZ)

/* One slot in the circular buffer of packets parked while the controller
 * decides what to do with them. */
struct packet_buffer {
        struct sk_buff *skb;            /* Buffered packet, or NULL if free. */
        uint32_t cookie;                /* Generation count; guards stale ids. */
        unsigned long exp_jiffies;      /* When the slot may be reused. */
};

static struct packet_buffer buffers[N_PKT_BUFFERS];
static unsigned int buffer_idx;         /* Most recently used slot (round-robin). */
static DEFINE_SPINLOCK(buffer_lock);    /* Protects buffers[] and buffer_idx. */
572
/* Stores a reference to 'skb' (via skb_get()) in the circular packet buffer
 * and returns an id encoding both the slot index and a per-slot cookie, for
 * later lookup by retrieve_skb()/discard_skb().  Returns (uint32_t) -1 --
 * never a valid id -- when the chosen slot is too fresh to overwrite. */
uint32_t fwd_save_skb(struct sk_buff *skb)
{
        struct sk_buff *old_skb = NULL;
        struct packet_buffer *p;
        unsigned long int flags;
        uint32_t id;

        spin_lock_irqsave(&buffer_lock, flags);
        /* Round-robin slot selection. */
        buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
        p = &buffers[buffer_idx];
        if (p->skb) {
                /* Don't buffer packet if existing entry is less than
                 * OVERWRITE_SECS old. */
                if (time_before(jiffies, p->exp_jiffies)) {
                        spin_unlock_irqrestore(&buffer_lock, flags);
                        return -1;
                } else {
                        /* Defer kfree_skb() until interrupts re-enabled. */
                        old_skb = p->skb;
                }
        }
        /* Don't use maximum cookie value since the all-bits-1 id is
         * special. */
        if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
                p->cookie = 0;
        /* Take our own reference; the caller keeps (and may free) its own. */
        skb_get(skb);
        p->skb = skb;
        p->exp_jiffies = jiffies + OVERWRITE_JIFFIES;
        id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
        spin_unlock_irqrestore(&buffer_lock, flags);

        if (old_skb)
                kfree_skb(old_skb);

        return id;
}
609
610 static struct sk_buff *retrieve_skb(uint32_t id)
611 {
612         unsigned long int flags;
613         struct sk_buff *skb = NULL;
614         struct packet_buffer *p;
615
616         spin_lock_irqsave(&buffer_lock, flags);
617         p = &buffers[id & PKT_BUFFER_MASK];
618         if (p->cookie == id >> PKT_BUFFER_BITS) {
619                 skb = p->skb;
620                 p->skb = NULL;
621         } else {
622                 printk("cookie mismatch: %x != %x\n",
623                                 id >> PKT_BUFFER_BITS, p->cookie);
624         }
625         spin_unlock_irqrestore(&buffer_lock, flags);
626
627         return skb;
628 }
629
630 void fwd_discard_all(void) 
631 {
632         int i;
633
634         for (i = 0; i < N_PKT_BUFFERS; i++) {
635                 struct sk_buff *skb;
636                 unsigned long int flags;
637
638                 /* Defer kfree_skb() until interrupts re-enabled. */
639                 spin_lock_irqsave(&buffer_lock, flags);
640                 skb = buffers[i].skb;
641                 buffers[i].skb = NULL;
642                 spin_unlock_irqrestore(&buffer_lock, flags);
643
644                 kfree_skb(skb);
645         }
646 }
647
648 static void discard_skb(uint32_t id)
649 {
650         struct sk_buff *old_skb = NULL;
651         unsigned long int flags;
652         struct packet_buffer *p;
653
654         spin_lock_irqsave(&buffer_lock, flags);
655         p = &buffers[id & PKT_BUFFER_MASK];
656         if (p->cookie == id >> PKT_BUFFER_BITS) {
657                 /* Defer kfree_skb() until interrupts re-enabled. */
658                 old_skb = p->skb;
659                 p->skb = NULL;
660         }
661         spin_unlock_irqrestore(&buffer_lock, flags);
662
663         if (old_skb)
664                 kfree_skb(old_skb);
665 }
666
/* Module teardown hook: release any packets still buffered. */
void fwd_exit(void)
{
        fwd_discard_all();
}
671
/* Utility functions. */

/* Makes '*pskb' writable, possibly copying it and setting '*pskb' to point to
 * the copy.
 * Returns 1 if successful, 0 on failure. */
static int
make_writable(struct sk_buff **pskb)
{
        /* Based on skb_make_writable() in net/netfilter/core.c. */
        struct sk_buff *nskb;

        /* Not exclusive use of packet?  Must copy. */
        if (skb_shared(*pskb) || skb_cloned(*pskb))
                goto copy_skb;

        /* Exclusively owned: just ensure the headers the setters rewrite
         * are in the linear data area.  NOTE(review): 40 bytes presumably
         * covers Ethernet + VLAN + IP + transport ports -- confirm. */
        return pskb_may_pull(*pskb, 40); /* FIXME? */

copy_skb:
        /* skb_copy() yields a private, fully linear copy. */
        nskb = skb_copy(*pskb, GFP_ATOMIC);
        if (!nskb)
                return 0;
        BUG_ON(skb_is_nonlinear(nskb));

        /* Rest of kernel will get very unhappy if we pass it a
           suddenly-orphaned skbuff */
        if ((*pskb)->sk)
                skb_set_owner_w(nskb, (*pskb)->sk);
        /* Drop our reference to the original; caller's pointer is replaced. */
        kfree_skb(*pskb);
        *pskb = nskb;
        return 1;
}