/*
 * Copyright (C)2003,2004 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Authors      Mitsuru KANDA  <mk@linux-ipv6.org>
 *              YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 * Based on net/ipv4/xfrm4_tunnel.c
 *
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/xfrm.h>
#include <linux/list.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>

#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
# define X6TDEBUG       3
#else
# define X6TDEBUG       1
#endif

#define X6TPRINTK(fmt, args...)         printk(fmt, ## args)
#define X6TNOPRINTK(fmt, args...)       do { ; } while(0)

#if X6TDEBUG >= 1
# define X6TPRINTK1     X6TPRINTK
#else
# define X6TPRINTK1     X6TNOPRINTK
#endif

#if X6TDEBUG >= 3
# define X6TPRINTK3     X6TPRINTK
#else
# define X6TPRINTK3     X6TNOPRINTK
#endif

/* Defined before struct xfrm6_tunnel_spi so that the magic member below
 * is actually compiled in on debug builds. */
#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
# define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef
#endif

/*
 * The xfrm6_tunnel_spi code allocates a unique id ("spi") per
 * xfrm_address_t (keyed on the tunnel's source address).
 */
struct xfrm6_tunnel_spi {
        struct hlist_node list_byaddr;
        struct hlist_node list_byspi;
        xfrm_address_t addr;
        u32 spi;
        atomic_t refcnt;
#ifdef XFRM6_TUNNEL_SPI_MAGIC
        u32 magic;
#endif
};

static rwlock_t xfrm6_tunnel_spi_lock = RW_LOCK_UNLOCKED;

static u32 xfrm6_tunnel_spi;

#define XFRM6_TUNNEL_SPI_MIN    1
#define XFRM6_TUNNEL_SPI_MAX    0xffffffff

static kmem_cache_t *xfrm6_tunnel_spi_kmem;

#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256

static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];

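/*
 * Debug-only sanity check: verify that an xfrm6_tunnel_spi object still
 * carries the magic value written at allocation time and complain via
 * X6TPRINTK3 if it does not.  Compiles down to a no-op when
 * XFRM6_TUNNEL_SPI_MAGIC is not defined.
 */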
#ifdef XFRM6_TUNNEL_SPI_MAGIC
static int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
                             const char *name)
{
        if (unlikely(x6spi->magic != XFRM6_TUNNEL_SPI_MAGIC)) {
                X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
                                      "at %p has corrupted magic %08x "
                                      "(should be %08x)\n",
                           name, x6spi, x6spi->magic, XFRM6_TUNNEL_SPI_MAGIC);
                return -1;
        }
        return 0;
}
#else
static inline int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
                                    const char *name)
{
        return 0;
}
#endif

#define X6SPI_CHECK_MAGIC(x6spi) x6spi_check_magic((x6spi), __FUNCTION__)

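/*
 * Hash helpers for the two SPI tables: the by-address hash XORs the four
 * 32-bit words of the IPv6 address, folds the result with shifts and masks
 * it to the (power-of-two) by-address table size; the by-SPI hash is a
 * simple modulo over the by-SPI table size.
 */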
static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
{
        unsigned h;

        X6TPRINTK3(KERN_DEBUG "%s(addr=%p)\n", __FUNCTION__, addr);

        h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3];
        h ^= h >> 16;
        h ^= h >> 8;
        h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;

        X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, h);

        return h;
}

static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
{
        return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}

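/*
 * Set up the SPI allocator: create the slab cache for xfrm6_tunnel_spi
 * objects and initialize both hash tables.  Called from
 * xfrm6_tunnel_init(); returns -ENOMEM if the cache cannot be created.
 */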
static int xfrm6_tunnel_spi_init(void)
{
        int i;

        X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

        xfrm6_tunnel_spi = 0;
        xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
                                                  sizeof(struct xfrm6_tunnel_spi),
                                                  0, SLAB_HWCACHE_ALIGN,
                                                  NULL, NULL);
        if (!xfrm6_tunnel_spi_kmem) {
                X6TPRINTK1(KERN_ERR
                           "%s(): failed to allocate xfrm6_tunnel_spi_kmem\n",
                           __FUNCTION__);
                return -ENOMEM;
        }

        for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
                INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
        for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
                INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
        return 0;
}

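/*
 * Tear down the SPI allocator.  Both hash tables are expected to be empty
 * by now; if they are not, the slab cache is left in place and an error is
 * logged rather than freeing live entries.
 */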
static void xfrm6_tunnel_spi_fini(void)
{
        int i;

        X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

        for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
                if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
                        goto err;
        }
        for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
                if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
                        goto err;
        }
        kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
        xfrm6_tunnel_spi_kmem = NULL;
        return;
err:
        X6TPRINTK1(KERN_ERR "%s(): table is not empty\n", __FUNCTION__);
        return;
}

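/*
 * Find the xfrm6_tunnel_spi entry for a given source address in the
 * by-address hash table, or return NULL.  Callers hold
 * xfrm6_tunnel_spi_lock (for reading or writing).
 */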
static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
        struct xfrm6_tunnel_spi *x6spi;
        struct hlist_node *pos;

        X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

        hlist_for_each_entry(x6spi, pos,
                             &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
                             list_byaddr) {
                if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
                        X6SPI_CHECK_MAGIC(x6spi);
                        X6TPRINTK3(KERN_DEBUG "%s() = %p(%u)\n", __FUNCTION__, x6spi, x6spi->spi);
                        return x6spi;
                }
        }

        X6TPRINTK3(KERN_DEBUG "%s() = NULL(0)\n", __FUNCTION__);
        return NULL;
}

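/*
 * Public lookup: return the SPI currently associated with saddr, or 0 if
 * none has been allocated.  Takes the SPI lock for reading only and does
 * not bump the entry's refcount.
 */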
u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
        struct xfrm6_tunnel_spi *x6spi;
        u32 spi;

        X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

        read_lock_bh(&xfrm6_tunnel_spi_lock);
        x6spi = __xfrm6_tunnel_spi_lookup(saddr);
        spi = x6spi ? x6spi->spi : 0;
        read_unlock_bh(&xfrm6_tunnel_spi_lock);
        return spi;
}

EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);

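/*
 * Allocate a new SPI for saddr.  The search starts just after the last
 * value handed out and wraps around once, so values in
 * [XFRM6_TUNNEL_SPI_MIN, XFRM6_TUNNEL_SPI_MAX] are reused only after the
 * space is exhausted.  Returns 0 when no free SPI is found or the slab
 * allocation fails.  Called with xfrm6_tunnel_spi_lock held for writing.
 */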
static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
        u32 spi;
        struct xfrm6_tunnel_spi *x6spi;
        struct hlist_node *pos;
        unsigned index;

        X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

        if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
            xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
                xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
        else
                xfrm6_tunnel_spi++;

        for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
                index = xfrm6_tunnel_spi_hash_byspi(spi);
                hlist_for_each_entry(x6spi, pos,
                                     &xfrm6_tunnel_spi_byspi[index],
                                     list_byspi) {
                        if (x6spi->spi == spi)
                                goto try_next_1;
                }
                xfrm6_tunnel_spi = spi;
                goto alloc_spi;
try_next_1:;
        }
        for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) {
                index = xfrm6_tunnel_spi_hash_byspi(spi);
                hlist_for_each_entry(x6spi, pos,
                                     &xfrm6_tunnel_spi_byspi[index],
                                     list_byspi) {
                        if (x6spi->spi == spi)
                                goto try_next_2;
                }
                xfrm6_tunnel_spi = spi;
                goto alloc_spi;
try_next_2:;
        }
        spi = 0;
        goto out;
alloc_spi:
        X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for "
                              "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
                              __FUNCTION__,
                              NIP6(*(struct in6_addr *)saddr));
        x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
        if (!x6spi) {
                X6TPRINTK1(KERN_ERR "%s(): kmem_cache_alloc() failed\n",
                           __FUNCTION__);
                spi = 0;
                goto out;
        }
#ifdef XFRM6_TUNNEL_SPI_MAGIC
        x6spi->magic = XFRM6_TUNNEL_SPI_MAGIC;
#endif
        memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
        x6spi->spi = spi;
        atomic_set(&x6spi->refcnt, 1);

        hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]);

        index = xfrm6_tunnel_spi_hash_byaddr(saddr);
        hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
        X6SPI_CHECK_MAGIC(x6spi);
out:
        X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
        return spi;
}

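/*
 * Public allocator: hand back the existing SPI for saddr (taking a
 * reference) or allocate a fresh one, all under the write lock.
 */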
u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
        struct xfrm6_tunnel_spi *x6spi;
        u32 spi;

        X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

        write_lock_bh(&xfrm6_tunnel_spi_lock);
        x6spi = __xfrm6_tunnel_spi_lookup(saddr);
        if (x6spi) {
                atomic_inc(&x6spi->refcnt);
                spi = x6spi->spi;
        } else
                spi = __xfrm6_tunnel_alloc_spi(saddr);
        write_unlock_bh(&xfrm6_tunnel_spi_lock);

        X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);

        return spi;
}

EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);

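/*
 * Drop a reference on the SPI entry for saddr; when the last reference goes
 * away the entry is unhashed from both tables and returned to the slab
 * cache.
 */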
void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
{
        struct xfrm6_tunnel_spi *x6spi;
        struct hlist_node *pos, *n;

        X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);

        write_lock_bh(&xfrm6_tunnel_spi_lock);

        hlist_for_each_entry_safe(x6spi, pos, n,
                                  &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
                                  list_byaddr)
        {
                if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
                        X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
                                              "for %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
                                              "found at %p\n",
                                   __FUNCTION__,
                                   NIP6(*(struct in6_addr *)saddr),
                                   x6spi);
                        X6SPI_CHECK_MAGIC(x6spi);
                        if (atomic_dec_and_test(&x6spi->refcnt)) {
                                hlist_del(&x6spi->list_byaddr);
                                hlist_del(&x6spi->list_byspi);
                                kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi);
                                break;
                        }
                }
        }
        write_unlock_bh(&xfrm6_tunnel_spi_lock);
}

EXPORT_SYMBOL(xfrm6_tunnel_free_spi);

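/*
 * Check whether the packet still fits the path MTU once the outer IPv6
 * header is added.  If not, send an ICMPv6 Packet Too Big back and return
 * -EMSGSIZE so the output path drops the packet.
 */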
int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
        int mtu, ret = 0;
        struct dst_entry *dst = skb->dst;

        mtu = dst_pmtu(dst) - sizeof(struct ipv6hdr);
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;

        if (skb->len > mtu) {
                icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
                ret = -EMSGSIZE;
        }

        return ret;
}

EXPORT_SYMBOL(xfrm6_tunnel_check_size);

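/*
 * Transform output: push an outer IPv6 header that copies the inner
 * header's traffic class, flow label and hop limit, with the tunnel
 * endpoints from the xfrm_state as source and destination.  Note that the
 * spin_unlock_bh() below implies the caller enters with x->lock held.
 */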
static int xfrm6_tunnel_output(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct dst_entry *dst = skb->dst;
        struct xfrm_state *x = dst->xfrm;
        struct ipv6hdr *iph, *top_iph;
        int err;

        if ((err = xfrm6_tunnel_check_size(skb)) != 0)
                goto error_nolock;

        iph = skb->nh.ipv6h;

        top_iph = (struct ipv6hdr *)skb_push(skb, x->props.header_len);
        top_iph->version = 6;
        top_iph->priority = iph->priority;
        top_iph->flow_lbl[0] = iph->flow_lbl[0];
        top_iph->flow_lbl[1] = iph->flow_lbl[1];
        top_iph->flow_lbl[2] = iph->flow_lbl[2];
        top_iph->nexthdr = IPPROTO_IPV6;
        top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
        top_iph->hop_limit = iph->hop_limit;
        memcpy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr, sizeof(struct in6_addr));
        memcpy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr, sizeof(struct in6_addr));
        skb->nh.raw = skb->data;
        skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr);

        x->curlft.bytes += skb->len;
        x->curlft.packets++;

        spin_unlock_bh(&x->lock);

        if ((skb->dst = dst_pop(dst)) == NULL) {
                err = -EHOSTUNREACH;
                goto error_nolock;
        }

        return NET_XMIT_BYPASS;

error_nolock:
        kfree_skb(skb);
        return err;
}

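/*
 * Transform input: skb->data already points at the inner IPv6 header here,
 * so re-point the network header there, record the old network header as
 * the mac header, drop the cached route and feed the decapsulated packet
 * back into the stack via netif_rx().
 */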
static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
{
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                return -EINVAL;

        skb->mac.raw = skb->nh.raw;
        skb->nh.raw = skb->data;
        dst_release(skb->dst);
        skb->dst = NULL;
        skb->protocol = htons(ETH_P_IPV6);
        skb->pkt_type = PACKET_HOST;
        netif_rx(skb);

        return 0;
}

static struct xfrm6_tunnel *xfrm6_tunnel_handler;
static DECLARE_MUTEX(xfrm6_tunnel_sem);

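/*
 * A single external handler may register here (the comments in
 * xfrm6_tunnel_rcv()/xfrm6_tunnel_err() suggest the device-like ip6ip6
 * tunnel code) to get first crack at incoming packets and ICMP errors
 * before the native xfrm6_tunnel processing runs.  Registration is
 * serialized by xfrm6_tunnel_sem.
 */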
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler)
{
        int ret;

        down(&xfrm6_tunnel_sem);
        ret = 0;
        if (xfrm6_tunnel_handler != NULL)
                ret = -EINVAL;
        if (!ret)
                xfrm6_tunnel_handler = handler;
        up(&xfrm6_tunnel_sem);

        return ret;
}

EXPORT_SYMBOL(xfrm6_tunnel_register);

int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler)
{
        int ret;

        down(&xfrm6_tunnel_sem);
        ret = 0;
        if (xfrm6_tunnel_handler != handler)
                ret = -EINVAL;
        if (!ret)
                xfrm6_tunnel_handler = NULL;
        up(&xfrm6_tunnel_sem);

        synchronize_net();

        return ret;
}

EXPORT_SYMBOL(xfrm6_tunnel_deregister);

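/*
 * Protocol handler for IPv6-in-IPv6.  Offer the packet to the registered
 * device handler first; if it is not consumed, look up the xfrm_state
 * keyed by the SPI derived from the outer source address and run the
 * normal transform input path.
 */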
static int xfrm6_tunnel_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
{
        struct sk_buff *skb = *pskb;
        struct xfrm6_tunnel *handler = xfrm6_tunnel_handler;
        struct xfrm_state *x = NULL;
        struct ipv6hdr *iph = skb->nh.ipv6h;
        int err = 0;
        u32 spi;

        /* let a device-like ip6ip6 handler process the packet first */
        if (handler) {
                err = handler->handler(pskb, nhoffp);
                if (!err)
                        goto out;
        }

        spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
        x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr,
                        spi,
                        IPPROTO_IPV6, AF_INET6);

        if (!x)
                goto drop;

        spin_lock(&x->lock);

        if (unlikely(x->km.state != XFRM_STATE_VALID))
                goto drop_unlock;

        err = xfrm6_tunnel_input(x, NULL, skb);
        if (err)
                goto drop_unlock;

        x->curlft.bytes += skb->len;
        x->curlft.packets++;
        spin_unlock(&x->lock);
        xfrm_state_put(x);

out:
        return 0;

drop_unlock:
        spin_unlock(&x->lock);
        xfrm_state_put(x);
drop:
        kfree_skb(skb);
        return -1;
}

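/*
 * ICMPv6 error handler.  A registered device handler, if any, gets the
 * error first; otherwise the native handling below only logs a debug
 * message for the interesting ICMPv6 types and does not update any state.
 */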
static void xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                             int type, int code, int offset, __u32 info)
{
        struct xfrm6_tunnel *handler = xfrm6_tunnel_handler;

        /* call here first for device-like ip6ip6 err handling */
        if (handler) {
                handler->err_handler(skb, opt, type, code, offset, info);
                return;
        }

        /* xfrm6_tunnel native err handling */
        switch (type) {
        case ICMPV6_DEST_UNREACH:
                switch (code) {
                case ICMPV6_NOROUTE:
                case ICMPV6_ADM_PROHIBITED:
                case ICMPV6_NOT_NEIGHBOUR:
                case ICMPV6_ADDR_UNREACH:
                case ICMPV6_PORT_UNREACH:
                default:
                        X6TPRINTK3(KERN_DEBUG
                                   "xfrm6_tunnel: Destination Unreach.\n");
                        break;
                }
                break;
        case ICMPV6_PKT_TOOBIG:
                X6TPRINTK3(KERN_DEBUG
                           "xfrm6_tunnel: Packet Too Big.\n");
                break;
        case ICMPV6_TIME_EXCEED:
                switch (code) {
                case ICMPV6_EXC_HOPLIMIT:
                        X6TPRINTK3(KERN_DEBUG
                                   "xfrm6_tunnel: Too small Hoplimit.\n");
                        break;
                case ICMPV6_EXC_FRAGTIME:
                default:
                        break;
                }
                break;
        case ICMPV6_PARAMPROB:
                switch (code) {
                case ICMPV6_HDR_FIELD: break;
                case ICMPV6_UNK_NEXTHDR: break;
                case ICMPV6_UNK_OPTION: break;
                }
                break;
        default:
                break;
        }
        return;
}

static int xfrm6_tunnel_init_state(struct xfrm_state *x, void *args)
{
        if (!x->props.mode)
                return -EINVAL;

        x->props.header_len = sizeof(struct ipv6hdr);

        return 0;
}

static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
        xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
}

static struct xfrm_type xfrm6_tunnel_type = {
        .description    = "IP6IP6",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_IPV6,
        .init_state     = xfrm6_tunnel_init_state,
        .destructor     = xfrm6_tunnel_destroy,
        .input          = xfrm6_tunnel_input,
        .output         = xfrm6_tunnel_output,
};

static struct inet6_protocol xfrm6_tunnel_protocol = {
        .handler        = xfrm6_tunnel_rcv,
        .err_handler    = xfrm6_tunnel_err,
        .flags          = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

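/*
 * Module init/exit: register the IP6IP6 xfrm type and the IPPROTO_IPV6
 * inet6 protocol handler, then bring up the SPI allocator, unwinding the
 * earlier steps if a later one fails.
 */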
void __init xfrm6_tunnel_init(void)
{
        X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

        if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) {
                X6TPRINTK1(KERN_ERR
                           "xfrm6_tunnel init: can't add xfrm type\n");
                return;
        }
        if (inet6_add_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6) < 0) {
                X6TPRINTK1(KERN_ERR
                           "xfrm6_tunnel init: can't add protocol\n");
                xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
                return;
        }
        if (xfrm6_tunnel_spi_init() < 0) {
                X6TPRINTK1(KERN_ERR
                           "xfrm6_tunnel init: failed to initialize spi\n");
                inet6_del_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6);
                xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
                return;
        }
}

void __exit xfrm6_tunnel_fini(void)
{
        X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);

        xfrm6_tunnel_spi_fini();
        if (inet6_del_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6) < 0)
                X6TPRINTK1(KERN_ERR
                           "xfrm6_tunnel close: can't remove protocol\n");
        if (xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6) < 0)
                X6TPRINTK1(KERN_ERR
                           "xfrm6_tunnel close: can't remove xfrm type\n");
}