2 * Copyright (C)2003,2004 USAGI/WIDE Project
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 * Authors Mitsuru KANDA <mk@linux-ipv6.org>
19 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
21 * Based on net/ipv4/xfrm4_tunnel.c
24 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/xfrm.h>
27 #include <linux/list.h>
31 #include <net/protocol.h>
32 #include <linux/ipv6.h>
33 #include <linux/icmpv6.h>
35 #ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
/*
 * Debug printk wrappers: X6TPRINTK emits via printk(); X6TNOPRINTK expands
 * to a do-nothing statement.  X6TPRINTK1 (error-level messages) and
 * X6TPRINTK3 (verbose tracing) alias one or the other depending on the
 * debug configuration.
 * NOTE(review): the #else/#endif lines between the alternatives are elided
 * in this excerpt.
 */
41 #define X6TPRINTK(fmt, args...) printk(fmt, ## args)
42 #define X6TNOPRINTK(fmt, args...) do { ; } while(0)
45 # define X6TPRINTK1 X6TPRINTK
47 # define X6TPRINTK1 X6TNOPRINTK
51 # define X6TPRINTK3 X6TPRINTK
53 # define X6TPRINTK3 X6TNOPRINTK
57 * xfrm_tunnel_spi things are for allocating unique id ("spi")
/*
 * One allocated tunnel SPI.  Each object is linked into two hash tables:
 * one keyed by the tunnel's source address, one keyed by the SPI value.
 * NOTE(review): the remaining members (address, spi, refcnt, debug magic —
 * referenced by the functions below) are elided in this excerpt.
 */
60 struct xfrm6_tunnel_spi {
/* chain linkage for xfrm6_tunnel_spi_byaddr[] */
61 struct hlist_node list_byaddr;
/* chain linkage for xfrm6_tunnel_spi_byspi[] */
62 struct hlist_node list_byspi;
66 #ifdef XFRM6_TUNNEL_SPI_MAGIC
/* Magic value stamped into each x6spi object when debug checking is on. */
71 #ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
72 # define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef
/* Protects both SPI hash tables and the xfrm6_tunnel_spi counter below. */
75 static rwlock_t xfrm6_tunnel_spi_lock = RW_LOCK_UNLOCKED;
/* Most recently allocated SPI; the allocator scans upward from here. */
77 static u32 xfrm6_tunnel_spi;
/* Valid range for allocated tunnel SPIs (0 is reserved as "no SPI"). */
79 #define XFRM6_TUNNEL_SPI_MIN 1
80 #define XFRM6_TUNNEL_SPI_MAX 0xffffffff
/* Slab cache for struct xfrm6_tunnel_spi objects. */
82 static kmem_cache_t *xfrm6_tunnel_spi_kmem;
/* Hash table sizes; BYADDR is a power of two (hash masks with HSIZE - 1). */
84 #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
85 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
/* The two lookup tables: by tunnel source address and by SPI value. */
87 static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
88 static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
90 #ifdef XFRM6_TUNNEL_SPI_MAGIC
/*
 * Debug build: verify the magic stamped into an x6spi object and log a
 * debug message naming the calling function if it is corrupted.
 * NOTE(review): the return statements and closing braces of both variants
 * are elided in this excerpt.
 */
91 static int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
94 if (unlikely(x6spi->magic != XFRM6_TUNNEL_SPI_MAGIC)) {
95 X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
96 "at %p has corrupted magic %08x "
98 name, x6spi, x6spi->magic, XFRM6_TUNNEL_SPI_MAGIC);
/* Non-debug build: inline stub that performs no check. */
104 static int inline x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi,
/* Convenience wrapper that passes the caller's function name for logging. */
111 #define X6SPI_CHECK_MAGIC(x6spi) x6spi_check_magic((x6spi), __FUNCTION__)
/*
 * Hash an IPv6 address into a by-address bucket index: XOR the four 32-bit
 * words of the address, then mask down to BYADDR_HSIZE - 1.
 * NOTE(review): an intermediate folding step and the return statement are
 * elided in this excerpt.
 */
114 static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
118 X6TPRINTK3(KERN_DEBUG "%s(addr=%p)\n", __FUNCTION__, addr);
120 h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3];
123 h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;
125 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, h);
/* Map an SPI value to its bucket in the by-SPI hash table. */
130 static unsigned inline xfrm6_tunnel_spi_hash_byspi(u32 spi)
132 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
/*
 * One-time setup for the SPI allocator: reset the SPI counter, create the
 * slab cache for x6spi objects, and initialize every bucket of both hash
 * tables.  Returns nonzero on cache-creation failure (error-return lines
 * are elided in this excerpt).
 */
136 static int xfrm6_tunnel_spi_init(void)
140 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
142 xfrm6_tunnel_spi = 0;
143 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
144 sizeof(struct xfrm6_tunnel_spi),
145 0, SLAB_HWCACHE_ALIGN,
/* Cache creation can fail at boot; report and bail out. */
147 if (!xfrm6_tunnel_spi_kmem) {
149 "%s(): failed to allocate xfrm6_tunnel_spi_kmem\n",
154 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
155 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
156 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
157 INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
/*
 * Teardown counterpart of xfrm6_tunnel_spi_init(): refuse to destroy the
 * slab cache while any hash bucket still holds entries (the non-empty
 * branches presumably jump to the "table is not empty" error path — the
 * goto/labels are elided in this excerpt).
 */
161 static void xfrm6_tunnel_spi_fini(void)
165 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
167 for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
168 if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
171 for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
172 if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
/* Both tables empty: safe to release the cache. */
175 kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
176 xfrm6_tunnel_spi_kmem = NULL;
/* Error path: some entries were still linked at shutdown. */
179 X6TPRINTK1(KERN_ERR "%s(): table is not empty\n", __FUNCTION__);
/*
 * Find the x6spi entry for a tunnel source address by walking its
 * by-address hash chain.  Caller must hold xfrm6_tunnel_spi_lock
 * (see the locked wrappers below).  Returns the entry, or NULL if the
 * address has no SPI allocated (return statements elided in this excerpt).
 */
183 static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
185 struct xfrm6_tunnel_spi *x6spi;
186 struct hlist_node *pos;
188 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
190 hlist_for_each_entry(x6spi, pos,
191 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
/* Full-address compare; hash collisions make this necessary. */
193 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
194 X6SPI_CHECK_MAGIC(x6spi);
195 X6TPRINTK3(KERN_DEBUG "%s() = %p(%u)\n", __FUNCTION__, x6spi, x6spi->spi);
200 X6TPRINTK3(KERN_DEBUG "%s() = NULL(0)\n", __FUNCTION__);
/*
 * Public lookup: return the SPI allocated for @saddr, or 0 if none.
 * Takes the table lock for reading around the unlocked helper.
 */
204 u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
206 struct xfrm6_tunnel_spi *x6spi;
209 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
211 read_lock_bh(&xfrm6_tunnel_spi_lock);
212 x6spi = __xfrm6_tunnel_spi_lookup(saddr);
/* 0 doubles as "no SPI allocated" — outside XFRM6_TUNNEL_SPI_MIN..MAX. */
213 spi = x6spi ? x6spi->spi : 0;
214 read_unlock_bh(&xfrm6_tunnel_spi_lock);
218 EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
/*
 * Allocate a fresh SPI for @saddr and link the new x6spi object into both
 * hash tables.  Caller must hold xfrm6_tunnel_spi_lock for writing.
 *
 * Search strategy: scan upward from the last-allocated SPI to SPI_MAX,
 * then wrap around and scan SPI_MIN up to the last-allocated value, taking
 * the first value not present in the by-SPI table.  Returns the new SPI,
 * or (presumably) 0 on allocation failure — the goto labels, returns and
 * closing braces are elided in this excerpt.
 */
220 static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
223 struct xfrm6_tunnel_spi *x6spi;
224 struct hlist_node *pos;
227 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
/* Re-seat the rolling counter if it is outside the valid range. */
229 if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
230 xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
231 xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
/* First pass: from the rolling counter up to SPI_MAX. */
235 for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
236 index = xfrm6_tunnel_spi_hash_byspi(spi);
237 hlist_for_each_entry(x6spi, pos,
238 &xfrm6_tunnel_spi_byspi[index],
240 if (x6spi->spi == spi)
243 xfrm6_tunnel_spi = spi;
/* Second pass: wrap around, SPI_MIN up to the old counter value. */
247 for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) {
248 index = xfrm6_tunnel_spi_hash_byspi(spi);
249 hlist_for_each_entry(x6spi, pos,
250 &xfrm6_tunnel_spi_byspi[index],
252 if (x6spi->spi == spi)
255 xfrm6_tunnel_spi = spi;
/* A free SPI was found: build the tracking object for it. */
262 X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for "
263 "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
265 NIP6(*(struct in6_addr *)saddr));
/* SLAB_ATOMIC: we hold a bh-disabled write lock, so no sleeping. */
266 x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
268 X6TPRINTK1(KERN_ERR "%s(): kmem_cache_alloc() failed\n",
272 #ifdef XFRM6_TUNNEL_SPI_MAGIC
273 x6spi->magic = XFRM6_TUNNEL_SPI_MAGIC;
275 memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
/* New entry starts with one reference held by the caller. */
277 atomic_set(&x6spi->refcnt, 1);
/* index still holds the by-SPI bucket from the successful scan above. */
279 hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]);
281 index = xfrm6_tunnel_spi_hash_byaddr(saddr);
282 hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
283 X6SPI_CHECK_MAGIC(x6spi);
285 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
/*
 * Public allocator: return the SPI for @saddr, taking a new reference on
 * an existing entry or allocating a fresh one under the write lock.
 * NOTE(review): the branch assigning spi on the existing-entry path is
 * elided in this excerpt.
 */
289 u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
291 struct xfrm6_tunnel_spi *x6spi;
294 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
296 write_lock_bh(&xfrm6_tunnel_spi_lock);
297 x6spi = __xfrm6_tunnel_spi_lookup(saddr);
/* Address already has an SPI: just add a reference. */
299 atomic_inc(&x6spi->refcnt);
302 spi = __xfrm6_tunnel_alloc_spi(saddr);
303 write_unlock_bh(&xfrm6_tunnel_spi_lock);
305 X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi);
310 EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);
/*
 * Drop one reference on the SPI entry for @saddr; when the refcount hits
 * zero, unlink the entry from both hash tables and return it to the slab
 * cache.  Uses the _safe list iterator because the entry may be deleted
 * mid-walk.
 */
312 void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
314 struct xfrm6_tunnel_spi *x6spi;
315 struct hlist_node *pos, *n;
317 X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr);
319 write_lock_bh(&xfrm6_tunnel_spi_lock);
321 hlist_for_each_entry_safe(x6spi, pos, n,
322 &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
325 if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
326 X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
327 "for %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
330 NIP6(*(struct in6_addr *)saddr),
332 X6SPI_CHECK_MAGIC(x6spi);
/* Last reference gone: remove from both tables and free. */
333 if (atomic_dec_and_test(&x6spi->refcnt)) {
334 hlist_del(&x6spi->list_byaddr);
335 hlist_del(&x6spi->list_byspi);
336 kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi);
341 write_unlock_bh(&xfrm6_tunnel_spi_lock);
344 EXPORT_SYMBOL(xfrm6_tunnel_free_spi);
/*
 * xfrm output hook: the outer IPv6 header is already at skb->data; fix up
 * its payload_len to cover everything after that header.
 * NOTE(review): the return statement is elided in this excerpt.
 */
346 static int xfrm6_tunnel_output(struct sk_buff **pskb)
348 struct sk_buff *skb = *pskb;
349 struct ipv6hdr *top_iph;
351 top_iph = (struct ipv6hdr *)skb->data;
352 top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
/*
 * xfrm input hook: strip the outer IPv6 header's view of the skb.  The old
 * network header becomes the MAC header, the current data pointer becomes
 * the new network header, and the cached route is dropped before the inner
 * packet is re-classified as IPv6 destined to this host.
 * NOTE(review): the return statements are elided in this excerpt.
 */
357 static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb)
/* Ensure the (linear) outer IPv6 header is actually present. */
359 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
362 skb->mac.raw = skb->nh.raw;
363 skb->nh.raw = skb->data;
364 dst_release(skb->dst);
366 skb->protocol = htons(ETH_P_IPV6);
367 skb->pkt_type = PACKET_HOST;
/* Single registered ip6ip6 tunnel handler; updates serialized by the sem. */
373 static struct xfrm6_tunnel *xfrm6_tunnel_handler;
374 static DECLARE_MUTEX(xfrm6_tunnel_sem);
/*
 * Register @handler as the single tunnel handler.  Fails (presumably with
 * an error goto — elided in this excerpt) if one is already registered.
 */
376 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler)
380 down(&xfrm6_tunnel_sem);
382 if (xfrm6_tunnel_handler != NULL)
385 xfrm6_tunnel_handler = handler;
386 up(&xfrm6_tunnel_sem);
391 EXPORT_SYMBOL(xfrm6_tunnel_register);
/*
 * Deregister @handler.  Fails (presumably via an error goto — elided in
 * this excerpt) unless @handler is the one currently registered.
 */
393 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler)
397 down(&xfrm6_tunnel_sem);
399 if (xfrm6_tunnel_handler != handler)
402 xfrm6_tunnel_handler = NULL;
403 up(&xfrm6_tunnel_sem);
410 EXPORT_SYMBOL(xfrm6_tunnel_deregister);
/*
 * Receive path for IPPROTO_IPV6-in-IPv6 packets.  First offers the packet
 * to the registered device-like handler (ip6ip6); if that declines, maps
 * the outer source address to its tunnel SPI, looks up the matching xfrm
 * state, and decapsulates via xfrm6_tunnel_input().
 * NOTE(review): several statements (handler-null check, spi declaration,
 * state-null check, lock acquisition, returns, error labels) are elided in
 * this excerpt.
 */
412 static int xfrm6_tunnel_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
414 struct sk_buff *skb = *pskb;
415 struct xfrm6_tunnel *handler = xfrm6_tunnel_handler;
416 struct xfrm_state *x = NULL;
417 struct ipv6hdr *iph = skb->nh.ipv6h;
421 /* device-like_ip6ip6_handler() */
423 err = handler->handler(pskb, nhoffp);
/* Handler declined: fall back to native xfrm6 tunnel processing. */
428 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
429 x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr,
431 IPPROTO_IPV6, AF_INET6);
/* States still being set up or already dying must not accept traffic. */
438 if (unlikely(x->km.state != XFRM_STATE_VALID))
441 err = xfrm6_tunnel_input(x, NULL, skb);
/* Account decapsulated traffic against the state's lifetime counters. */
445 x->curlft.bytes += skb->len;
447 spin_unlock(&x->lock);
/* Error path: release the state lock before bailing out. */
454 spin_unlock(&x->lock);
/*
 * ICMPv6 error handler for the tunnel protocol.  Delegates to the
 * registered handler's err_handler first, then performs native handling:
 * a switch over the ICMPv6 type/code that currently only emits debug
 * messages for the recognized errors.
 * NOTE(review): the switch statements themselves and several break/return
 * lines are elided in this excerpt.
 */
461 static void xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
462 int type, int code, int offset, __u32 info)
464 struct xfrm6_tunnel *handler = xfrm6_tunnel_handler;
466 /* call here first for device-like ip6ip6 err handling */
468 handler->err_handler(skb, opt, type, code, offset, info);
472 /* xfrm6_tunnel native err handling */
474 case ICMPV6_DEST_UNREACH:
477 case ICMPV6_ADM_PROHIBITED:
478 case ICMPV6_NOT_NEIGHBOUR:
479 case ICMPV6_ADDR_UNREACH:
480 case ICMPV6_PORT_UNREACH:
482 X6TPRINTK3(KERN_DEBUG
483 "xfrm6_tunnel: Destination Unreach.\n");
487 case ICMPV6_PKT_TOOBIG:
488 X6TPRINTK3(KERN_DEBUG
489 "xfrm6_tunnel: Packet Too Big.\n");
491 case ICMPV6_TIME_EXCEED:
493 case ICMPV6_EXC_HOPLIMIT:
494 X6TPRINTK3(KERN_DEBUG
495 "xfrm6_tunnel: Too small Hoplimit.\n");
497 case ICMPV6_EXC_FRAGTIME:
502 case ICMPV6_PARAMPROB:
/* Parameter-problem codes are recognized but intentionally ignored. */
504 case ICMPV6_HDR_FIELD: break;
505 case ICMPV6_UNK_NEXTHDR: break;
506 case ICMPV6_UNK_OPTION: break;
/*
 * xfrm state init hook: a tunnel state only needs room for one outer IPv6
 * header.  NOTE(review): validation checks and the return statement are
 * elided in this excerpt.
 */
515 static int xfrm6_tunnel_init_state(struct xfrm_state *x, void *args)
520 x->props.header_len = sizeof(struct ipv6hdr);
/* xfrm state destructor: release the SPI held for this state's saddr. */
525 static void xfrm6_tunnel_destroy(struct xfrm_state *x)
527 xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
/* xfrm type descriptor wiring the IP6IP6 tunnel hooks defined above. */
530 static struct xfrm_type xfrm6_tunnel_type = {
531 .description = "IP6IP6",
532 .owner = THIS_MODULE,
533 .proto = IPPROTO_IPV6,
534 .init_state = xfrm6_tunnel_init_state,
535 .destructor = xfrm6_tunnel_destroy,
536 .input = xfrm6_tunnel_input,
537 .output = xfrm6_tunnel_output,
/* inet6 protocol handler for IPPROTO_IPV6: bypasses policy, final hop. */
540 static struct inet6_protocol xfrm6_tunnel_protocol = {
541 .handler = xfrm6_tunnel_rcv,
542 .err_handler = xfrm6_tunnel_err,
543 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
/*
 * Module/boot-time init: register the xfrm type, hook the inet6 protocol,
 * then bring up the SPI allocator — unwinding earlier steps on each
 * failure.  NOTE(review): the early-return statements on the error paths
 * are elided in this excerpt.
 */
546 void __init xfrm6_tunnel_init(void)
548 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
550 if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) {
552 "xfrm6_tunnel init: can't add xfrm type\n");
555 if (inet6_add_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6) < 0) {
557 "xfrm6_tunnel init(): can't add protocol\n");
/* Undo step 1 before bailing out. */
558 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
561 if (xfrm6_tunnel_spi_init() < 0) {
563 "xfrm6_tunnel init: failed to initialize spi\n");
/* Undo steps 2 and 1 in reverse order. */
564 inet6_del_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6);
565 xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
/*
 * Module exit: tear down in reverse order of xfrm6_tunnel_init() — SPI
 * allocator first, then the protocol hook, then the xfrm type — logging
 * (but not failing on) unregistration errors.
 */
570 void __exit xfrm6_tunnel_fini(void)
572 X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__);
574 xfrm6_tunnel_spi_fini();
575 if (inet6_del_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6) < 0)
577 "xfrm6_tunnel close: can't remove protocol\n");
578 if (xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6) < 0)
580 "xfrm6_tunnel close: can't remove xfrm type\n");