6 * Kazunori MIYAZAWA @USAGI
7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * YOSHIFUJI Hideaki @USAGI
10 * Split up af-specific functions
11 * Derek Atkins <derek@ihtfp.com>
12 * Add UDP Encapsulation
16 #include <linux/workqueue.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <asm/uaccess.h>
23 /* Each xfrm_state may be linked to two tables:
25 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
26 2. Hash table by daddr to find what SAs exist for given
27 destination/tunnel endpoint. (output)
30 static DEFINE_SPINLOCK(xfrm_state_lock);
32 /* Hash table to find appropriate SA towards given target (endpoint
33 * of tunnel or destination of transport mode) allowed by selector.
35 * Main use is finding SA after policy selected tunnel or transport mode.
36 * Also, it can be used by ah/esp icmp error handler to find offending SA.
38 static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
39 static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];
41 DECLARE_WAIT_QUEUE_HEAD(km_waitq);
42 EXPORT_SYMBOL(km_waitq);
44 static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
45 static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
47 static struct work_struct xfrm_state_gc_work;
48 static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
49 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
51 static void __xfrm_state_delete(struct xfrm_state *x);
53 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
54 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
56 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
57 static void km_state_expired(struct xfrm_state *x, int hard);
/* Final teardown of a state picked up by the GC worker: stop its timer
 * and release the type-specific resources via the type destructor.
 * NOTE(review): interior lines are elided in this excerpt. */
59 static void xfrm_state_gc_destroy(struct xfrm_state *x)
61 	if (del_timer(&x->timer))
72 		x->type->destructor(x);
73 	xfrm_put_type(x->type);
/* Deferred destruction worker: splice the pending-GC list onto a local
 * list under the gc lock, then destroy each state outside the lock so
 * destructors never run with the spinlock held.
 * NOTE(review): declaration of 'x' is elided in this excerpt. */
78 static void xfrm_state_gc_task(void *data)
81 	struct list_head *entry, *tmp;
82 	struct list_head gc_list = LIST_HEAD_INIT(gc_list);
84 	spin_lock_bh(&xfrm_state_gc_lock);
85 	list_splice_init(&xfrm_state_gc_list, &gc_list);
86 	spin_unlock_bh(&xfrm_state_gc_lock);
88 	list_for_each_safe(entry, tmp, &gc_list) {
		/* states are queued for GC via their 'bydst' list head */
89 		x = list_entry(entry, struct xfrm_state, bydst);
90 		xfrm_state_gc_destroy(x);
/* Convert a timeout in seconds to jiffies, clamped below
 * MAX_SCHEDULE_TIMEOUT so it stays a valid timer expiry. */
95 static inline unsigned long make_jiffies(long secs)
97 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
98 		return MAX_SCHEDULE_TIMEOUT-1;
/* Per-state lifetime timer. Checks the four time-based lifetime limits
 * (hard/soft x add/use), notifies key managers of soft expiry, re-arms
 * the timer for the nearest future deadline, and deletes the state on
 * hard expiry. Runs with x->lock held (unlock visible at the end).
 * NOTE(review): several branches/gotos are elided in this excerpt. */
103 static void xfrm_timer_handler(unsigned long data)
105 	struct xfrm_state *x = (struct xfrm_state*)data;
106 	unsigned long now = (unsigned long)xtime.tv_sec;
107 	long next = LONG_MAX;
111 	if (x->km.state == XFRM_STATE_DEAD)
113 	if (x->km.state == XFRM_STATE_EXPIRED)
115 	if (x->lft.hard_add_expires_seconds) {
116 		long tmo = x->lft.hard_add_expires_seconds +
117 			x->curlft.add_time - now;
123 	if (x->lft.hard_use_expires_seconds) {
124 		long tmo = x->lft.hard_use_expires_seconds +
			/* use_time of 0 means "never used"; treat as now */
125 			(x->curlft.use_time ? : now) - now;
133 	if (x->lft.soft_add_expires_seconds) {
134 		long tmo = x->lft.soft_add_expires_seconds +
135 			x->curlft.add_time - now;
141 	if (x->lft.soft_use_expires_seconds) {
142 		long tmo = x->lft.soft_use_expires_seconds +
143 			(x->curlft.use_time ? : now) - now;
		/* soft expiry: warn key managers but keep the state alive */
151 		km_state_expired(x, 0);
153 	if (next != LONG_MAX &&
154 	    !mod_timer(&x->timer, jiffies + make_jiffies(next)))
	/* larval (ACQ) states with no SPI just expire quietly */
159 	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
160 		x->km.state = XFRM_STATE_EXPIRED;
	/* hard expiry: notify and delete */
166 	km_state_expired(x, 1);
167 	__xfrm_state_delete(x);
170 	spin_unlock(&x->lock);
/* Allocate and zero-initialize a new xfrm_state with one reference,
 * unlinked hash nodes, an initialized (unarmed) lifetime timer, and
 * infinite byte/packet lifetime limits. GFP_ATOMIC: callers may hold
 * spinlocks. NOTE(review): the NULL-check and return are elided here;
 * presumably returns NULL on allocation failure — confirm in full file. */
174 struct xfrm_state *xfrm_state_alloc(void)
176 	struct xfrm_state *x;
178 	x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);
181 		memset(x, 0, sizeof(struct xfrm_state));
182 		atomic_set(&x->refcnt, 1);
183 		atomic_set(&x->tunnel_users, 0);
184 		INIT_LIST_HEAD(&x->bydst);
185 		INIT_LIST_HEAD(&x->byspi);
186 		init_timer(&x->timer);
187 		x->timer.function = xfrm_timer_handler;
188 		x->timer.data = (unsigned long)x;
189 		x->curlft.add_time = (unsigned long)xtime.tv_sec;
		/* XFRM_INF == no limit on bytes/packets by default */
190 		x->lft.soft_byte_limit = XFRM_INF;
191 		x->lft.soft_packet_limit = XFRM_INF;
192 		x->lft.hard_byte_limit = XFRM_INF;
193 		x->lft.hard_packet_limit = XFRM_INF;
194 		spin_lock_init(&x->lock);
198 EXPORT_SYMBOL(xfrm_state_alloc);
/* Queue a dead state for deferred destruction: reuse its 'bydst' list
 * head to put it on the GC list (it must already be unhashed) and kick
 * the GC work. Caller must have driven the state to XFRM_STATE_DEAD. */
200 void __xfrm_state_destroy(struct xfrm_state *x)
202 	BUG_TRAP(x->km.state == XFRM_STATE_DEAD);
204 	spin_lock_bh(&xfrm_state_gc_lock);
205 	list_add(&x->bydst, &xfrm_state_gc_list);
206 	spin_unlock_bh(&xfrm_state_gc_lock);
207 	schedule_work(&xfrm_state_gc_work);
209 EXPORT_SYMBOL(__xfrm_state_destroy);
/* Mark a state DEAD, unhash it (the elided lines presumably remove it
 * from the bydst/byspi tables — each unlink drops one reference), stop
 * its timer, flush DST bundles if others still hold references, and
 * drop the allocation reference from xfrm_state_alloc.
 * Called with x->lock held (see xfrm_state_delete). */
211 static void __xfrm_state_delete(struct xfrm_state *x)
213 	if (x->km.state != XFRM_STATE_DEAD) {
214 		x->km.state = XFRM_STATE_DEAD;
215 		spin_lock(&xfrm_state_lock);
		/* drop the reference held by the hash-table linkage */
217 		atomic_dec(&x->refcnt);
220 		atomic_dec(&x->refcnt);
222 		spin_unlock(&xfrm_state_lock);
		/* a pending timer holds a reference too */
223 		if (del_timer(&x->timer))
224 			atomic_dec(&x->refcnt);
226 		/* The number two in this test is the reference
227 		 * mentioned in the comment below plus the reference
228 		 * our caller holds.  A larger value means that
229 		 * there are DSTs attached to this xfrm_state.
231 		if (atomic_read(&x->refcnt) > 2)
232 			xfrm_flush_bundles();
234 		/* All xfrm_state objects are created by xfrm_state_alloc.
235 		 * The xfrm_state_alloc call gives a reference, and that
236 		 * is what we are dropping here.
238 		atomic_dec(&x->refcnt);
/* Public deletion entry point: take the per-state lock (BH-safe, the
 * timer handler also takes it) around the real delete. */
242 void xfrm_state_delete(struct xfrm_state *x)
244 	spin_lock_bh(&x->lock);
245 	__xfrm_state_delete(x);
246 	spin_unlock_bh(&x->lock);
248 EXPORT_SYMBOL(xfrm_state_delete);
/* Delete every non-kernel-owned state matching 'proto' (or all states
 * for IPSEC_PROTO_ANY). The table lock is dropped around each delete
 * and re-taken, so the elided code presumably restarts the scan after
 * each deletion — confirm in full file. */
250 void xfrm_state_flush(u8 proto)
253 	struct xfrm_state *x;
255 	spin_lock_bh(&xfrm_state_lock);
256 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
258 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
			/* skip states pinned by the kernel (tunnel owners) */
259 			if (!xfrm_state_kern(x) &&
260 			    (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
262 				spin_unlock_bh(&xfrm_state_lock);
264 				xfrm_state_delete(x);
267 				spin_lock_bh(&xfrm_state_lock);
272 	spin_unlock_bh(&xfrm_state_lock);
275 EXPORT_SYMBOL(xfrm_state_flush);
/* Fill in a temporary selector on a larval state, delegating to the
 * per-family (IPv4/IPv6) afinfo hook. NOTE(review): the NULL-afinfo
 * check and return value handling are elided in this excerpt. */
278 xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
279 		  struct xfrm_tmpl *tmpl,
280 		  xfrm_address_t *daddr, xfrm_address_t *saddr,
281 		  unsigned short family)
283 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
286 	afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
287 	xfrm_state_put_afinfo(afinfo);
/* Resolve an SA for output: scan the bydst hash bucket for a VALID
 * state matching (family, reqid, addrs, mode, proto) and selector,
 * preferring the "best" candidate; if none is found and no acquire is
 * pending, allocate a larval XFRM_STATE_ACQ state, query the key
 * managers, and arm an XFRM_ACQ_EXPIRES timeout. On failure *err is
 * set to -EAGAIN (acquire pending), -ESRCH (matching but unusable
 * state), or -ENOMEM. NOTE(review): 'error' declaration, refcount
 * takes, and the return are elided in this excerpt. */
292 xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
293 		struct flowi *fl, struct xfrm_tmpl *tmpl,
294 		struct xfrm_policy *pol, int *err,
295 		unsigned short family)
297 	unsigned h = xfrm_dst_hash(daddr, family);
298 	struct xfrm_state *x;
299 	int acquire_in_progress = 0;
301 	struct xfrm_state *best = NULL;
303 	spin_lock_bh(&xfrm_state_lock);
304 	list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
305 		if (x->props.family == family &&
306 		    x->props.reqid == tmpl->reqid &&
307 		    xfrm_state_addr_check(x, daddr, saddr, family) &&
308 		    tmpl->mode == x->props.mode &&
309 		    tmpl->id.proto == x->id.proto) {
311 			   1. There is a valid state with matching selector.
313 			   2. Valid state with inappropriate selector. Skip.
315 			   Entering area of "sysdeps".
317 			   3. If state is not valid, selector is temporary,
318 			      it selects only session which triggered
319 			      previous resolution. Key manager will do
320 			      something to install a state with proper
323 			if (x->km.state == XFRM_STATE_VALID) {
324 				if (!xfrm_selector_match(&x->sel, fl, family))
				/* prefer least-dying, then newest state */
327 				    best->km.dying > x->km.dying ||
328 				    (best->km.dying == x->km.dying &&
329 				     best->curlft.add_time < x->curlft.add_time))
331 			} else if (x->km.state == XFRM_STATE_ACQ) {
332 				acquire_in_progress = 1;
333 			} else if (x->km.state == XFRM_STATE_ERROR ||
334 				   x->km.state == XFRM_STATE_EXPIRED) {
335 				if (xfrm_selector_match(&x->sel, fl, family))
	/* nothing usable found: create a larval state and ask the KMs */
342 	if (!x && !error && !acquire_in_progress &&
343 	    ((x = xfrm_state_alloc()) != NULL)) {
344 		/* Initialize temporary selector matching only
345 		 * to current session. */
346 		xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
348 		if (km_query(x, tmpl, pol) == 0) {
349 			x->km.state = XFRM_STATE_ACQ;
350 			list_add_tail(&x->bydst, xfrm_state_bydst+h);
			/* hash by SPI too (elided lines likely guard on spi != 0) */
353 			h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
354 			list_add(&x->byspi, xfrm_state_byspi+h);
357 			x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
359 			x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
360 			add_timer(&x->timer);
		/* KM query failed: abandon the larval state */
362 			x->km.state = XFRM_STATE_DEAD;
371 		*err = acquire_in_progress ? -EAGAIN :
372 			(error ? -ESRCH : -ENOMEM);
373 	spin_unlock_bh(&xfrm_state_lock);
/* Link a state into both hash tables (bydst and byspi) and arm a 1 HZ
 * timer tick. Caller holds xfrm_state_lock. NOTE(review): the refcount
 * takes paired with each list_add are elided in this excerpt. */
377 static void __xfrm_state_insert(struct xfrm_state *x)
379 	unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);
381 	list_add(&x->bydst, xfrm_state_bydst+h);
384 	h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
386 	list_add(&x->byspi, xfrm_state_byspi+h);
389 	if (!mod_timer(&x->timer, jiffies + HZ))
/* Public insert: wrap __xfrm_state_insert in the global table lock. */
395 void xfrm_state_insert(struct xfrm_state *x)
397 	spin_lock_bh(&xfrm_state_lock);
398 	__xfrm_state_insert(x);
399 	spin_unlock_bh(&xfrm_state_lock);
401 EXPORT_SYMBOL(xfrm_state_insert);
403 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq);
/* Add a fully-specified state: fail if an identical (daddr,spi,proto)
 * state already exists, otherwise locate any matching larval/ACQ state
 * (by KM sequence number or by find_acq), insert the new state, and
 * delete the larval one it replaces. Returns 0 or a negative errno.
 * NOTE(review): error-path labels and several branches are elided. */
405 int xfrm_state_add(struct xfrm_state *x)
407 	struct xfrm_state_afinfo *afinfo;
408 	struct xfrm_state *x1;
412 	family = x->props.family;
413 	afinfo = xfrm_state_get_afinfo(family);
414 	if (unlikely(afinfo == NULL))
415 		return -EAFNOSUPPORT;
417 	spin_lock_bh(&xfrm_state_lock);
	/* exact duplicate already installed? */
419 	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
	/* try to find the larval state this one resolves */
428 		x1 = __xfrm_find_acq_byseq(x->km.seq);
429 		if (x1 && xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family)) {
436 		x1 = afinfo->find_acq(
437 			x->props.mode, x->props.reqid, x->id.proto,
438 			&x->id.daddr, &x->props.saddr, 0);
440 	__xfrm_state_insert(x);
444 	spin_unlock_bh(&xfrm_state_lock);
445 	xfrm_state_put_afinfo(afinfo);
		/* retire the larval state that has now been resolved */
448 		xfrm_state_delete(x1);
454 EXPORT_SYMBOL(xfrm_state_add);
/* Update an existing state in place: look it up by (daddr,spi,proto);
 * refuse to touch kernel-owned states; if the found state is larval
 * (ACQ), insert the new one and delete the larval; otherwise copy the
 * encap template and lifetime limits into the existing state under its
 * lock and re-arm its timer. Returns 0 or a negative errno.
 * NOTE(review): error variable, labels, and returns are elided here. */
456 int xfrm_state_update(struct xfrm_state *x)
458 	struct xfrm_state_afinfo *afinfo;
459 	struct xfrm_state *x1;
462 	afinfo = xfrm_state_get_afinfo(x->props.family);
463 	if (unlikely(afinfo == NULL))
464 		return -EAFNOSUPPORT;
466 	spin_lock_bh(&xfrm_state_lock);
467 	x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
	/* kernel-owned (tunnel) states may not be replaced from userspace */
473 	if (xfrm_state_kern(x1)) {
479 	if (x1->km.state == XFRM_STATE_ACQ) {
480 		__xfrm_state_insert(x);
486 	spin_unlock_bh(&xfrm_state_lock);
487 	xfrm_state_put_afinfo(afinfo);
493 		xfrm_state_delete(x1);
	/* in-place update of the live state, under its own lock */
499 	spin_lock_bh(&x1->lock);
500 	if (likely(x1->km.state == XFRM_STATE_VALID)) {
501 		if (x->encap && x1->encap)
502 			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
503 		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
506 		if (!mod_timer(&x1->timer, jiffies + HZ))
508 		if (x1->curlft.use_time)
509 			xfrm_state_check_expire(x1);
513 	spin_unlock_bh(&x1->lock);
519 EXPORT_SYMBOL(xfrm_state_update);
/* Enforce byte/packet lifetime limits on use: stamp first-use time,
 * raise hard expiry (KM notify + short timer before deletion) when a
 * hard limit is hit, soft expiry notification when a soft limit is
 * hit. NOTE(review): the dying-flag check and returns are elided. */
521 int xfrm_state_check_expire(struct xfrm_state *x)
523 	if (!x->curlft.use_time)
524 		x->curlft.use_time = (unsigned long)xtime.tv_sec;
526 	if (x->km.state != XFRM_STATE_VALID)
529 	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
530 	    x->curlft.packets >= x->lft.hard_packet_limit) {
531 		km_state_expired(x, 1);
		/* give key managers XFRM_ACQ_EXPIRES secs before deletion */
532 		if (!mod_timer(&x->timer, jiffies + XFRM_ACQ_EXPIRES*HZ))
538 	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
539 	     x->curlft.packets >= x->lft.soft_packet_limit))
540 		km_state_expired(x, 0);
543 EXPORT_SYMBOL(xfrm_state_check_expire);
/* Ensure the skb has enough headroom for this transform's header plus
 * the output device's link-layer reserve, expanding the head if not.
 * NOTE(review): the headroom comparison and early return are elided. */
545 static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
547 	int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
551 		return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
553 	/* Check tail too... */
/* Pre-output check: lifetime expiry first, then skb headroom. Returns
 * 0 on success or a negative errno from either check. */
557 int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
559 	int err = xfrm_state_check_expire(x);
562 	err = xfrm_state_check_space(x, skb);
566 EXPORT_SYMBOL(xfrm_state_check);
/* Look up an SA by (daddr, spi, proto) via the per-family afinfo hook,
 * under the table lock. NOTE(review): the NULL-afinfo guard and the
 * return statement are elided in this excerpt. */
569 xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
570 		  unsigned short family)
572 	struct xfrm_state *x;
573 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
577 	spin_lock_bh(&xfrm_state_lock);
578 	x = afinfo->state_lookup(daddr, spi, proto);
579 	spin_unlock_bh(&xfrm_state_lock);
580 	xfrm_state_put_afinfo(afinfo);
583 EXPORT_SYMBOL(xfrm_state_lookup);
/* Find (or, when 'create' is set, create) a larval ACQ state matching
 * the given parameters, via the per-family afinfo hook.
 * NOTE(review): NULL-afinfo guard and return are elided here. */
586 xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
587 	      xfrm_address_t *daddr, xfrm_address_t *saddr,
588 	      int create, unsigned short family)
590 	struct xfrm_state *x;
591 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
595 	spin_lock_bh(&xfrm_state_lock);
596 	x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
597 	spin_unlock_bh(&xfrm_state_lock);
598 	xfrm_state_put_afinfo(afinfo);
601 EXPORT_SYMBOL(xfrm_find_acq);
603 /* Silly enough, but I'm lazy to build resolution list */
/* Linear scan of every bydst bucket for a larval (ACQ) state whose KM
 * sequence number matches 'seq'. Caller holds xfrm_state_lock.
 * NOTE(review): the refcount take and returns are elided here. */
605 static struct xfrm_state *__xfrm_find_acq_byseq(u32 seq)
608 	struct xfrm_state *x;
610 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
611 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
612 			if (x->km.seq == seq && x->km.state == XFRM_STATE_ACQ) {
/* Locked wrapper around __xfrm_find_acq_byseq.
 * NOTE(review): the return statement is elided in this excerpt. */
621 struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
623 	struct xfrm_state *x;
625 	spin_lock_bh(&xfrm_state_lock);
626 	x = __xfrm_find_acq_byseq(seq);
627 	spin_unlock_bh(&xfrm_state_lock);
630 EXPORT_SYMBOL(xfrm_find_acq_byseq);
/* Hand out the next nonzero acquire sequence number; the ?: re-bumps
 * past zero on wraparound. Serialized by a local static spinlock.
 * NOTE(review): the static counter declaration and return are elided. */
632 u32 xfrm_get_acqseq(void)
636 	static DEFINE_SPINLOCK(acqseq_lock);
638 	spin_lock_bh(&acqseq_lock);
639 	res = (++acqseq ? : ++acqseq);
640 	spin_unlock_bh(&acqseq_lock);
643 EXPORT_SYMBOL(xfrm_get_acqseq);
/* Pick an unused SPI for state x within [minspi, maxspi]: exact value
 * when the range is a single SPI, otherwise random probing, checking
 * each candidate against the lookup table. On success the state is
 * linked into the byspi hash. NOTE(review): collision handling,
 * xfrm_state_put of probe results, and the return are elided here. */
646 xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
649 	struct xfrm_state *x0;
654 	if (minspi == maxspi) {
655 		x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
	/* SPIs arrive in network byte order; probe in host order */
663 		minspi = ntohl(minspi);
664 		maxspi = ntohl(maxspi);
665 		for (h=0; h<maxspi-minspi+1; h++) {
666 			spi = minspi + net_random()%(maxspi-minspi+1);
667 			x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
669 				x->id.spi = htonl(spi);
676 		spin_lock_bh(&xfrm_state_lock);
677 		h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
678 		list_add(&x->byspi, xfrm_state_byspi+h);
680 		spin_unlock_bh(&xfrm_state_lock);
684 EXPORT_SYMBOL(xfrm_alloc_spi);
/* Iterate all states matching 'proto' under the table lock, first
 * counting the matches, then invoking func(x, --count, data) on each;
 * the countdown lets the callback know how many remain. Stops early
 * if func returns an error. NOTE(review): declarations of i/count/err
 * and the return are elided in this excerpt. */
686 int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
690 	struct xfrm_state *x;
694 	spin_lock_bh(&xfrm_state_lock);
	/* first pass: count matching states */
695 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
696 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
697 			if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
	/* second pass: deliver each state to the callback */
706 	for (i = 0; i < XFRM_DST_HSIZE; i++) {
707 		list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
708 			if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
710 			err = func(x, --count, data);
716 	spin_unlock_bh(&xfrm_state_lock);
719 EXPORT_SYMBOL(xfrm_state_walk);
/* Anti-replay check for an inbound sequence number: reject 0, accept
 * anything ahead of the current window head, reject sequence numbers
 * older than the window or whose bitmap bit is already set.
 * NOTE(review): 'diff' declaration and return values are elided. */
721 int xfrm_replay_check(struct xfrm_state *x, u32 seq)
727 	if (unlikely(seq == 0))
730 	if (likely(seq > x->replay.seq))
733 	diff = x->replay.seq - seq;
734 	if (diff >= x->props.replay_window) {
735 		x->stats.replay_window++;
	/* bit set => this sequence number was already seen */
739 	if (x->replay.bitmap & (1U << diff)) {
745 EXPORT_SYMBOL(xfrm_replay_check);
/* Advance the anti-replay window after a packet passes the check:
 * shift the bitmap forward for a new head (or reset it if the jump
 * exceeds the window), or set the bit for an in-window packet.
 * NOTE(review): 'diff' declaration and seq assignment are elided. */
747 void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
753 	if (seq > x->replay.seq) {
754 		diff = seq - x->replay.seq;
755 		if (diff < x->props.replay_window)
756 			x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
		/* jump larger than the window: only the new head is "seen" */
758 			x->replay.bitmap = 1;
761 		diff = x->replay.seq - seq;
762 		x->replay.bitmap |= (1U << diff);
765 EXPORT_SYMBOL(xfrm_replay_advance);
767 static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
768 static DEFINE_RWLOCK(xfrm_km_lock);
/* Notify all registered key managers that state x expired; a hard
 * expiry also transitions the state to XFRM_STATE_EXPIRED (the elided
 * lines presumably set km.dying for soft expiry — confirm). */
770 static void km_state_expired(struct xfrm_state *x, int hard)
775 		x->km.state = XFRM_STATE_EXPIRED;
779 	read_lock(&xfrm_km_lock);
780 	list_for_each_entry(km, &xfrm_km_list, list)
782 	read_unlock(&xfrm_km_lock);
/* Ask each registered key manager to acquire keys for larval state x;
 * the elided lines presumably stop at the first KM that accepts and
 * return its result — confirm in full file. */
788 static int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
793 	read_lock(&xfrm_km_lock);
794 	list_for_each_entry(km, &xfrm_km_list, list) {
795 		err = km->acquire(x, t, pol, XFRM_POLICY_OUT);
799 	read_unlock(&xfrm_km_lock);
/* Report a NAT-T address/port remapping to the key managers; used by
 * UDP encapsulation. NOTE(review): the new_mapping-NULL guard and the
 * return are elided in this excerpt. */
803 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
808 	read_lock(&xfrm_km_lock);
809 	list_for_each_entry(km, &xfrm_km_list, list) {
811 		err = km->new_mapping(x, ipaddr, sport);
815 	read_unlock(&xfrm_km_lock);
818 EXPORT_SYMBOL(km_new_mapping);
/* Broadcast a policy-expiry event to every key manager that installed
 * a notify_policy callback. */
820 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
824 	read_lock(&xfrm_km_lock);
825 	list_for_each_entry(km, &xfrm_km_list, list)
826 		if (km->notify_policy)
827 			km->notify_policy(pol, dir, hard);
828 	read_unlock(&xfrm_km_lock);
/* setsockopt() path for per-socket IPsec policy: copy the userspace
 * blob in (bounded by PAGE_SIZE), let each key manager try to compile
 * it into an xfrm_policy, then install the result on the socket.
 * NOTE(review): err handling, the compile loop's break condition, and
 * kfree of 'data' are elided in this excerpt. */
834 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
839 	struct xfrm_policy *pol = NULL;
841 	if (optlen <= 0 || optlen > PAGE_SIZE)
844 	data = kmalloc(optlen, GFP_KERNEL);
849 	if (copy_from_user(data, optval, optlen))
853 	read_lock(&xfrm_km_lock);
854 	list_for_each_entry(km, &xfrm_km_list, list) {
855 		pol = km->compile_policy(sk->sk_family, optname, data,
860 	read_unlock(&xfrm_km_lock);
863 		xfrm_sk_policy_insert(sk, err, pol);
872 EXPORT_SYMBOL(xfrm_user_policy);
/* Register a key manager (e.g. PF_KEY, netlink) on the global list. */
874 int xfrm_register_km(struct xfrm_mgr *km)
876 	write_lock_bh(&xfrm_km_lock);
877 	list_add_tail(&km->list, &xfrm_km_list);
878 	write_unlock_bh(&xfrm_km_lock);
881 EXPORT_SYMBOL(xfrm_register_km);
/* Unregister a key manager; the list_del is elided in this excerpt. */
883 int xfrm_unregister_km(struct xfrm_mgr *km)
885 	write_lock_bh(&xfrm_km_lock);
887 	write_unlock_bh(&xfrm_km_lock);
890 EXPORT_SYMBOL(xfrm_unregister_km);
/* Register per-address-family state operations (IPv4/IPv6). Rejects
 * NULL or out-of-range families and duplicate registrations, and hands
 * the afinfo pointers to the shared hash tables.
 * NOTE(review): err variable and some returns are elided. */
892 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
895 	if (unlikely(afinfo == NULL))
897 	if (unlikely(afinfo->family >= NPROTO))
898 		return -EAFNOSUPPORT;
899 	write_lock(&xfrm_state_afinfo_lock);
900 	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
	/* share the global hash tables with the family module */
903 		afinfo->state_bydst = xfrm_state_bydst;
904 		afinfo->state_byspi = xfrm_state_byspi;
905 		xfrm_state_afinfo[afinfo->family] = afinfo;
907 	write_unlock(&xfrm_state_afinfo_lock);
910 EXPORT_SYMBOL(xfrm_state_register_afinfo);
/* Unregister per-family state operations: verify the slot holds this
 * exact afinfo before clearing it and its table pointers.
 * NOTE(review): err variable and some returns are elided. */
912 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
915 	if (unlikely(afinfo == NULL))
917 	if (unlikely(afinfo->family >= NPROTO))
918 		return -EAFNOSUPPORT;
919 	write_lock(&xfrm_state_afinfo_lock);
920 	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
921 		if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
924 			xfrm_state_afinfo[afinfo->family] = NULL;
925 			afinfo->state_byspi = NULL;
926 			afinfo->state_bydst = NULL;
929 	write_unlock(&xfrm_state_afinfo_lock);
932 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
/* Look up the afinfo for a family and pin it by taking its read lock
 * before dropping the registry lock; paired with
 * xfrm_state_put_afinfo(). NOTE(review): returns are elided here. */
934 static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
936 	struct xfrm_state_afinfo *afinfo;
937 	if (unlikely(family >= NPROTO))
939 	read_lock(&xfrm_state_afinfo_lock);
940 	afinfo = xfrm_state_afinfo[family];
941 	if (likely(afinfo != NULL))
		/* hold afinfo->lock across use; released in put_afinfo */
942 		read_lock(&afinfo->lock);
943 	read_unlock(&xfrm_state_afinfo_lock);
/* Release the per-afinfo read lock taken by xfrm_state_get_afinfo();
 * tolerates a NULL afinfo so callers can pass through failed gets. */
947 static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
949 	if (unlikely(afinfo == NULL))
951 	read_unlock(&afinfo->lock);
954 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
/* Drop this state's reference on its tunnel state: delete the tunnel
 * when only the tunnel's self-reference plus ours remain, then release
 * our tunnel_users count. NOTE(review): the x->tunnel NULL-guard and
 * x->tunnel clearing appear to be elided in this excerpt. */
955 void xfrm_state_delete_tunnel(struct xfrm_state *x)
958 	struct xfrm_state *t = x->tunnel;
960 	if (atomic_read(&t->tunnel_users) == 2)
961 		xfrm_state_delete(t);
962 	atomic_dec(&t->tunnel_users);
967 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
969 void __init xfrm_state_init(void)
973 for (i=0; i<XFRM_DST_HSIZE; i++) {
974 INIT_LIST_HEAD(&xfrm_state_bydst[i]);
975 INIT_LIST_HEAD(&xfrm_state_byspi[i]);
977 INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);