/*
 * xfrm_state.c
 *
 * Changes:
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <asm/uaccess.h>
/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by daddr to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */
static spinlock_t xfrm_state_lock = SPIN_LOCK_UNLOCKED;
/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];

DECLARE_WAIT_QUEUE_HEAD(km_waitq);

static rwlock_t xfrm_state_afinfo_lock = RW_LOCK_UNLOCKED;
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
static spinlock_t xfrm_state_gc_lock = SPIN_LOCK_UNLOCKED;
static void __xfrm_state_delete(struct xfrm_state *x);
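/* Final destruction is deferred to a workqueue so that the kfree()
 * below never runs from atomic context: __xfrm_state_destroy() only
 * queues the state on xfrm_state_gc_list and schedules this work.
 */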
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
        if (del_timer(&x->timer))
                BUG();
        if (x->aalg)
                kfree(x->aalg);
        if (x->ealg)
                kfree(x->ealg);
        if (x->calg)
                kfree(x->calg);
        if (x->encap)
                kfree(x->encap);
        if (x->type) {
                x->type->destructor(x);
                xfrm_put_type(x->type);
        }
        kfree(x);
}
static void xfrm_state_gc_task(void *data)
{
        struct xfrm_state *x;
        struct list_head *entry, *tmp;
        struct list_head gc_list = LIST_HEAD_INIT(gc_list);

        spin_lock_bh(&xfrm_state_gc_lock);
        list_splice_init(&xfrm_state_gc_list, &gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);

        list_for_each_safe(entry, tmp, &gc_list) {
                x = list_entry(entry, struct xfrm_state, bydst);
                xfrm_state_gc_destroy(x);
        }
        wake_up(&km_waitq);
}
static inline unsigned long make_jiffies(long secs)
{
        if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
                return MAX_SCHEDULE_TIMEOUT-1;
        else
                return secs*HZ;
}
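/* Per-state lifetime timer.  Hard limits expire the state outright;
 * soft limits only warn the key manager (km_state_expired(x, 0)) so a
 * replacement SA can be negotiated in time.  The timer is re-armed
 * for the nearest limit still outstanding.
 */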
static void xfrm_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;
        unsigned long now = (unsigned long)xtime.tv_sec;
        long next = LONG_MAX;
        int warn = 0;

        spin_lock(&x->lock);
        if (x->km.state == XFRM_STATE_DEAD)
                goto out;
        if (x->km.state == XFRM_STATE_EXPIRED)
                goto expired;
        if (x->lft.hard_add_expires_seconds) {
                long tmo = x->lft.hard_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->lft.hard_use_expires_seconds) {
                long tmo = x->lft.hard_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->km.dying)
                goto resched;
        if (x->lft.soft_add_expires_seconds) {
                long tmo = x->lft.soft_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }
        if (x->lft.soft_use_expires_seconds) {
                long tmo = x->lft.soft_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }

        if (warn)
                km_state_expired(x, 0);
resched:
        if (next != LONG_MAX &&
            !mod_timer(&x->timer, jiffies + make_jiffies(next)))
                xfrm_state_hold(x);
        goto out;

expired:
        if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
                x->km.state = XFRM_STATE_EXPIRED;
                wake_up(&km_waitq);
                next = 2;
                goto resched;
        }
        km_state_expired(x, 1);
        __xfrm_state_delete(x);

out:
        spin_unlock(&x->lock);
        xfrm_state_put(x);
}
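/* Allocate a zeroed xfrm_state.  The single reference set here is the
 * one __xfrm_state_delete() drops at the end of a state's life; byte
 * and packet lifetime limits default to XFRM_INF (unlimited).
 */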
struct xfrm_state *xfrm_state_alloc(void)
{
        struct xfrm_state *x;

        x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

        if (x) {
                memset(x, 0, sizeof(struct xfrm_state));
                atomic_set(&x->refcnt, 1);
                atomic_set(&x->tunnel_users, 0);
                INIT_LIST_HEAD(&x->bydst);
                INIT_LIST_HEAD(&x->byspi);
                init_timer(&x->timer);
                x->timer.function = xfrm_timer_handler;
                x->timer.data     = (unsigned long)x;
                x->curlft.add_time = (unsigned long)xtime.tv_sec;
                x->lft.soft_byte_limit = XFRM_INF;
                x->lft.soft_packet_limit = XFRM_INF;
                x->lft.hard_byte_limit = XFRM_INF;
                x->lft.hard_packet_limit = XFRM_INF;
                x->lock = SPIN_LOCK_UNLOCKED;
        }
        return x;
}
void __xfrm_state_destroy(struct xfrm_state *x)
{
        BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

        spin_lock_bh(&xfrm_state_gc_lock);
        list_add(&x->bydst, &xfrm_state_gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);
        schedule_work(&xfrm_state_gc_work);
}
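/* Unlink the state from both hash tables, dropping the reference each
 * table held.  Callers (xfrm_state_delete() and the timer handler)
 * hold x->lock.
 */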
static void __xfrm_state_delete(struct xfrm_state *x)
{
        if (x->km.state != XFRM_STATE_DEAD) {
                x->km.state = XFRM_STATE_DEAD;
                spin_lock(&xfrm_state_lock);
                list_del(&x->bydst);
                atomic_dec(&x->refcnt);
                if (x->id.spi) {
                        list_del(&x->byspi);
                        atomic_dec(&x->refcnt);
                }
                spin_unlock(&xfrm_state_lock);
                if (del_timer(&x->timer))
                        atomic_dec(&x->refcnt);

                /* The number two in this test is the reference
                 * mentioned in the comment below plus the reference
                 * our caller holds.  A larger value means that
                 * there are DSTs attached to this xfrm_state.
                 */
                if (atomic_read(&x->refcnt) > 2)
                        xfrm_flush_bundles();

                /* All xfrm_state objects are created by xfrm_state_alloc.
                 * The xfrm_state_alloc call gives a reference, and that
                 * is what we are dropping here.
                 */
                atomic_dec(&x->refcnt);
        }
}
void xfrm_state_delete(struct xfrm_state *x)
{
        spin_lock_bh(&x->lock);
        __xfrm_state_delete(x);
        spin_unlock_bh(&x->lock);
}
void xfrm_state_flush(u8 proto)
{
        int i;
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i < XFRM_DST_HSIZE; i++) {
restart:
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (!xfrm_state_kern(x) &&
                            (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
                                xfrm_state_hold(x);
                                spin_unlock_bh(&xfrm_state_lock);

                                xfrm_state_delete(x);
                                xfrm_state_put(x);

                                /* the chain may have changed while unlocked */
                                spin_lock_bh(&xfrm_state_lock);
                                goto restart;
                        }
                }
        }
        spin_unlock_bh(&xfrm_state_lock);
        wake_up(&km_waitq);
}
static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
                  struct xfrm_tmpl *tmpl,
                  xfrm_address_t *daddr, xfrm_address_t *saddr,
                  unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -1;
        afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
        xfrm_state_put_afinfo(afinfo);
        return 0;
}
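/* Resolve an SA for a flow: scan the bydst hash chain for a state
 * matching the template (family, reqid, addresses, mode and proto).
 * If nothing usable exists and no acquire is already in progress,
 * allocate a temporary XFRM_STATE_ACQ state and ask the registered
 * key managers, via km_query(), to negotiate a real one.
 */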
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                struct flowi *fl, struct xfrm_tmpl *tmpl,
                struct xfrm_policy *pol, int *err,
                unsigned short family)
{
        unsigned h = xfrm_dst_hash(daddr, family);
        struct xfrm_state *x;
        int acquire_in_progress = 0;
        int error = 0;
        struct xfrm_state *best = NULL;

        spin_lock_bh(&xfrm_state_lock);
        list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == tmpl->reqid &&
                    xfrm_state_addr_check(x, daddr, saddr, family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto) {
                        /* Resolution logic:
                           1. There is a valid state with matching selector.
                              Done.
                           2. Valid state with inappropriate selector. Skip.

                           Entering area of "sysdeps".

                           3. If state is not valid, selector is temporary,
                              it selects only session which triggered
                              previous resolution. Key manager will do
                              something to install a state with proper
                              selector.
                         */
                        if (x->km.state == XFRM_STATE_VALID) {
                                if (!xfrm_selector_match(&x->sel, fl, family))
                                        continue;
                                if (!best ||
                                    best->km.dying > x->km.dying ||
                                    (best->km.dying == x->km.dying &&
                                     best->curlft.add_time < x->curlft.add_time))
                                        best = x;
                        } else if (x->km.state == XFRM_STATE_ACQ) {
                                acquire_in_progress = 1;
                        } else if (x->km.state == XFRM_STATE_ERROR ||
                                   x->km.state == XFRM_STATE_EXPIRED) {
                                if (xfrm_selector_match(&x->sel, fl, family))
                                        error = 1;
                        }
                }
        }

        x = best;
        if (!x && !error && !acquire_in_progress &&
            ((x = xfrm_state_alloc()) != NULL)) {
                /* Initialize temporary selector matching only
                 * to current session. */
                xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

                if (km_query(x, tmpl, pol) == 0) {
                        x->km.state = XFRM_STATE_ACQ;
                        list_add_tail(&x->bydst, xfrm_state_bydst+h);
                        xfrm_state_hold(x);
                        if (x->id.spi) {
                                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
                                list_add(&x->byspi, xfrm_state_byspi+h);
                                xfrm_state_hold(x);
                        }
                        x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
                        xfrm_state_hold(x);
                        x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
                        add_timer(&x->timer);
                } else {
                        x->km.state = XFRM_STATE_DEAD;
                        xfrm_state_put(x);
                        x = NULL;
                        error = 1;
                }
        }
        if (x)
                xfrm_state_hold(x);
        else
                *err = acquire_in_progress ? -EAGAIN :
                        (error ? -ESRCH : -ENOMEM);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}
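/* Link a state into both hash tables.  Each table takes its own
 * reference; the caller must hold xfrm_state_lock.
 */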
static void __xfrm_state_insert(struct xfrm_state *x)
{
        unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);

        list_add(&x->bydst, xfrm_state_bydst+h);
        xfrm_state_hold(x);

        h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);

        list_add(&x->byspi, xfrm_state_byspi+h);
        xfrm_state_hold(x);

        if (!mod_timer(&x->timer, jiffies + HZ))
                xfrm_state_hold(x);

        wake_up(&km_waitq);
}
void xfrm_state_insert(struct xfrm_state *x)
{
        spin_lock_bh(&xfrm_state_lock);
        __xfrm_state_insert(x);
        spin_unlock_bh(&xfrm_state_lock);
}
int xfrm_state_add(struct xfrm_state *x)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_state *x1;
        int err;

        afinfo = xfrm_state_get_afinfo(x->props.family);
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);

        x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
        if (!x1) {
                x1 = afinfo->find_acq(
                        x->props.mode, x->props.reqid, x->id.proto,
                        &x->id.daddr, &x->props.saddr, 0);
                if (x1 && x1->id.spi != x->id.spi && x1->id.spi) {
                        xfrm_state_put(x1);
                        x1 = NULL;
                }
        }

        if (x1 && x1->id.spi) {
                xfrm_state_put(x1);
                x1 = NULL;
                err = -EEXIST;
                goto out;
        }

        __xfrm_state_insert(x);
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);

        if (x1) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
        }

        return err;
}
int xfrm_state_update(struct xfrm_state *x)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_state *x1;
        int err;

        afinfo = xfrm_state_get_afinfo(x->props.family);
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);
        x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);

        err = -ESRCH;
        if (!x1)
                goto out;

        if (xfrm_state_kern(x1)) {
                xfrm_state_put(x1);
                err = -EEXIST;
                goto out;
        }

        if (x1->km.state == XFRM_STATE_ACQ) {
                __xfrm_state_insert(x);
                x = NULL;
        }
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);

        if (err)
                return err;

        if (!x) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
                return 0;
        }

        err = -EINVAL;
        spin_lock_bh(&x1->lock);
        if (likely(x1->km.state == XFRM_STATE_VALID)) {
                if (x->encap && x1->encap)
                        memcpy(x1->encap, x->encap, sizeof(*x1->encap));
                memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
                x1->km.dying = 0;

                if (!mod_timer(&x1->timer, jiffies + HZ))
                        xfrm_state_hold(x1);
                if (x1->curlft.use_time)
                        xfrm_state_check_expire(x1);

                err = 0;
        }
        spin_unlock_bh(&x1->lock);

        xfrm_state_put(x1);

        return err;
}
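/* Byte and packet limits are enforced here on the packet path; the
 * time-based limits are handled by xfrm_timer_handler() above.  Both
 * paths report through km_state_expired() with hard set accordingly.
 */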
int xfrm_state_check_expire(struct xfrm_state *x)
{
        if (!x->curlft.use_time)
                x->curlft.use_time = (unsigned long)xtime.tv_sec;

        if (x->km.state != XFRM_STATE_VALID)
                return -EINVAL;

        if (x->curlft.bytes >= x->lft.hard_byte_limit ||
            x->curlft.packets >= x->lft.hard_packet_limit) {
                km_state_expired(x, 1);
                if (!mod_timer(&x->timer, jiffies + XFRM_ACQ_EXPIRES*HZ))
                        xfrm_state_hold(x);
                return -EINVAL;
        }

        if (!x->km.dying &&
            (x->curlft.bytes >= x->lft.soft_byte_limit ||
             x->curlft.packets >= x->lft.soft_packet_limit))
                km_state_expired(x, 0);
        return 0;
}
int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
        int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
                - skb_headroom(skb);

        if (nhead > 0)
                return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

        /* Check tail too... */
        return 0;
}
int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb)
{
        int err = xfrm_state_check_expire(x);
        if (err < 0)
                goto err;
        err = xfrm_state_check_space(x, skb);
err:
        return err;
}
struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
                  unsigned short family)
{
        struct xfrm_state *x;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return NULL;

        spin_lock_bh(&xfrm_state_lock);
        x = afinfo->state_lookup(daddr, spi, proto);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return x;
}
struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
              xfrm_address_t *daddr, xfrm_address_t *saddr,
              int create, unsigned short family)
{
        struct xfrm_state *x;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return NULL;

        spin_lock_bh(&xfrm_state_lock);
        x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return x;
}
/* Crude linear scan, since no resolution list indexed by seq is
 * maintained. */

struct xfrm_state *xfrm_find_acq_byseq(u32 seq)
{
        int i;
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i < XFRM_DST_HSIZE; i++) {
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (x->km.seq == seq) {
                                xfrm_state_hold(x);
                                spin_unlock_bh(&xfrm_state_lock);
                                return x;
                        }
                }
        }
        spin_unlock_bh(&xfrm_state_lock);
        return NULL;
}
u32 xfrm_get_acqseq(void)
{
        u32 res;
        static u32 acqseq;
        static spinlock_t acqseq_lock = SPIN_LOCK_UNLOCKED;

        spin_lock_bh(&acqseq_lock);
        /* skip zero: if ++acqseq wraps to 0, bump it once more */
        res = (++acqseq ? : ++acqseq);
        spin_unlock_bh(&acqseq_lock);
        return res;
}
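/* Choose an SPI for x.  With minspi == maxspi the single requested
 * value is taken if it is free; otherwise up to maxspi-minspi+1
 * random candidates from the range are probed, one hash lookup each.
 */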
void
xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
{
        u32 h;
        struct xfrm_state *x0;

        if (x->id.spi)
                return;

        if (minspi == maxspi) {
                x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
                if (x0) {
                        xfrm_state_put(x0);
                        return;
                }
                x->id.spi = minspi;
        } else {
                u32 spi = 0;
                minspi = ntohl(minspi);
                maxspi = ntohl(maxspi);
                for (h=0; h<maxspi-minspi+1; h++) {
                        spi = minspi + net_random()%(maxspi-minspi+1);
                        x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                        if (x0 == NULL)
                                break;
                        xfrm_state_put(x0);
                }
                x->id.spi = htonl(spi);
        }
        if (x->id.spi) {
                spin_lock_bh(&xfrm_state_lock);
                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
                list_add(&x->byspi, xfrm_state_byspi+h);
                xfrm_state_hold(x);
                spin_unlock_bh(&xfrm_state_lock);
                wake_up(&km_waitq);
        }
}
int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
                    void *data)
{
        int i;
        struct xfrm_state *x;
        int count = 0;
        int err = 0;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i < XFRM_DST_HSIZE; i++) {
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
                                count++;
                }
        }
        if (count == 0) {
                err = -ENOENT;
                goto out;
        }

        for (i = 0; i < XFRM_DST_HSIZE; i++) {
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
                                continue;
                        err = func(x, --count, data);
                        if (err)
                                goto out;
                }
        }
out:
        spin_unlock_bh(&xfrm_state_lock);
        return err;
}
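/* Anti-replay check.  x->replay.seq is the highest sequence number
 * seen so far and x->replay.bitmap tracks the window below it, with
 * bit n standing for sequence number (replay.seq - n).  Example with
 * replay_window = 32 and replay.seq = 1000: seq 1001 is above the
 * window and accepted, seq 990 maps to bit 10 and is rejected only if
 * that bit is already set, and seq 960 (diff 40 >= 32) is rejected as
 * too old.
 */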
int xfrm_replay_check(struct xfrm_state *x, u32 seq)
{
        u32 diff;

        seq = ntohl(seq);

        if (unlikely(seq == 0))
                return -EINVAL;

        if (likely(seq > x->replay.seq))
                return 0;

        diff = x->replay.seq - seq;
        if (diff >= x->props.replay_window) {
                x->stats.replay_window++;
                return -EINVAL;
        }

        if (x->replay.bitmap & (1U << diff)) {
                x->stats.replay++;
                return -EINVAL;
        }
        return 0;
}
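/* Record seq as seen: shift the window forward when seq moves past
 * the highest number seen so far, otherwise set the bit for seq
 * inside the current window.
 */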
void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
{
        u32 diff;

        seq = ntohl(seq);

        if (seq > x->replay.seq) {
                diff = seq - x->replay.seq;
                if (diff < x->props.replay_window)
                        x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
                else
                        x->replay.bitmap = 1;
                x->replay.seq = seq;
        } else {
                diff = x->replay.seq - seq;
                x->replay.bitmap |= (1U << diff);
        }
}
int xfrm_check_selectors(struct xfrm_state **x, int n, struct flowi *fl)
{
        int i;

        for (i=0; i<n; i++) {
                int match;
                match = xfrm_selector_match(&x[i]->sel, fl, x[i]->props.family);
                if (!match)
                        return -EINVAL;
        }
        return 0;
}
static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static rwlock_t xfrm_km_lock = RW_LOCK_UNLOCKED;
void km_state_expired(struct xfrm_state *x, int hard)
{
        struct xfrm_mgr *km;

        if (hard)
                x->km.state = XFRM_STATE_EXPIRED;
        else
                x->km.dying = 1;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                km->notify(x, hard);
        read_unlock(&xfrm_km_lock);

        if (hard)
                wake_up(&km_waitq);
}
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
        int err = -EINVAL;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                err = km->acquire(x, t, pol, XFRM_POLICY_OUT);
                if (!err)
                        break;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
{
        int err = -EINVAL;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->new_mapping)
                        err = km->new_mapping(x, ipaddr, sport);
                if (!err)
                        break;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
{
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify_policy)
                        km->notify_policy(pol, dir, hard);
        read_unlock(&xfrm_km_lock);
}
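/* setsockopt() entry for per-socket policies: copy the opaque policy
 * blob from userspace and let each registered key manager try to
 * compile it into an xfrm_policy.
 */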
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
        int err;
        u8 *data;
        struct xfrm_mgr *km;
        struct xfrm_policy *pol = NULL;

        if (optlen <= 0 || optlen > PAGE_SIZE)
                return -EMSGSIZE;

        data = kmalloc(optlen, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        err = -EFAULT;
        if (copy_from_user(data, optval, optlen))
                goto out;

        err = -EINVAL;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                pol = km->compile_policy(sk->sk_family, optname, data,
                                         optlen, &err);
                if (err >= 0)
                        break;
        }
        read_unlock(&xfrm_km_lock);

        if (err >= 0) {
                xfrm_sk_policy_insert(sk, err, pol);
                xfrm_pol_put(pol);
                err = 0;
        }

out:
        kfree(data);
        return err;
}
int xfrm_register_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_add_tail(&km->list, &xfrm_km_list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
int xfrm_unregister_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_del(&km->list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock(&xfrm_state_afinfo_lock);
        if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else {
                afinfo->state_bydst = xfrm_state_bydst;
                afinfo->state_byspi = xfrm_state_byspi;
                xfrm_state_afinfo[afinfo->family] = afinfo;
        }
        write_unlock(&xfrm_state_afinfo_lock);
        return err;
}
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock(&xfrm_state_afinfo_lock);
        if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else {
                        xfrm_state_afinfo[afinfo->family] = NULL;
                        afinfo->state_byspi = NULL;
                        afinfo->state_bydst = NULL;
                }
        }
        write_unlock(&xfrm_state_afinfo_lock);
        return err;
}
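/* Returns the afinfo entry with its read lock held; release it with
 * xfrm_state_put_afinfo().
 */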
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
        struct xfrm_state_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
                return NULL;
        read_lock(&xfrm_state_afinfo_lock);
        afinfo = xfrm_state_afinfo[family];
        if (likely(afinfo != NULL))
                read_lock(&afinfo->lock);
        read_unlock(&xfrm_state_afinfo_lock);
        return afinfo;
}
void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
        if (unlikely(afinfo == NULL))
                return;
        read_unlock(&afinfo->lock);
}
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
        if (x->tunnel) {
                struct xfrm_state *t = x->tunnel;

                if (atomic_read(&t->tunnel_users) == 2)
                        xfrm_state_delete(t);
                atomic_dec(&t->tunnel_users);
                xfrm_state_put(t);
                x->tunnel = NULL;
        }
}
void __init xfrm_state_init(void)
{
        int i;

        for (i=0; i<XFRM_DST_HSIZE; i++) {
                INIT_LIST_HEAD(&xfrm_state_bydst[i]);
                INIT_LIST_HEAD(&xfrm_state_byspi[i]);
        }
        INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
}