/*
 * xfrm_state.c
 *
 * Changes:
 *      Mitsuru KANDA @USAGI
 *      Kazunori MIYAZAWA @USAGI
 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *              IPv6 support
 *      YOSHIFUJI Hideaki @USAGI
 *              Split up af-specific functions
 *      Derek Atkins <derek@ihtfp.com>
 *              Add UDP Encapsulation
 *
 */

#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <asm/uaccess.h>

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by daddr to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static spinlock_t xfrm_state_lock = SPIN_LOCK_UNLOCKED;

/* Hash table to find appropriate SA towards given target (endpoint
 * of tunnel or destination of transport mode) allowed by selector.
 *
 * Main use is finding SA after policy selected tunnel or transport mode.
 * Also, it can be used by ah/esp icmp error handler to find offending SA.
 */
static struct list_head xfrm_state_bydst[XFRM_DST_HSIZE];
static struct list_head xfrm_state_byspi[XFRM_DST_HSIZE];

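/* Illustrative lookup flow for the two tables above (a sketch only; the
 * real matching is done in xfrm_state_find() and in the per-family
 * state_lookup() helpers further down):
 *
 *      output: h = xfrm_dst_hash(daddr, family);
 *              walk xfrm_state_bydst[h], match reqid/mode/proto/selector
 *
 *      input:  h = xfrm_spi_hash(daddr, spi, proto, family);
 *              walk xfrm_state_byspi[h], match (daddr, spi, proto)
 */
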
DECLARE_WAIT_QUEUE_HEAD(km_waitq);

static rwlock_t xfrm_state_afinfo_lock = RW_LOCK_UNLOCKED;
static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];

static struct work_struct xfrm_state_gc_work;
static struct list_head xfrm_state_gc_list = LIST_HEAD_INIT(xfrm_state_gc_list);
static spinlock_t xfrm_state_gc_lock = SPIN_LOCK_UNLOCKED;

static void __xfrm_state_delete(struct xfrm_state *x);

static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
        if (del_timer(&x->timer))
                BUG();
        if (x->aalg)
                kfree(x->aalg);
        if (x->ealg)
                kfree(x->ealg);
        if (x->calg)
                kfree(x->calg);
        if (x->encap)
                kfree(x->encap);
        if (x->type) {
                x->type->destructor(x);
                xfrm_put_type(x->type);
        }
        kfree(x);
        wake_up(&km_waitq);
}

static void xfrm_state_gc_task(void *data)
{
        struct xfrm_state *x;
        struct list_head *entry, *tmp;
        struct list_head gc_list = LIST_HEAD_INIT(gc_list);

        spin_lock_bh(&xfrm_state_gc_lock);
        list_splice_init(&xfrm_state_gc_list, &gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);

        list_for_each_safe(entry, tmp, &gc_list) {
                x = list_entry(entry, struct xfrm_state, bydst);
                xfrm_state_gc_destroy(x);
        }
}

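/* make_jiffies() converts a timeout expressed in seconds into jiffies for
 * mod_timer(), clamping large values so that secs*HZ cannot overflow.
 * For example, with HZ == 1000 any secs >= (MAX_SCHEDULE_TIMEOUT-1)/1000
 * simply yields MAX_SCHEDULE_TIMEOUT-1. */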
static inline unsigned long make_jiffies(long secs)
{
        if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
                return MAX_SCHEDULE_TIMEOUT-1;
        else
                return secs*HZ;
}

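/* Lifetime management, driven by x->timer.  On each run the handler picks
 * the nearest remaining hard/soft limit and re-arms itself for that point.
 * Worked example (sketch): add_time = T, hard_add_expires_seconds = 3600,
 * soft_add_expires_seconds = 3000.  At now = T+3000 the soft limit fires,
 * km_state_expired(x, 0) warns the key managers and the timer is re-armed
 * for the remaining 600s; at now = T+3600 the hard limit fires and the
 * state is expired and deleted. */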
static void xfrm_timer_handler(unsigned long data)
{
        struct xfrm_state *x = (struct xfrm_state*)data;
        unsigned long now = (unsigned long)xtime.tv_sec;
        long next = LONG_MAX;
        int warn = 0;

        spin_lock(&x->lock);
        if (x->km.state == XFRM_STATE_DEAD)
                goto out;
        if (x->km.state == XFRM_STATE_EXPIRED)
                goto expired;
        if (x->lft.hard_add_expires_seconds) {
                long tmo = x->lft.hard_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->lft.hard_use_expires_seconds) {
                long tmo = x->lft.hard_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        goto expired;
                if (tmo < next)
                        next = tmo;
        }
        if (x->km.dying)
                goto resched;
        if (x->lft.soft_add_expires_seconds) {
                long tmo = x->lft.soft_add_expires_seconds +
                        x->curlft.add_time - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }
        if (x->lft.soft_use_expires_seconds) {
                long tmo = x->lft.soft_use_expires_seconds +
                        (x->curlft.use_time ? : now) - now;
                if (tmo <= 0)
                        warn = 1;
                else if (tmo < next)
                        next = tmo;
        }

        if (warn)
                km_state_expired(x, 0);
resched:
        if (next != LONG_MAX &&
            !mod_timer(&x->timer, jiffies + make_jiffies(next)))
                xfrm_state_hold(x);
        goto out;

expired:
        if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) {
                x->km.state = XFRM_STATE_EXPIRED;
                wake_up(&km_waitq);
                next = 2;
                goto resched;
        }
        if (x->id.spi != 0)
                km_state_expired(x, 1);
        __xfrm_state_delete(x);

out:
        spin_unlock(&x->lock);
        xfrm_state_put(x);
}

struct xfrm_state *xfrm_state_alloc(void)
{
        struct xfrm_state *x;

        x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC);

        if (x) {
                memset(x, 0, sizeof(struct xfrm_state));
                atomic_set(&x->refcnt, 1);
                atomic_set(&x->tunnel_users, 0);
                INIT_LIST_HEAD(&x->bydst);
                INIT_LIST_HEAD(&x->byspi);
                init_timer(&x->timer);
                x->timer.function = xfrm_timer_handler;
                x->timer.data     = (unsigned long)x;
                x->curlft.add_time = (unsigned long)xtime.tv_sec;
                x->lft.soft_byte_limit = XFRM_INF;
                x->lft.soft_packet_limit = XFRM_INF;
                x->lft.hard_byte_limit = XFRM_INF;
                x->lft.hard_packet_limit = XFRM_INF;
                x->lock = SPIN_LOCK_UNLOCKED;
        }
        return x;
}

void __xfrm_state_destroy(struct xfrm_state *x)
{
        BUG_TRAP(x->km.state == XFRM_STATE_DEAD);

        spin_lock_bh(&xfrm_state_gc_lock);
        list_add(&x->bydst, &xfrm_state_gc_list);
        spin_unlock_bh(&xfrm_state_gc_lock);
        schedule_work(&xfrm_state_gc_work);
}

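/* Reference accounting used throughout this file (a summary of the code
 * below, not a new rule): an xfrm_state holds one reference from
 * xfrm_state_alloc(), one per hash chain it is linked on (bydst and,
 * once an SPI is assigned, byspi), and one while its timer is pending.
 * __xfrm_state_delete() unlinks the state and drops each of those
 * references in turn. */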
static void __xfrm_state_delete(struct xfrm_state *x)
{
        if (x->km.state != XFRM_STATE_DEAD) {
                x->km.state = XFRM_STATE_DEAD;
                spin_lock(&xfrm_state_lock);
                list_del(&x->bydst);
                atomic_dec(&x->refcnt);
                if (x->id.spi) {
                        list_del(&x->byspi);
                        atomic_dec(&x->refcnt);
                }
                spin_unlock(&xfrm_state_lock);
                if (del_timer(&x->timer))
                        atomic_dec(&x->refcnt);

                /* The number two in this test is the reference
                 * mentioned in the comment below plus the reference
                 * our caller holds.  A larger value means that
                 * there are DSTs attached to this xfrm_state.
                 */
                if (atomic_read(&x->refcnt) > 2)
                        xfrm_flush_bundles();

                /* All xfrm_state objects are created by xfrm_state_alloc.
                 * The xfrm_state_alloc call gives a reference, and that
                 * is what we are dropping here.
                 */
                atomic_dec(&x->refcnt);
        }
}

void xfrm_state_delete(struct xfrm_state *x)
{
        spin_lock_bh(&x->lock);
        __xfrm_state_delete(x);
        spin_unlock_bh(&x->lock);
}

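/* xfrm_state_flush() below cannot call xfrm_state_delete() while holding
 * xfrm_state_lock, so for every match it takes a reference, drops the
 * lock, deletes the state, and then restarts the scan of that hash
 * chain from the beginning. */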
void xfrm_state_flush(u8 proto)
{
        int i;
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i < XFRM_DST_HSIZE; i++) {
restart:
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (!xfrm_state_kern(x) &&
                            (proto == IPSEC_PROTO_ANY || x->id.proto == proto)) {
                                xfrm_state_hold(x);
                                spin_unlock_bh(&xfrm_state_lock);

                                xfrm_state_delete(x);
                                xfrm_state_put(x);

                                spin_lock_bh(&xfrm_state_lock);
                                goto restart;
                        }
                }
        }
        spin_unlock_bh(&xfrm_state_lock);
        wake_up(&km_waitq);
}

static int
xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
                  struct xfrm_tmpl *tmpl,
                  xfrm_address_t *daddr, xfrm_address_t *saddr,
                  unsigned short family)
{
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return -1;
        afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
        xfrm_state_put_afinfo(afinfo);
        return 0;
}

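/* xfrm_state_find() returns a held state on success.  On failure it sets
 * *err: -EAGAIN if an ACQUIRE is already in progress for a matching
 * state, -ENOMEM if no state matched and a temporary one could not be
 * allocated, and -ESRCH otherwise (matching states are in error/expired,
 * or km_query() on the freshly built ACQ state failed). */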
struct xfrm_state *
xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
                struct flowi *fl, struct xfrm_tmpl *tmpl,
                struct xfrm_policy *pol, int *err,
                unsigned short family)
{
        unsigned h = xfrm_dst_hash(daddr, family);
        struct xfrm_state *x;
        int acquire_in_progress = 0;
        int error = 0;
        struct xfrm_state *best = NULL;

        spin_lock_bh(&xfrm_state_lock);
        list_for_each_entry(x, xfrm_state_bydst+h, bydst) {
                if (x->props.family == family &&
                    x->props.reqid == tmpl->reqid &&
                    xfrm_state_addr_check(x, daddr, saddr, family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto) {
                        /* Resolution logic:
                           1. There is a valid state with a matching
                              selector. Done.
                           2. Valid state with an inappropriate selector.
                              Skip.

                           Entering area of "sysdeps".

                           3. If the state is not valid, its selector is
                              temporary and matches only the session which
                              triggered the previous resolution.  The key
                              manager will install a state with a proper
                              selector.
                         */
                        if (x->km.state == XFRM_STATE_VALID) {
                                if (!xfrm_selector_match(&x->sel, fl, family))
                                        continue;
                                if (!best ||
                                    best->km.dying > x->km.dying ||
                                    (best->km.dying == x->km.dying &&
                                     best->curlft.add_time < x->curlft.add_time))
                                        best = x;
                        } else if (x->km.state == XFRM_STATE_ACQ) {
                                acquire_in_progress = 1;
                        } else if (x->km.state == XFRM_STATE_ERROR ||
                                   x->km.state == XFRM_STATE_EXPIRED) {
                                if (xfrm_selector_match(&x->sel, fl, family))
                                        error = 1;
                        }
                }
        }

        x = best;
        if (!x && !error && !acquire_in_progress &&
            ((x = xfrm_state_alloc()) != NULL)) {
                /* Initialize temporary selector matching only
                 * to current session. */
                xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);

                if (km_query(x, tmpl, pol) == 0) {
                        x->km.state = XFRM_STATE_ACQ;
                        list_add_tail(&x->bydst, xfrm_state_bydst+h);
                        xfrm_state_hold(x);
                        if (x->id.spi) {
                                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
                                list_add(&x->byspi, xfrm_state_byspi+h);
                                xfrm_state_hold(x);
                        }
                        x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES;
                        xfrm_state_hold(x);
                        x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
                        add_timer(&x->timer);
                } else {
                        x->km.state = XFRM_STATE_DEAD;
                        xfrm_state_put(x);
                        x = NULL;
                        error = 1;
                }
        }
        if (x)
                xfrm_state_hold(x);
        else
                *err = acquire_in_progress ? -EAGAIN :
                        (error ? -ESRCH : -ENOMEM);
        spin_unlock_bh(&xfrm_state_lock);
        return x;
}

static void __xfrm_state_insert(struct xfrm_state *x)
{
        unsigned h = xfrm_dst_hash(&x->id.daddr, x->props.family);

        list_add(&x->bydst, xfrm_state_bydst+h);
        xfrm_state_hold(x);

        h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);

        list_add(&x->byspi, xfrm_state_byspi+h);
        xfrm_state_hold(x);

        if (!mod_timer(&x->timer, jiffies + HZ))
                xfrm_state_hold(x);

        wake_up(&km_waitq);
}

void xfrm_state_insert(struct xfrm_state *x)
{
        spin_lock_bh(&xfrm_state_lock);
        __xfrm_state_insert(x);
        spin_unlock_bh(&xfrm_state_lock);
}

int xfrm_state_add(struct xfrm_state *x)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_state *x1;
        int err;

        afinfo = xfrm_state_get_afinfo(x->props.family);
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);

        x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);
        if (!x1) {
                x1 = afinfo->find_acq(
                        x->props.mode, x->props.reqid, x->id.proto,
                        &x->id.daddr, &x->props.saddr, 0);
                if (x1 && x1->id.spi != x->id.spi && x1->id.spi) {
                        xfrm_state_put(x1);
                        x1 = NULL;
                }
        }

        if (x1 && x1->id.spi) {
                xfrm_state_put(x1);
                x1 = NULL;
                err = -EEXIST;
                goto out;
        }

        __xfrm_state_insert(x);
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);

        if (x1) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
        }

        return err;
}

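/* xfrm_state_update(): if the existing entry for (daddr, spi, proto) is
 * still an ACQ placeholder, the new state simply replaces it.  If it is
 * a live (VALID) state, only the lifetime configuration and the UDP
 * encapsulation data are copied over and its timer is restarted.
 * -EEXIST is returned for kernel-owned states and -ESRCH when nothing
 * matches. */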
int xfrm_state_update(struct xfrm_state *x)
{
        struct xfrm_state_afinfo *afinfo;
        struct xfrm_state *x1;
        int err;

        afinfo = xfrm_state_get_afinfo(x->props.family);
        if (unlikely(afinfo == NULL))
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_lock);
        x1 = afinfo->state_lookup(&x->id.daddr, x->id.spi, x->id.proto);

        err = -ESRCH;
        if (!x1)
                goto out;

        if (xfrm_state_kern(x1)) {
                xfrm_state_put(x1);
                err = -EEXIST;
                goto out;
        }

        if (x1->km.state == XFRM_STATE_ACQ) {
                __xfrm_state_insert(x);
                x = NULL;
        }
        err = 0;

out:
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);

        if (err)
                return err;

        if (!x) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
                return 0;
        }

        err = -EINVAL;
        spin_lock_bh(&x1->lock);
        if (likely(x1->km.state == XFRM_STATE_VALID)) {
                if (x->encap && x1->encap)
                        memcpy(x1->encap, x->encap, sizeof(*x1->encap));
                memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
                x1->km.dying = 0;

                if (!mod_timer(&x1->timer, jiffies + HZ))
                        xfrm_state_hold(x1);
                if (x1->curlft.use_time)
                        xfrm_state_check_expire(x1);

                err = 0;
        }
        spin_unlock_bh(&x1->lock);

        xfrm_state_put(x1);

        return err;
}

int xfrm_state_check_expire(struct xfrm_state *x)
{
        if (!x->curlft.use_time)
                x->curlft.use_time = (unsigned long)xtime.tv_sec;

        if (x->km.state != XFRM_STATE_VALID)
                return -EINVAL;

        if (x->curlft.bytes >= x->lft.hard_byte_limit ||
            x->curlft.packets >= x->lft.hard_packet_limit) {
                km_state_expired(x, 1);
                if (!mod_timer(&x->timer, jiffies + XFRM_ACQ_EXPIRES*HZ))
                        xfrm_state_hold(x);
                return -EINVAL;
        }

        if (!x->km.dying &&
            (x->curlft.bytes >= x->lft.soft_byte_limit ||
             x->curlft.packets >= x->lft.soft_packet_limit))
                km_state_expired(x, 0);
        return 0;
}

int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb)
{
        int nhead = x->props.header_len + LL_RESERVED_SPACE(skb->dst->dev)
                - skb_headroom(skb);

        if (nhead > 0)
                return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);

        /* Check tail too... */
        return 0;
}

struct xfrm_state *
xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto,
                  unsigned short family)
{
        struct xfrm_state *x;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return NULL;

        spin_lock_bh(&xfrm_state_lock);
        x = afinfo->state_lookup(daddr, spi, proto);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return x;
}

struct xfrm_state *
xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
              xfrm_address_t *daddr, xfrm_address_t *saddr,
              int create, unsigned short family)
{
        struct xfrm_state *x;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
                return NULL;

        spin_lock_bh(&xfrm_state_lock);
        x = afinfo->find_acq(mode, reqid, proto, daddr, saddr, create);
        spin_unlock_bh(&xfrm_state_lock);
        xfrm_state_put_afinfo(afinfo);
        return x;
}

/* Silly enough, but I'm too lazy to build a resolution list. */

struct xfrm_state * xfrm_find_acq_byseq(u32 seq)
{
        int i;
        struct xfrm_state *x;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i < XFRM_DST_HSIZE; i++) {
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (x->km.seq == seq) {
                                xfrm_state_hold(x);
                                spin_unlock_bh(&xfrm_state_lock);
                                return x;
                        }
                }
        }
        spin_unlock_bh(&xfrm_state_lock);
        return NULL;
}

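/* The sequence number handed out for ACQUIRE messages is never zero:
 * the double increment below re-increments on wrap so that a return
 * value of 0 cannot occur. */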
u32 xfrm_get_acqseq(void)
{
        u32 res;
        static u32 acqseq;
        static spinlock_t acqseq_lock = SPIN_LOCK_UNLOCKED;

        spin_lock_bh(&acqseq_lock);
        res = (++acqseq ? : ++acqseq);
        spin_unlock_bh(&acqseq_lock);
        return res;
}

void
xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi)
{
        u32 h;
        struct xfrm_state *x0;

        if (x->id.spi)
                return;

        if (minspi == maxspi) {
                x0 = xfrm_state_lookup(&x->id.daddr, minspi, x->id.proto, x->props.family);
                if (x0) {
                        xfrm_state_put(x0);
                        return;
                }
                x->id.spi = minspi;
        } else {
                u32 spi = 0;
                minspi = ntohl(minspi);
                maxspi = ntohl(maxspi);
                for (h=0; h<maxspi-minspi+1; h++) {
                        spi = minspi + net_random()%(maxspi-minspi+1);
                        x0 = xfrm_state_lookup(&x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                        if (x0 == NULL)
                                break;
                        xfrm_state_put(x0);
                }
                x->id.spi = htonl(spi);
        }
        if (x->id.spi) {
                spin_lock_bh(&xfrm_state_lock);
                h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, x->props.family);
                list_add(&x->byspi, xfrm_state_byspi+h);
                xfrm_state_hold(x);
                spin_unlock_bh(&xfrm_state_lock);
                wake_up(&km_waitq);
        }
}

int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*),
                    void *data)
{
        int i;
        struct xfrm_state *x;
        int count = 0;
        int err = 0;

        spin_lock_bh(&xfrm_state_lock);
        for (i = 0; i < XFRM_DST_HSIZE; i++) {
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (proto == IPSEC_PROTO_ANY || x->id.proto == proto)
                                count++;
                }
        }
        if (count == 0) {
                err = -ENOENT;
                goto out;
        }

        for (i = 0; i < XFRM_DST_HSIZE; i++) {
                list_for_each_entry(x, xfrm_state_bydst+i, bydst) {
                        if (proto != IPSEC_PROTO_ANY && x->id.proto != proto)
                                continue;
                        err = func(x, --count, data);
                        if (err)
                                goto out;
                }
        }
out:
        spin_unlock_bh(&xfrm_state_lock);
        return err;
}

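/* Anti-replay handling below uses a 32-bit sliding window anchored at the
 * highest sequence number seen (x->replay.seq), with bit N of the bitmap
 * marking seq - N as already received.  Worked example with
 * replay_window == 32 and replay.seq == 100: seq 101 advances the window
 * (bitmap <<= 1, bit 0 set, replay.seq = 101); seq 95 is accepted once
 * and sets bit 5; a second seq 95 is rejected via the bitmap test; and
 * seq 68 falls outside the window (diff >= 32) and is rejected. */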
int xfrm_replay_check(struct xfrm_state *x, u32 seq)
{
        u32 diff;

        seq = ntohl(seq);

        if (unlikely(seq == 0))
                return -EINVAL;

        if (likely(seq > x->replay.seq))
                return 0;

        diff = x->replay.seq - seq;
        if (diff >= x->props.replay_window) {
                x->stats.replay_window++;
                return -EINVAL;
        }

        if (x->replay.bitmap & (1U << diff)) {
                x->stats.replay++;
                return -EINVAL;
        }
        return 0;
}

void xfrm_replay_advance(struct xfrm_state *x, u32 seq)
{
        u32 diff;

        seq = ntohl(seq);

        if (seq > x->replay.seq) {
                diff = seq - x->replay.seq;
                if (diff < x->props.replay_window)
                        x->replay.bitmap = ((x->replay.bitmap) << diff) | 1;
                else
                        x->replay.bitmap = 1;
                x->replay.seq = seq;
        } else {
                diff = x->replay.seq - seq;
                x->replay.bitmap |= (1U << diff);
        }
}

int xfrm_check_selectors(struct xfrm_state **x, int n, struct flowi *fl)
{
        int i;

        for (i=0; i<n; i++) {
                int match;
                match = xfrm_selector_match(&x[i]->sel, fl, x[i]->props.family);
                if (!match)
                        return -EINVAL;
        }
        return 0;
}

static struct list_head xfrm_km_list = LIST_HEAD_INIT(xfrm_km_list);
static rwlock_t         xfrm_km_lock = RW_LOCK_UNLOCKED;

void km_state_expired(struct xfrm_state *x, int hard)
{
        struct xfrm_mgr *km;

        if (hard)
                x->km.state = XFRM_STATE_EXPIRED;
        else
                x->km.dying = 1;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                km->notify(x, hard);
        read_unlock(&xfrm_km_lock);

        if (hard)
                wake_up(&km_waitq);
}

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
        int err = -EINVAL;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                err = km->acquire(x, t, pol, XFRM_POLICY_OUT);
                if (!err)
                        break;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport)
{
        int err = -EINVAL;
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                if (km->new_mapping)
                        err = km->new_mapping(x, ipaddr, sport);
                if (!err)
                        break;
        }
        read_unlock(&xfrm_km_lock);
        return err;
}

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard)
{
        struct xfrm_mgr *km;

        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list)
                if (km->notify_policy)
                        km->notify_policy(pol, dir, hard);
        read_unlock(&xfrm_km_lock);

        if (hard)
                wake_up(&km_waitq);
}

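/* xfrm_user_policy() implements the per-socket policy setsockopt path:
 * the user-supplied blob is copied in and each registered key manager
 * is asked in turn to compile it into an xfrm_policy; the first one
 * that succeeds has its result attached to the socket via
 * xfrm_sk_policy_insert(). */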
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
        int err;
        u8 *data;
        struct xfrm_mgr *km;
        struct xfrm_policy *pol = NULL;

        if (optlen <= 0 || optlen > PAGE_SIZE)
                return -EMSGSIZE;

        data = kmalloc(optlen, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        err = -EFAULT;
        if (copy_from_user(data, optval, optlen))
                goto out;

        err = -EINVAL;
        read_lock(&xfrm_km_lock);
        list_for_each_entry(km, &xfrm_km_list, list) {
                pol = km->compile_policy(sk->sk_family, optname, data,
                                         optlen, &err);
                if (err >= 0)
                        break;
        }
        read_unlock(&xfrm_km_lock);

        if (err >= 0) {
                xfrm_sk_policy_insert(sk, err, pol);
                xfrm_pol_put(pol);
                err = 0;
        }

out:
        kfree(data);
        return err;
}

int xfrm_register_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_add_tail(&km->list, &xfrm_km_list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}

int xfrm_unregister_km(struct xfrm_mgr *km)
{
        write_lock_bh(&xfrm_km_lock);
        list_del(&km->list);
        write_unlock_bh(&xfrm_km_lock);
        return 0;
}

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock(&xfrm_state_afinfo_lock);
        if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
                err = -ENOBUFS;
        else {
                afinfo->state_bydst = xfrm_state_bydst;
                afinfo->state_byspi = xfrm_state_byspi;
                xfrm_state_afinfo[afinfo->family] = afinfo;
        }
        write_unlock(&xfrm_state_afinfo_lock);
        return err;
}

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        if (unlikely(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
        write_lock(&xfrm_state_afinfo_lock);
        if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
                        err = -EINVAL;
                else {
                        xfrm_state_afinfo[afinfo->family] = NULL;
                        afinfo->state_byspi = NULL;
                        afinfo->state_bydst = NULL;
                }
        }
        write_unlock(&xfrm_state_afinfo_lock);
        return err;
}

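/* xfrm_state_get_afinfo() returns the per-family ops with afinfo->lock
 * held for read; callers must release it with xfrm_state_put_afinfo()
 * when they are done, e.g.:
 *
 *      afinfo = xfrm_state_get_afinfo(family);
 *      if (!afinfo)
 *              return -EAFNOSUPPORT;
 *      ...use afinfo->state_lookup() etc...
 *      xfrm_state_put_afinfo(afinfo);
 *
 * (usage sketch mirroring the callers earlier in this file) */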
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
        struct xfrm_state_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
                return NULL;
        read_lock(&xfrm_state_afinfo_lock);
        afinfo = xfrm_state_afinfo[family];
        if (likely(afinfo != NULL))
                read_lock(&afinfo->lock);
        read_unlock(&xfrm_state_afinfo_lock);
        return afinfo;
}

void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
        if (unlikely(afinfo == NULL))
                return;
        read_unlock(&afinfo->lock);
}

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
        if (x->tunnel) {
                struct xfrm_state *t = x->tunnel;

                if (atomic_read(&t->tunnel_users) == 2)
                        xfrm_state_delete(t);
                atomic_dec(&t->tunnel_users);
                xfrm_state_put(t);
                x->tunnel = NULL;
        }
}

void __init xfrm_state_init(void)
{
        int i;

        for (i=0; i<XFRM_DST_HSIZE; i++) {
                INIT_LIST_HEAD(&xfrm_state_bydst[i]);
                INIT_LIST_HEAD(&xfrm_state_byspi[i]);
        }
        INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
}
936