1 /* xfrm_user.c: User interface to configure xfrm engine.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/types.h>
16 #include <linux/slab.h>
17 #include <linux/socket.h>
18 #include <linux/string.h>
19 #include <linux/net.h>
20 #include <linux/skbuff.h>
21 #include <linux/netlink.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/pfkeyv2.h>
24 #include <linux/ipsec.h>
25 #include <linux/init.h>
26 #include <linux/security.h>
29 #include <asm/uaccess.h>
/* Kernel-side NETLINK_XFRM socket; created in xfrm_user_init() and used
 * for unicast replies and XFRMGRP_* broadcast notifications. */
31 static struct sock *xfrm_nl;
/* Validate one algorithm attribute (XFRMA_ALG_AUTH/CRYPT/COMP) supplied by
 * userspace: the rtattr payload must be large enough for an xfrm_algo, and
 * a zero-length key is only allowed for the "null" transforms.  Forcibly
 * NUL-terminates alg_name so later strcmp()/lookup cannot run off the end.
 * NOTE(review): this view is elided (original line numbers skip), so the
 * early "attribute absent" return and the error returns are not visible. */
33 static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
35 struct rtattr *rt = xfrma[type - 1];
36 struct xfrm_algo *algp;
/* Payload must at least hold the fixed xfrm_algo header. */
41 if ((rt->rta_len - sizeof(*rt)) < sizeof(*algp))
/* AUTH: empty key only legal for the null digest. */
47 if (!algp->alg_key_len &&
48 strcmp(algp->alg_name, "digest_null") != 0)
/* CRYPT: empty key only legal for the null cipher. */
53 if (!algp->alg_key_len &&
54 strcmp(algp->alg_name, "cipher_null") != 0)
59 /* Zero length keys are legal. */
/* Ensure the user-supplied name is NUL-terminated before any string use. */
66 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
/* Validate an optional XFRMA_ENCAP attribute: payload must hold an
 * xfrm_encap_tmpl and the encapsulation type must be one of the supported
 * ESP-in-UDP variants.  NOTE(review): elided view — the absent-attribute
 * shortcut and the default/error returns are not visible here. */
70 static int verify_encap_tmpl(struct rtattr **xfrma)
72 struct rtattr *rt = xfrma[XFRMA_ENCAP - 1];
73 struct xfrm_encap_tmpl *encap;
78 if ((rt->rta_len - sizeof(*rt)) < sizeof(*encap))
/* Only UDP encapsulation of ESP (RFC 3948 style) is accepted. */
82 switch (encap->encap_type) {
83 case UDP_ENCAP_ESPINUDP:
84 case UDP_ENCAP_ESPINUDP_NON_IKE:
/* Sanity-check a new-SA request (XFRM_MSG_NEWSA/UPDSA) before any state is
 * allocated: the attribute combination must match the IPsec protocol
 * (AH needs auth only, ESP needs auth and/or crypt, IPCOMP needs comp only)
 * and each present algorithm / encap attribute must itself be well formed.
 * NOTE(review): elided view — family checks, case labels (presumably
 * IPPROTO_AH/ESP/COMP) and the out:/error returns are not visible. */
93 static int verify_newsa_info(struct xfrm_usersa_info *p,
94 struct rtattr **xfrma)
104 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
116 switch (p->id.proto) {
/* AH: exactly one auth algorithm, no crypt/comp allowed. */
118 if (!xfrma[XFRMA_ALG_AUTH-1] ||
119 xfrma[XFRMA_ALG_CRYPT-1] ||
120 xfrma[XFRMA_ALG_COMP-1])
/* ESP: at least one of auth/crypt, no comp allowed. */
125 if ((!xfrma[XFRMA_ALG_AUTH-1] &&
126 !xfrma[XFRMA_ALG_CRYPT-1]) ||
127 xfrma[XFRMA_ALG_COMP-1])
/* IPCOMP: exactly one comp algorithm, no auth/crypt allowed. */
132 if (!xfrma[XFRMA_ALG_COMP-1] ||
133 xfrma[XFRMA_ALG_AUTH-1] ||
134 xfrma[XFRMA_ALG_CRYPT-1])
/* Per-attribute validation; each call bails out on first error. */
142 if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH)))
144 if ((err = verify_one_alg(xfrma, XFRMA_ALG_CRYPT)))
146 if ((err = verify_one_alg(xfrma, XFRMA_ALG_COMP)))
148 if ((err = verify_encap_tmpl(xfrma)))
/* Copy one user-supplied algorithm (header + alg_key_len key bytes) into a
 * freshly kmalloc'd xfrm_algo and store it via *algpp.  The attribute was
 * already size-checked by verify_one_alg().  NOTE(review): elided view —
 * the NULL-attribute shortcut, ENOMEM check and *algpp assignment are not
 * visible here. */
167 static int attach_one_algo(struct xfrm_algo **algpp, struct rtattr *u_arg)
169 struct rtattr *rta = u_arg;
170 struct xfrm_algo *p, *ualg;
175 ualg = RTA_DATA(rta);
/* Allocate header plus variable-length key in one chunk. */
176 p = kmalloc(sizeof(*ualg) + ualg->alg_key_len, GFP_KERNEL);
180 memcpy(p, ualg, sizeof(*ualg) + ualg->alg_key_len);
/* Duplicate the user-supplied NAT-T encapsulation template into kernel
 * memory and store it via *encapp.  NOTE(review): elided view — the
 * NULL-attribute shortcut, ENOMEM check and *encapp assignment are not
 * visible here. */
185 static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg)
187 struct rtattr *rta = u_arg;
188 struct xfrm_encap_tmpl *p, *uencap;
193 uencap = RTA_DATA(rta);
194 p = kmalloc(sizeof(*p), GFP_KERNEL);
198 memcpy(p, uencap, sizeof(*p));
/* Fill an xfrm_state from the userspace xfrm_usersa_info image:
 * id/selector/lifetime are copied wholesale, the scalar properties are
 * copied field by field.  Inverse of copy_to_user_state(). */
203 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
205 memcpy(&x->id, &p->id, sizeof(x->id));
206 memcpy(&x->sel, &p->sel, sizeof(x->sel));
207 memcpy(&x->lft, &p->lft, sizeof(x->lft));
208 x->props.mode = p->mode;
209 x->props.replay_window = p->replay_window;
210 x->props.reqid = p->reqid;
211 x->props.family = p->family;
212 x->props.saddr = p->saddr;
213 x->props.flags = p->flags;
/* Build a fully initialised xfrm_state from an already-verified NEWSA
 * request: copy the fixed part, attach the optional algorithm/encap
 * attributes, resolve the transform type and run its init_state hook.
 * On success the state is stamped with the current time and marked VALID;
 * the error path (elided below) marks it DEAD before release.
 * NOTE(review): elided view — allocation-failure check, error gotos and
 * the final return are not visible here. */
216 static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
217 struct rtattr **xfrma,
220 struct xfrm_state *x = xfrm_state_alloc();
226 copy_from_user_state(x, p);
/* Each attach_* is a no-op for absent attributes and may fail with -ENOMEM. */
228 if ((err = attach_one_algo(&x->aalg, xfrma[XFRMA_ALG_AUTH-1])))
230 if ((err = attach_one_algo(&x->ealg, xfrma[XFRMA_ALG_CRYPT-1])))
232 if ((err = attach_one_algo(&x->calg, xfrma[XFRMA_ALG_COMP-1])))
234 if ((err = attach_encap_tmpl(&x->encap, xfrma[XFRMA_ENCAP-1])))
/* Resolve the protocol-specific transform (AH/ESP/IPCOMP) for this family. */
238 x->type = xfrm_get_type(x->id.proto, x->props.family);
242 err = x->type->init_state(x, NULL);
/* Record creation time (seconds) and make the SA usable. */
246 x->curlft.add_time = (unsigned long) xtime.tv_sec;
247 x->km.state = XFRM_STATE_VALID;
/* Error path: poison the state so the timer/GC tears it down. */
253 x->km.state = XFRM_STATE_DEAD;
/* Netlink doit handler for XFRM_MSG_NEWSA and XFRM_MSG_UPDSA: verify the
 * request, construct the state, then insert it (NEWSA, exclusive) or update
 * the existing SA (UPDSA).  On insert/update failure the state is marked
 * DEAD so it gets reaped.  NOTE(review): elided view — early-return error
 * checks and the xfrm_state_put/return tail are not visible here. */
260 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
262 struct xfrm_usersa_info *p = NLMSG_DATA(nlh);
263 struct xfrm_state *x;
266 err = verify_newsa_info(p, (struct rtattr **) xfrma);
270 x = xfrm_state_construct(p, (struct rtattr **) xfrma, &err);
/* NEWSA inserts exclusively; anything else (UPDSA) updates in place. */
274 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
275 err = xfrm_state_add(x);
277 err = xfrm_state_update(x);
/* Failure: let the state machinery destroy the half-built SA. */
280 x->km.state = XFRM_STATE_DEAD;
/* Netlink doit handler for XFRM_MSG_DELSA: look the SA up by
 * (daddr, spi, proto, family) and delete it, refusing to delete
 * kernel-owned (xfrm_state_kern) states.  NOTE(review): elided view —
 * the not-found return, the error code for the kern case and the final
 * xfrm_state_put/return are not visible here. */
287 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
289 struct xfrm_state *x;
290 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
292 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
/* States created by the kernel itself must not be deleted from userspace. */
296 if (xfrm_state_kern(x)) {
301 xfrm_state_delete(x);
/* Serialise an xfrm_state into the userspace xfrm_usersa_info layout,
 * including current lifetime and statistics.  Inverse of
 * copy_from_user_state(). */
307 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
309 memcpy(&p->id, &x->id, sizeof(p->id));
310 memcpy(&p->sel, &x->sel, sizeof(p->sel));
311 memcpy(&p->lft, &x->lft, sizeof(p->lft));
312 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
313 memcpy(&p->stats, &x->stats, sizeof(p->stats));
314 p->saddr = x->props.saddr;
315 p->mode = x->props.mode;
316 p->replay_window = x->props.replay_window;
317 p->reqid = x->props.reqid;
318 p->family = x->props.family;
319 p->flags = x->props.flags;
/* Cursor passed to the dump_one_state()/dump_one_policy() walk callbacks:
 * the request skb (for pid/seq), the reply skb being filled, and (elided
 * below) index fields used to resume a partial dump. */
323 struct xfrm_dump_info {
324 struct sk_buff *in_skb;
325 struct sk_buff *out_skb;
/* xfrm_state_walk() callback: append one SA as an XFRM_MSG_NEWSA message
 * (fixed xfrm_usersa_info plus optional algorithm/encap rtattrs) to the
 * dump skb.  Entries before start_idx are skipped so an interrupted dump
 * can resume.  On NLMSG_PUT/RTA_PUT overflow (the nlmsg_failure/rtattr
 * labels, elided) the partial message is trimmed off and an error is
 * returned so netlink retries with a fresh skb. */
331 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
333 struct xfrm_dump_info *sp = ptr;
334 struct sk_buff *in_skb = sp->in_skb;
335 struct sk_buff *skb = sp->out_skb;
336 struct xfrm_usersa_info *p;
337 struct nlmsghdr *nlh;
/* Remember the message start so a failed put can be rolled back. */
338 unsigned char *b = skb->tail;
340 if (sp->this_idx < sp->start_idx)
343 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
345 XFRM_MSG_NEWSA, sizeof(*p));
346 nlh->nlmsg_flags = 0;
349 copy_to_user_state(x, p);
/* Keys are transferred as header + key bits rounded up to whole bytes. */
352 RTA_PUT(skb, XFRMA_ALG_AUTH,
353 sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);
355 RTA_PUT(skb, XFRMA_ALG_CRYPT,
356 sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);
358 RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
361 RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
/* Patch the final message length now that all attributes are in. */
363 nlh->nlmsg_len = skb->tail - b;
/* Failure path: discard the partially-built message. */
370 skb_trim(skb, b - skb->data);
/* Netlink dump handler for XFRM_MSG_GETSA with NLM_F_DUMP: walk every SA
 * via xfrm_state_walk(), resuming from cb->args[0] and saving the new
 * position back into cb->args[0] for the next continuation call. */
374 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
376 struct xfrm_dump_info info;
378 info.in_skb = cb->skb;
380 info.nlmsg_seq = cb->nlh->nlmsg_seq;
382 info.start_idx = cb->args[0];
383 (void) xfrm_state_walk(IPSEC_PROTO_ANY, dump_one_state, &info);
384 cb->args[0] = info.this_idx;
/* Build a one-SA netlink reply skb for a GETSA/ALLOCSPI unicast response.
 * Reuses dump_one_state() with start_idx 0 so exactly this state is
 * serialised.  Returns ERR_PTR(-ENOMEM) on allocation failure; the
 * dump-failure cleanup (elided) frees the skb.  GFP_ATOMIC is used here —
 * presumably because a caller holds x->lock (see xfrm_alloc_userspi). */
389 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
390 struct xfrm_state *x, u32 seq)
392 struct xfrm_dump_info info;
395 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
397 return ERR_PTR(-ENOMEM);
/* Address the reply to the requesting socket. */
399 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
400 info.in_skb = in_skb;
402 info.nlmsg_seq = seq;
403 info.this_idx = info.start_idx = 0;
405 if (dump_one_state(x, 0, &info)) {
/* Netlink doit handler for XFRM_MSG_GETSA (non-dump): look the SA up by
 * (daddr, spi, proto, family), serialise it and unicast it back to the
 * requester.  NOTE(review): elided view — the not-found return and the
 * xfrm_state_put/return tail are not visible here. */
413 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
415 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
416 struct xfrm_state *x;
417 struct sk_buff *resp_skb;
420 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
425 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
426 if (IS_ERR(resp_skb)) {
427 err = PTR_ERR(resp_skb);
429 err = netlink_unicast(xfrm_nl, resp_skb,
430 NETLINK_CB(skb).pid, MSG_DONTWAIT);
/* Validate an XFRM_MSG_ALLOCSPI request: the protocol must be one the
 * SPI allocator supports, and for IPCOMP the requested range must fit in
 * the 16-bit CPI space.  NOTE(review): elided view — case labels, the
 * min<=max check (presumably) and return statements are not visible. */
437 static int verify_userspi_info(struct xfrm_userspi_info *p)
439 switch (p->info.id.proto) {
445 /* IPCOMP spi is 16-bits. */
446 if (p->max >= 0x10000)
/* Netlink doit handler for XFRM_MSG_ALLOCSPI: find (or create) an ACQUIRE
 * state matching the request, allocate an SPI from [min, max] under the
 * state lock, and unicast the resulting SA back to the caller.
 * NOTE(review): elided view — error returns and the xfrm_state_put tail
 * are not visible here. */
460 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
462 struct xfrm_state *x;
463 struct xfrm_userspi_info *p;
464 struct sk_buff *resp_skb;
468 err = verify_userspi_info(p);
471 x = xfrm_find_acq(p->info.mode, p->info.reqid, p->info.id.proto,
/* No matching acquire state could be found/created. */
479 resp_skb = ERR_PTR(-ENOENT);
/* SPI allocation and serialisation happen atomically w.r.t. state death. */
481 spin_lock_bh(&x->lock);
482 if (x->km.state != XFRM_STATE_DEAD) {
483 xfrm_alloc_spi(x, htonl(p->min), htonl(p->max));
485 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
487 spin_unlock_bh(&x->lock);
489 if (IS_ERR(resp_skb)) {
490 err = PTR_ERR(resp_skb);
494 err = netlink_unicast(xfrm_nl, resp_skb,
495 NETLINK_CB(skb).pid, MSG_DONTWAIT);
/* Accept only the valid policy directions (the XFRM_POLICY_IN case is
 * elided from this view along with the default error return). */
503 static int verify_policy_dir(__u8 dir)
507 case XFRM_POLICY_OUT:
508 case XFRM_POLICY_FWD:
/* Sanity-check a new-policy request: share mode, action, selector family
 * and direction must all be recognised values.  IPv6 selectors are only
 * accepted when IPv6 support is compiled in.  NOTE(review): elided view —
 * the switch subjects for share/action and several case labels/returns are
 * not visible here. */
518 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
522 case XFRM_SHARE_SESSION:
523 case XFRM_SHARE_USER:
524 case XFRM_SHARE_UNIQUE:
532 case XFRM_POLICY_ALLOW:
533 case XFRM_POLICY_BLOCK:
540 switch (p->sel.family) {
545 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* IPv6 selector but no IPv6 support built in. */
548 return -EAFNOSUPPORT;
/* Direction is the last field checked; its result is the verdict. */
555 return verify_policy_dir(p->dir);
/* Copy nr user-supplied xfrm_user_tmpl entries into the policy's
 * xfrm_vec template array, field by field.  Caller has already bounded
 * nr by XFRM_MAX_DEPTH (see copy_from_user_tmpl / xfrm_compile_policy). */
558 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
564 for (i = 0; i < nr; i++, ut++) {
565 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
567 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
568 memcpy(&t->saddr, &ut->saddr,
569 sizeof(xfrm_address_t));
570 t->reqid = ut->reqid;
572 t->share = ut->share;
573 t->optional = ut->optional;
574 t->aalgos = ut->aalgos;
575 t->ealgos = ut->ealgos;
576 t->calgos = ut->calgos;
/* Pull the optional XFRMA_TMPL attribute into the policy: derive the
 * template count from the attribute payload size, reject counts above
 * XFRM_MAX_DEPTH, then copy via copy_templates().  NOTE(review): elided
 * view — the absent-attribute shortcut and return statements are not
 * visible here. */
580 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma)
582 struct rtattr *rt = xfrma[XFRMA_TMPL-1];
583 struct xfrm_user_tmpl *utmpl;
/* Whole payload must be an array of xfrm_user_tmpl. */
589 nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl);
591 if (nr > XFRM_MAX_DEPTH)
594 copy_templates(pol, RTA_DATA(rt), nr);
/* Fill an xfrm_policy from the userspace xfrm_userpolicy_info image.
 * The policy family is taken from the selector's family. */
599 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
601 xp->priority = p->priority;
602 xp->index = p->index;
603 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
604 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
605 xp->action = p->action;
606 xp->flags = p->flags;
607 xp->family = p->sel.family;
608 /* XXX xp->share = p->share; */
/* Serialise an xfrm_policy into the userspace xfrm_userpolicy_info layout.
 * share is not tracked per-policy yet, so XFRM_SHARE_ANY is reported. */
611 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
613 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
614 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
615 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
616 p->priority = xp->priority;
617 p->index = xp->index;
618 p->sel.family = xp->family;
620 p->action = xp->action;
621 p->flags = xp->flags;
622 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
/* Allocate an xfrm_policy and populate it from a verified NEWPOLICY
 * request (fixed part plus optional template attribute).  NOTE(review):
 * elided view — the ENOMEM check, error cleanup and return are not
 * visible here. */
625 static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **xfrma, int *errp)
627 struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
635 copy_from_user_policy(xp, p);
636 err = copy_from_user_tmpl(xp, xfrma);
/* Netlink doit handler for XFRM_MSG_NEWPOLICY and XFRM_MSG_UPDPOLICY:
 * verify, construct, then insert — exclusively for NEWPOLICY, replacing
 * for UPDPOLICY.  NOTE(review): elided view — early error returns and the
 * failure cleanup after xfrm_policy_insert() are not visible here. */
646 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
648 struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh);
649 struct xfrm_policy *xp;
653 err = verify_newpolicy_info(p);
657 xp = xfrm_policy_construct(p, (struct rtattr **) xfrma, &err);
/* NEWPOLICY must not overwrite an existing entry; UPDPOLICY may. */
661 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
662 err = xfrm_policy_insert(p->dir, xp, excl);
/* Append the policy's templates as one XFRMA_TMPL rtattr containing an
 * array of xfrm_user_tmpl.  A policy with no templates emits nothing.
 * The vec[] staging array is bounded by XFRM_MAX_DEPTH, matching the
 * limit enforced on input.  NOTE(review): elided view — the success
 * return and the rtattr_failure label/return are not visible here. */
673 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
675 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
678 if (xp->xfrm_nr == 0)
/* Convert each kernel template to its userspace representation. */
681 for (i = 0; i < xp->xfrm_nr; i++) {
682 struct xfrm_user_tmpl *up = &vec[i];
683 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
685 memcpy(&up->id, &kp->id, sizeof(up->id));
686 up->family = xp->family;
687 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
688 up->reqid = kp->reqid;
690 up->share = kp->share;
691 up->optional = kp->optional;
692 up->aalgos = kp->aalgos;
693 up->ealgos = kp->ealgos;
694 up->calgos = kp->calgos;
696 RTA_PUT(skb, XFRMA_TMPL,
697 (sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr),
/* xfrm_policy_walk() callback: append one policy as an XFRM_MSG_NEWPOLICY
 * message (fixed xfrm_userpolicy_info plus XFRMA_TMPL attribute) to the
 * dump skb, skipping entries before start_idx for resumable dumps.
 * On overflow (elided labels) the partial message is trimmed away. */
706 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
708 struct xfrm_dump_info *sp = ptr;
709 struct xfrm_userpolicy_info *p;
710 struct sk_buff *in_skb = sp->in_skb;
711 struct sk_buff *skb = sp->out_skb;
712 struct nlmsghdr *nlh;
/* Message start, for rollback on failure. */
713 unsigned char *b = skb->tail;
715 if (sp->this_idx < sp->start_idx)
718 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
720 XFRM_MSG_NEWPOLICY, sizeof(*p));
722 nlh->nlmsg_flags = 0;
724 copy_to_user_policy(xp, p, dir);
725 if (copy_to_user_tmpl(xp, skb) < 0)
728 nlh->nlmsg_len = skb->tail - b;
/* Failure path: discard the partially-built message. */
734 skb_trim(skb, b - skb->data);
/* Netlink dump handler for XFRM_MSG_GETPOLICY with NLM_F_DUMP: walk all
 * policies, resuming from cb->args[0] and saving the new position back
 * for the next continuation call. */
738 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
740 struct xfrm_dump_info info;
742 info.in_skb = cb->skb;
744 info.nlmsg_seq = cb->nlh->nlmsg_seq;
746 info.start_idx = cb->args[0];
747 (void) xfrm_policy_walk(dump_one_policy, &info);
748 cb->args[0] = info.this_idx;
/* Build a one-policy netlink reply skb for a GETPOLICY unicast response,
 * reusing dump_one_policy() with start_idx 0.  Returns ERR_PTR(-ENOMEM)
 * on allocation failure; the dump-failure cleanup (elided) frees the skb.
 * Unlike xfrm_state_netlink() this can sleep (GFP_KERNEL). */
753 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
754 struct xfrm_policy *xp,
757 struct xfrm_dump_info info;
760 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
762 return ERR_PTR(-ENOMEM);
/* Address the reply to the requesting socket. */
764 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
765 info.in_skb = in_skb;
767 info.nlmsg_seq = seq;
768 info.this_idx = info.start_idx = 0;
770 if (dump_one_policy(xp, dir, 0, &info) < 0) {
/* Netlink doit handler shared by XFRM_MSG_GETPOLICY (non-dump) and
 * XFRM_MSG_DELPOLICY: look the policy up by index or by selector, delete
 * it when the message type is DELPOLICY, and for GET unicast the policy
 * back to the requester.  NOTE(review): elided view — the index-vs-selector
 * branch condition, not-found return and xfrm_pol_put tail are not visible
 * here. */
778 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
780 struct xfrm_policy *xp;
781 struct xfrm_userpolicy_id *p;
/* DELPOLICY removes the entry as part of the lookup. */
786 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
788 err = verify_policy_dir(p->dir);
793 xp = xfrm_policy_byid(p->dir, p->index, delete);
795 xp = xfrm_policy_bysel(p->dir, &p->sel, delete);
800 struct sk_buff *resp_skb;
802 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
803 if (IS_ERR(resp_skb)) {
804 err = PTR_ERR(resp_skb);
806 err = netlink_unicast(xfrm_nl, resp_skb,
/* Netlink doit handler for XFRM_MSG_FLUSHSA: delete all SAs of the given
 * protocol (IPSEC_PROTO_ANY flushes everything). */
817 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
819 struct xfrm_usersa_flush *p = NLMSG_DATA(nlh);
821 xfrm_state_flush(p->proto);
/* Netlink doit handler for XFRM_MSG_FLUSHPOLICY (body elided from this
 * view; presumably calls xfrm_policy_flush() — confirm in full source). */
825 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
/* Minimum nlmsg length for each XFRM message type, indexed by
 * (nlmsg_type - XFRM_MSG_BASE).  xfrm_user_rcv_msg() rejects any message
 * shorter than its entry here before touching NLMSG_DATA().  Order must
 * match the XFRM_MSG_* enum exactly. */
831 static const int xfrm_msg_min[(XFRM_MSG_MAX + 1 - XFRM_MSG_BASE)] = {
832 NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)), /* NEW SA */
833 NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)), /* DEL SA */
834 NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)), /* GET SA */
835 NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* NEW POLICY */
836 NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)), /* DEL POLICY */
837 NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)), /* GET POLICY */
838 NLMSG_LENGTH(sizeof(struct xfrm_userspi_info)), /* ALLOC SPI */
839 NLMSG_LENGTH(sizeof(struct xfrm_user_acquire)), /* ACQUIRE */
840 NLMSG_LENGTH(sizeof(struct xfrm_user_expire)), /* EXPIRE */
841 NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* UPD POLICY */
842 NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)), /* UPD SA */
843 NLMSG_LENGTH(sizeof(struct xfrm_user_polexpire)), /* POLEXPIRE */
844 NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)), /* FLUSH SA */
845 NLMSG_LENGTH(0), /* FLUSH POLICY */
/* Per-message-type handler table, indexed like xfrm_msg_min by
 * (nlmsg_type - XFRM_MSG_BASE): .doit handles a single request, .dump
 * handles NLM_F_DUMP continuations.  Several types share a handler
 * (add_sa also serves UPDSA, add_policy serves UPDPOLICY, get_policy
 * serves DELPOLICY).  NOTE(review): elided view — some initializer
 * entries (e.g. GETSA's doit, ACQUIRE/EXPIRE/POLEXPIRE slots) are not
 * visible; order must track the XFRM_MSG_* enum. */
848 static struct xfrm_link {
849 int (*doit)(struct sk_buff *, struct nlmsghdr *, void **);
850 int (*dump)(struct sk_buff *, struct netlink_callback *);
851 } xfrm_dispatch[] = {
852 { .doit = xfrm_add_sa, },
853 { .doit = xfrm_del_sa, },
856 .dump = xfrm_dump_sa,
858 { .doit = xfrm_add_policy },
859 { .doit = xfrm_get_policy },
861 .doit = xfrm_get_policy,
862 .dump = xfrm_dump_policy,
864 { .doit = xfrm_alloc_userspi },
867 { .doit = xfrm_add_policy },
868 { .doit = xfrm_add_sa, },
870 { .doit = xfrm_flush_sa },
871 { .doit = xfrm_flush_policy },
/* Netlink dump-completion callback passed to netlink_dump_start()
 * (body elided from this view; presumably a trivial return 0). */
874 static int xfrm_done(struct netlink_callback *cb)
/* Decode and dispatch one netlink message: ignore non-requests and
 * control messages, require CAP-level privilege for every operation,
 * start a dump for GETSA/GETPOLICY with NLM_F_DUMP, otherwise
 * length-check the fixed part against xfrm_msg_min, parse trailing
 * rtattrs into xfrma[] (indexed by type-1), and call the type's doit.
 * NOTE(review): elided view — several returns, the err: label and the
 * final return are not visible here. */
879 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
881 struct rtattr *xfrma[XFRMA_MAX];
882 struct xfrm_link *link;
885 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
888 type = nlh->nlmsg_type;
890 /* A control message: ignore them */
891 if (type < XFRM_MSG_BASE)
894 /* Unknown message: reply with EINVAL */
895 if (type > XFRM_MSG_MAX)
/* Normalise to a 0-based index into xfrm_msg_min/xfrm_dispatch. */
898 type -= XFRM_MSG_BASE;
899 link = &xfrm_dispatch[type];
901 /* All operations require privileges, even GET */
902 if (security_netlink_recv(skb)) {
/* type 2 = GETSA, 5 = GETPOLICY (XFRM_MSG_BASE-relative): dumpable ops. */
907 if ((type == 2 || type == 5) && (nlh->nlmsg_flags & NLM_F_DUMP)) {
910 if (link->dump == NULL)
913 if ((*errp = netlink_dump_start(xfrm_nl, skb, nlh,
918 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
925 memset(xfrma, 0, sizeof(xfrma));
/* Fixed part must be at least as large as the per-type minimum. */
927 if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type]))
930 if (nlh->nlmsg_len > min_len) {
931 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
932 struct rtattr *attr = (void *) nlh + NLMSG_ALIGN(min_len);
/* Walk the rtattr chain; unknown flavors are rejected (elided branch). */
934 while (RTA_OK(attr, attrlen)) {
935 unsigned short flavor = attr->rta_type;
937 if (flavor > XFRMA_MAX)
939 xfrma[flavor - 1] = attr;
941 attr = RTA_NEXT(attr, attrlen);
945 if (link->doit == NULL)
947 *errp = link->doit(skb, nlh, (void **) &xfrma);
/* Process every netlink message packed into one skb: validate the header
 * length against the remaining skb, dispatch via xfrm_user_rcv_msg(),
 * netlink_ack() errors and (when NLM_F_ACK is set) successes.
 * NOTE(review): elided view — skb_pull between messages and the return
 * value are not visible here. */
956 static int xfrm_user_rcv_skb(struct sk_buff *skb)
959 struct nlmsghdr *nlh;
961 while (skb->len >= NLMSG_SPACE(0)) {
964 nlh = (struct nlmsghdr *) skb->data;
/* Malformed header or truncated message: stop processing this skb. */
965 if (nlh->nlmsg_len < sizeof(*nlh) ||
966 skb->len < nlh->nlmsg_len)
968 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
971 if (xfrm_user_rcv_msg(skb, nlh, &err) < 0) {
974 netlink_ack(skb, nlh, err);
975 } else if (nlh->nlmsg_flags & NLM_F_ACK)
976 netlink_ack(skb, nlh, 0);
/* data_ready callback for the NETLINK_XFRM kernel socket: drain the
 * receive queue, handing each skb to xfrm_user_rcv_skb(); on a non-zero
 * return the skb is requeued at the head for a later retry.  The outer
 * loop (elided) re-runs while the queue is non-empty. */
983 static void xfrm_netlink_rcv(struct sock *sk, int len)
990 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
991 if (xfrm_user_rcv_skb(skb)) {
993 skb_queue_head(&sk->sk_receive_queue,
1004 } while (xfrm_nl && xfrm_nl->sk_receive_queue.qlen);
/* Build one XFRM_MSG_EXPIRE message into skb for the given SA, with
 * ue->hard normalised to 0/1.  On NLMSG_PUT overflow (elided label) the
 * partial message is trimmed and an error returned. */
1007 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, int hard)
1009 struct xfrm_user_expire *ue;
1010 struct nlmsghdr *nlh;
1011 unsigned char *b = skb->tail;
1013 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_EXPIRE,
1015 ue = NLMSG_DATA(nlh);
1016 nlh->nlmsg_flags = 0;
1018 copy_to_user_state(x, &ue->state);
/* Collapse any non-zero "hard" into exactly 1 for the wire format. */
1019 ue->hard = (hard != 0) ? 1 : 0;
1021 nlh->nlmsg_len = skb->tail - b;
/* Failure path: discard the partially-built message. */
1025 skb_trim(skb, b - skb->data);
/* km notify hook: broadcast an SA expire event to XFRMGRP_EXPIRE
 * listeners.  The +16 slack covers netlink header alignment above the
 * fixed payload.  NOTE(review): elided view — alloc/build failure returns
 * are not visible here. */
1029 static int xfrm_send_state_notify(struct xfrm_state *x, int hard)
1031 struct sk_buff *skb;
1033 skb = alloc_skb(sizeof(struct xfrm_user_expire) + 16, GFP_ATOMIC);
1037 if (build_expire(skb, x, hard) < 0)
1040 NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE;
1042 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
/* Build one XFRM_MSG_ACQUIRE message: the larval state's id/saddr/selector,
 * the matching policy, the template's algorithm masks, and a fresh acquire
 * sequence number that is also recorded in x->km.seq so the eventual
 * userspace reply can be correlated.  On overflow (elided labels) the
 * partial message is trimmed and an error returned. */
1045 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
1046 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
1049 struct xfrm_user_acquire *ua;
1050 struct nlmsghdr *nlh;
1051 unsigned char *b = skb->tail;
1052 __u32 seq = xfrm_get_acqseq();
1054 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE,
1056 ua = NLMSG_DATA(nlh);
1057 nlh->nlmsg_flags = 0;
1059 memcpy(&ua->id, &x->id, sizeof(ua->id));
1060 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
1061 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
1062 copy_to_user_policy(xp, &ua->policy, dir);
1063 ua->aalgos = xt->aalgos;
1064 ua->ealgos = xt->ealgos;
1065 ua->calgos = xt->calgos;
/* Stash the seq in the state so the NEWSA reply can be matched up. */
1066 ua->seq = x->km.seq = seq;
1068 if (copy_to_user_tmpl(xp, skb) < 0)
1071 nlh->nlmsg_len = skb->tail - b;
/* Failure path: discard the partially-built message. */
1075 skb_trim(skb, b - skb->data);
/* km acquire hook: size the skb for the acquire payload plus the policy's
 * template attribute, build the message and broadcast it to
 * XFRMGRP_ACQUIRE listeners (IKE daemons).  NOTE(review): elided view —
 * alloc/build failure returns are not visible here. */
1079 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
1080 struct xfrm_policy *xp, int dir)
1082 struct sk_buff *skb;
/* Exact sizing: template rtattr plus the fixed acquire message. */
1085 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
1086 len += NLMSG_SPACE(sizeof(struct xfrm_user_acquire));
1087 skb = alloc_skb(len, GFP_ATOMIC);
1091 if (build_acquire(skb, x, xt, xp, dir) < 0)
1094 NETLINK_CB(skb).dst_groups = XFRMGRP_ACQUIRE;
1096 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_ACQUIRE, GFP_ATOMIC);
1099 /* User gives us xfrm_user_policy_info followed by an array of 0
1100 * or more templates.
/* km compile_policy hook (IP_XFRM_POLICY / IPV6_XFRM_POLICY setsockopt
 * path): parse an xfrm_userpolicy_info followed by 0..XFRM_MAX_DEPTH
 * xfrm_user_tmpl entries straight from the option buffer into a new
 * xfrm_policy.  The option name must match the address family.
 * NOTE(review): elided view — the family switch, *dir assignment, error
 * returns and the final return xp are not visible here. */
1102 struct xfrm_policy *xfrm_compile_policy(u16 family, int opt,
1103 u8 *data, int len, int *dir)
1105 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
/* Templates follow the fixed header back-to-back in the option buffer. */
1106 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
1107 struct xfrm_policy *xp;
1112 if (opt != IP_XFRM_POLICY) {
1117 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1119 if (opt != IPV6_XFRM_POLICY) {
/* Buffer must hold at least the fixed part, and it must verify. */
1132 if (len < sizeof(*p) ||
1133 verify_newpolicy_info(p))
1136 nr = ((len - sizeof(*p)) / sizeof(*ut));
1137 if (nr > XFRM_MAX_DEPTH)
1140 xp = xfrm_policy_alloc(GFP_KERNEL);
1146 copy_from_user_policy(xp, p);
1147 copy_templates(xp, ut, nr);
/* Build one XFRM_MSG_POLEXPIRE message for an expiring policy: fixed
 * xfrm_user_polexpire plus the policy's template attribute.  The hard
 * flag assignment is elided from this view.  On overflow (elided labels)
 * the partial message is trimmed and an error returned. */
1154 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
1157 struct xfrm_user_polexpire *upe;
1158 struct nlmsghdr *nlh;
1159 unsigned char *b = skb->tail;
1161 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe));
1162 upe = NLMSG_DATA(nlh);
1163 nlh->nlmsg_flags = 0;
1165 copy_to_user_policy(xp, &upe->pol, dir);
1166 if (copy_to_user_tmpl(xp, skb) < 0)
1170 nlh->nlmsg_len = skb->tail - b;
/* Failure path: discard the partially-built message. */
1174 skb_trim(skb, b - skb->data);
/* km notify_policy hook: size the skb for the polexpire payload plus the
 * template attribute, build the message and broadcast it to
 * XFRMGRP_EXPIRE listeners.  NOTE(review): elided view — alloc/build
 * failure returns are not visible here. */
1178 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, int hard)
1180 struct sk_buff *skb;
1183 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
1184 len += NLMSG_SPACE(sizeof(struct xfrm_user_polexpire));
1185 skb = alloc_skb(len, GFP_ATOMIC);
1189 if (build_polexpire(skb, xp, dir, hard) < 0)
1192 NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE;
1194 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
/* Key-manager registration: wires this netlink interface into the xfrm
 * core so it receives expire/acquire callbacks and can compile
 * socket-policy options. */
1197 static struct xfrm_mgr netlink_mgr = {
1199 .notify = xfrm_send_state_notify,
1200 .acquire = xfrm_send_acquire,
1201 .compile_policy = xfrm_compile_policy,
1202 .notify_policy = xfrm_send_policy_notify,
/* Module init: create the NETLINK_XFRM kernel socket (panicking on
 * failure, since IPsec configuration would be impossible) and register
 * this interface as a key manager with the xfrm core. */
1205 static int __init xfrm_user_init(void)
1207 printk(KERN_INFO "Initializing IPsec netlink socket\n");
1209 xfrm_nl = netlink_kernel_create(NETLINK_XFRM, xfrm_netlink_rcv);
1210 if (xfrm_nl == NULL)
1211 panic("xfrm_user_init: cannot initialize xfrm_nl\n");
1214 xfrm_register_km(&netlink_mgr);
/* Module exit: unregister from the xfrm core, then release the netlink
 * socket. */
1219 static void __exit xfrm_user_exit(void)
1221 xfrm_unregister_km(&netlink_mgr);
1222 sock_release(xfrm_nl->sk_socket);
/* Standard module hookup: entry/exit points and GPL license tag. */
1225 module_init(xfrm_user_init);
1226 module_exit(xfrm_user_exit);
1227 MODULE_LICENSE("GPL");