1 /* xfrm_user.c: User interface to configure xfrm engine.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/types.h>
16 #include <linux/slab.h>
17 #include <linux/socket.h>
18 #include <linux/string.h>
19 #include <linux/net.h>
20 #include <linux/skbuff.h>
21 #include <linux/netlink.h>
22 #include <linux/rtnetlink.h>
23 #include <linux/pfkeyv2.h>
24 #include <linux/ipsec.h>
25 #include <linux/init.h>
26 #include <linux/security.h>
29 #include <asm/uaccess.h>
/* Kernel-side netlink socket for user<->kernel xfrm configuration.
 * Created in xfrm_user_init(); used for unicast replies (netlink_unicast)
 * and event broadcasts (netlink_broadcast) throughout this file.
 */
31 static struct sock *xfrm_nl;
/* Validate one algorithm attribute (auth/crypt/comp) supplied by userspace.
 * Checks the rtattr payload is large enough for a struct xfrm_algo, rejects
 * empty keys except for the null transforms, and forcibly NUL-terminates
 * the algorithm name before it is used as a C string.
 * NOTE(review): excerpt is truncated — the switch on `type`, the early
 * return when the attribute is absent, and the return statements are on
 * lines missing from this view.
 */
33 static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
35 struct rtattr *rt = xfrma[type - 1];
36 struct xfrm_algo *algp;
/* payload must at least hold the fixed-size header of struct xfrm_algo */
41 if ((rt->rta_len - sizeof(*rt)) < sizeof(*algp))
/* auth: a zero-length key is only legal for "digest_null" */
47 if (!algp->alg_key_len &&
48 strcmp(algp->alg_name, "digest_null") != 0)
/* crypt: a zero-length key is only legal for "cipher_null" */
53 if (!algp->alg_key_len &&
54 strcmp(algp->alg_name, "cipher_null") != 0)
59 /* Zero length keys are legal. */
/* defensive: guarantee termination whatever userspace sent */
66 algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
/* Validate an optional XFRMA_ENCAP attribute: payload must be big enough
 * for a struct xfrm_encap_tmpl and carry a known UDP encapsulation type.
 * NOTE(review): the absent-attribute early return and the default/return
 * paths of the switch are on lines missing from this excerpt.
 */
70 static int verify_encap_tmpl(struct rtattr **xfrma)
72 struct rtattr *rt = xfrma[XFRMA_ENCAP - 1];
73 struct xfrm_encap_tmpl *encap;
78 if ((rt->rta_len - sizeof(*rt)) < sizeof(*encap))
/* only the two ESP-in-UDP variants are accepted here */
82 switch (encap->encap_type) {
83 case UDP_ENCAP_ESPINUDP:
84 case UDP_ENCAP_ESPINUDP_NON_IKE:
/* Sanity-check a new-SA request before any state is allocated:
 * per-protocol rules about which algorithm attributes must/must not be
 * present, then per-attribute validation via verify_one_alg() and
 * verify_encap_tmpl().
 * NOTE(review): the family switch, the proto case labels (presumably
 * IPPROTO_AH / IPPROTO_ESP / IPPROTO_COMP — confirm) and the error-return
 * lines are missing from this excerpt.
 */
93 static int verify_newsa_info(struct xfrm_usersa_info *p,
94 struct rtattr **xfrma)
104 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
116 switch (p->id.proto) {
/* AH: auth required; crypt/comp forbidden */
118 if (!xfrma[XFRMA_ALG_AUTH-1] ||
119 xfrma[XFRMA_ALG_CRYPT-1] ||
120 xfrma[XFRMA_ALG_COMP-1])
/* ESP: at least one of auth/crypt; comp forbidden */
125 if ((!xfrma[XFRMA_ALG_AUTH-1] &&
126 !xfrma[XFRMA_ALG_CRYPT-1]) ||
127 xfrma[XFRMA_ALG_COMP-1])
/* IPCOMP: comp required; auth/crypt forbidden */
132 if (!xfrma[XFRMA_ALG_COMP-1] ||
133 xfrma[XFRMA_ALG_AUTH-1] ||
134 xfrma[XFRMA_ALG_CRYPT-1])
142 if ((err = verify_one_alg(xfrma, XFRMA_ALG_AUTH)))
144 if ((err = verify_one_alg(xfrma, XFRMA_ALG_CRYPT)))
146 if ((err = verify_one_alg(xfrma, XFRMA_ALG_COMP)))
148 if ((err = verify_encap_tmpl(xfrma)))
/* Duplicate a user-supplied algorithm attribute into kernel memory and
 * hang it off *algpp.  The attribute was already size/content checked by
 * verify_one_alg().
 * NOTE(review): the !rta early return, the kmalloc failure check and the
 * *algpp assignment are on lines missing from this excerpt.  Also note the
 * allocation uses alg_key_len directly while dump_one_state() below treats
 * alg_key_len as a bit count ((len+7)/8) — units mismatch suspected;
 * verify against struct xfrm_algo's definition.
 */
167 static int attach_one_algo(struct xfrm_algo **algpp, struct rtattr *u_arg)
169 struct rtattr *rta = u_arg;
170 struct xfrm_algo *p, *ualg;
175 ualg = RTA_DATA(rta);
176 p = kmalloc(sizeof(*ualg) + ualg->alg_key_len, GFP_KERNEL);
180 memcpy(p, ualg, sizeof(*ualg) + ualg->alg_key_len);
/* Duplicate a user-supplied encapsulation template into kernel memory and
 * hang it off *encapp.  Validated earlier by verify_encap_tmpl().
 * NOTE(review): the !rta early return, the kmalloc failure check and the
 * *encapp assignment are on lines missing from this excerpt.
 */
185 static int attach_encap_tmpl(struct xfrm_encap_tmpl **encapp, struct rtattr *u_arg)
187 struct rtattr *rta = u_arg;
188 struct xfrm_encap_tmpl *p, *uencap;
193 uencap = RTA_DATA(rta);
194 p = kmalloc(sizeof(*p), GFP_KERNEL);
198 memcpy(p, uencap, sizeof(*p));
203 static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
205 memcpy(&x->id, &p->id, sizeof(x->id));
206 memcpy(&x->sel, &p->sel, sizeof(x->sel));
207 memcpy(&x->lft, &p->lft, sizeof(x->lft));
208 x->props.mode = p->mode;
209 x->props.replay_window = p->replay_window;
210 x->props.reqid = p->reqid;
211 x->props.family = p->family;
212 x->props.saddr = p->saddr;
213 x->props.flags = p->flags;
/* Build a complete xfrm_state from a validated new-SA request: allocate,
 * copy the scalar info, attach the three optional algorithms and the
 * encapsulation template, resolve the transform type and let it
 * initialize per-type state.  On any failure the state is marked DEAD
 * before release.
 * NOTE(review): the error labels/gotos, allocation-failure check and the
 * return statements are on lines missing from this excerpt.
 */
216 static struct xfrm_state *xfrm_state_construct(struct xfrm_usersa_info *p,
217 struct rtattr **xfrma,
220 struct xfrm_state *x = xfrm_state_alloc();
226 copy_from_user_state(x, p);
228 if ((err = attach_one_algo(&x->aalg, xfrma[XFRMA_ALG_AUTH-1])))
230 if ((err = attach_one_algo(&x->ealg, xfrma[XFRMA_ALG_CRYPT-1])))
232 if ((err = attach_one_algo(&x->calg, xfrma[XFRMA_ALG_COMP-1])))
234 if ((err = attach_encap_tmpl(&x->encap, xfrma[XFRMA_ENCAP-1])))
238 x->type = xfrm_get_type(x->id.proto, x->props.family);
242 err = x->type->init_state(x, NULL);
/* stamp creation time and make the state usable */
246 x->curlft.add_time = (unsigned long) xtime.tv_sec;
247 x->km.state = XFRM_STATE_VALID;
/* error path: ensure the half-built state is torn down as dead */
253 x->km.state = XFRM_STATE_DEAD;
/* XFRM_MSG_NEWSA / XFRM_MSG_UPDSA handler: validate the request,
 * construct the state, then insert (NEWSA) or update (UPDSA) it.
 * On insert/update failure the state is marked DEAD and released.
 * NOTE(review): error returns and xfrm_state_put on the failure path are
 * on lines missing from this excerpt.
 */
260 static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
262 struct xfrm_usersa_info *p = NLMSG_DATA(nlh);
263 struct xfrm_state *x;
266 err = verify_newsa_info(p, (struct rtattr **) xfrma);
272 x = xfrm_state_construct(p, (struct rtattr **) xfrma, &err);
/* same handler serves both message types; only insertion differs */
276 if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
277 err = xfrm_state_add(x);
279 err = xfrm_state_update(x);
282 x->km.state = XFRM_STATE_DEAD;
/* XFRM_MSG_DELSA handler: look up the SA by (daddr, spi, proto, family)
 * and delete it, refusing to delete kernel-internal states.
 * NOTE(review): the not-found return, the error code for the kern case
 * and the final xfrm_state_put/return are missing from this excerpt.
 */
289 static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
291 struct xfrm_state *x;
292 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
294 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
/* states owned by the kernel itself must not be deleted from userspace */
298 if (xfrm_state_kern(x)) {
303 xfrm_state_delete(x);
309 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
311 memcpy(&p->id, &x->id, sizeof(p->id));
312 memcpy(&p->sel, &x->sel, sizeof(p->sel));
313 memcpy(&p->lft, &x->lft, sizeof(p->lft));
314 memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
315 memcpy(&p->stats, &x->stats, sizeof(p->stats));
316 p->saddr = x->props.saddr;
317 p->mode = x->props.mode;
318 p->replay_window = x->props.replay_window;
319 p->reqid = x->props.reqid;
320 p->family = x->props.family;
321 p->flags = x->props.flags;
/* Shared cursor passed to the dump_one_* callbacks: the request skb (for
 * pid/seq addressing), the reply skb being filled, and — on lines missing
 * from this excerpt — the nlmsg_seq and this_idx/start_idx walk counters
 * used elsewhere in this file.
 */
325 struct xfrm_dump_info {
326 struct sk_buff *in_skb;
327 struct sk_buff *out_skb;
/* Per-state callback for SA dumps: skip entries before start_idx, then
 * emit one XFRM_MSG_NEWSA message (usersa_info + optional algo/encap
 * attributes) into the output skb.  On overflow the partial message is
 * trimmed away and an error is returned so the dump can resume later.
 * NOTE(review): the nlmsg_failure/rtattr_failure labels and the success
 * return are on lines missing from this excerpt.
 */
333 static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
335 struct xfrm_dump_info *sp = ptr;
336 struct sk_buff *in_skb = sp->in_skb;
337 struct sk_buff *skb = sp->out_skb;
338 struct xfrm_usersa_info *p;
339 struct nlmsghdr *nlh;
/* remember the rollback point in case the skb overflows */
340 unsigned char *b = skb->tail;
342 if (sp->this_idx < sp->start_idx)
345 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
347 XFRM_MSG_NEWSA, sizeof(*p));
348 nlh->nlmsg_flags = 0;
351 copy_to_user_state(x, p);
/* alg_key_len is in bits here: (len+7)/8 converts to bytes */
354 RTA_PUT(skb, XFRMA_ALG_AUTH,
355 sizeof(*(x->aalg))+(x->aalg->alg_key_len+7)/8, x->aalg);
357 RTA_PUT(skb, XFRMA_ALG_CRYPT,
358 sizeof(*(x->ealg))+(x->ealg->alg_key_len+7)/8, x->ealg);
360 RTA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
363 RTA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
/* finalize the message length now that all attributes are appended */
365 nlh->nlmsg_len = skb->tail - b;
/* failure: undo the partial message */
372 skb_trim(skb, b - skb->data);
/* Netlink dump entry point for SAs: walk every state of every protocol,
 * emitting them via dump_one_state(), and persist the resume position in
 * cb->args[0].
 * NOTE(review): out_skb/this_idx initialization and the return statement
 * are on lines missing from this excerpt.
 */
376 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
378 struct xfrm_dump_info info;
380 info.in_skb = cb->skb;
382 info.nlmsg_seq = cb->nlh->nlmsg_seq;
384 info.start_idx = cb->args[0];
385 (void) xfrm_state_walk(IPSEC_PROTO_ANY, dump_one_state, &info);
/* save cursor so the next dump invocation resumes where we stopped */
386 cb->args[0] = info.this_idx;
/* Render a single xfrm_state into a freshly allocated reply skb,
 * addressed back to the requesting pid.  Returns the skb or an ERR_PTR.
 * NOTE(review): the skb NULL check, the error path freeing the skb, and
 * the success return are on lines missing from this excerpt.
 */
391 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
392 struct xfrm_state *x, u32 seq)
394 struct xfrm_dump_info info;
397 skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
399 return ERR_PTR(-ENOMEM);
401 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
402 info.in_skb = in_skb;
404 info.nlmsg_seq = seq;
405 info.this_idx = info.start_idx = 0;
/* reuse the dump callback for a one-shot, single-state message */
407 if (dump_one_state(x, 0, &info)) {
/* XFRM_MSG_GETSA handler: look up one SA and unicast its serialized form
 * back to the requester.
 * NOTE(review): the not-found error return, the reference drop
 * (xfrm_state_put) and the final return are missing from this excerpt.
 */
415 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
417 struct xfrm_usersa_id *p = NLMSG_DATA(nlh);
418 struct xfrm_state *x;
419 struct sk_buff *resp_skb;
422 x = xfrm_state_lookup(&p->daddr, p->spi, p->proto, p->family);
427 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
428 if (IS_ERR(resp_skb)) {
429 err = PTR_ERR(resp_skb);
431 err = netlink_unicast(xfrm_nl, resp_skb,
432 NETLINK_CB(skb).pid, MSG_DONTWAIT);
/* Validate an SPI-allocation request: protocol must be one accepted by
 * the (partially elided) switch, and for IPCOMP the requested range must
 * fit the 16-bit CPI space.
 * NOTE(review): the case labels, the min<=max range check and the return
 * statements are on lines missing from this excerpt.
 */
439 static int verify_userspi_info(struct xfrm_userspi_info *p)
441 switch (p->info.id.proto) {
447 /* IPCOMP spi is 16-bits. */
448 if (p->max >= 0x10000)
/* XFRM_MSG_ALLOCSPI handler: validate, find-or-create an acquire state,
 * allocate an SPI in the requested range under the state lock, then
 * unicast the resulting SA back to the requester.
 * NOTE(review): p's NLMSG_DATA assignment, the find_acq address
 * arguments, and the error/put paths are on lines missing from this
 * excerpt.
 */
462 static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
464 struct xfrm_state *x;
465 struct xfrm_userspi_info *p;
466 struct sk_buff *resp_skb;
470 err = verify_userspi_info(p);
473 x = xfrm_find_acq(p->info.mode, p->info.reqid, p->info.id.proto,
481 resp_skb = ERR_PTR(-ENOENT);
/* the SPI must be assigned atomically with respect to state death */
483 spin_lock_bh(&x->lock);
484 if (x->km.state != XFRM_STATE_DEAD) {
/* user supplies host order; states keep SPIs in network order */
485 xfrm_alloc_spi(x, htonl(p->min), htonl(p->max));
487 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
489 spin_unlock_bh(&x->lock);
491 if (IS_ERR(resp_skb)) {
492 err = PTR_ERR(resp_skb);
496 err = netlink_unicast(xfrm_nl, resp_skb,
497 NETLINK_CB(skb).pid, MSG_DONTWAIT);
505 static int verify_policy_dir(__u8 dir)
509 case XFRM_POLICY_OUT:
510 case XFRM_POLICY_FWD:
/* Validate a new-policy request: share mode, action, selector family
 * (IPv6 only when compiled in), and finally the direction via
 * verify_policy_dir().
 * NOTE(review): the switch headers for share/action, the AF case labels
 * and the -EINVAL returns are on lines missing from this excerpt.
 */
520 static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
524 case XFRM_SHARE_SESSION:
525 case XFRM_SHARE_USER:
526 case XFRM_SHARE_UNIQUE:
534 case XFRM_POLICY_ALLOW:
535 case XFRM_POLICY_BLOCK:
542 switch (p->sel.family) {
547 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/* IPv6 selector without IPv6 support compiled in */
550 return -EAFNOSUPPORT;
557 return verify_policy_dir(p->dir);
560 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
566 for (i = 0; i < nr; i++, ut++) {
567 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
569 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
570 memcpy(&t->saddr, &ut->saddr,
571 sizeof(xfrm_address_t));
572 t->reqid = ut->reqid;
574 t->share = ut->share;
575 t->optional = ut->optional;
576 t->aalgos = ut->aalgos;
577 t->ealgos = ut->ealgos;
578 t->calgos = ut->calgos;
/* Extract the optional XFRMA_TMPL attribute: compute how many
 * xfrm_user_tmpl entries its payload holds, bound the count by
 * XFRM_MAX_DEPTH, then copy them into the policy.
 * NOTE(review): the absent-attribute early return, the -EINVAL/0 returns
 * are on lines missing from this excerpt.
 */
582 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct rtattr **xfrma)
584 struct rtattr *rt = xfrma[XFRMA_TMPL-1];
585 struct xfrm_user_tmpl *utmpl;
/* number of whole templates that fit in the attribute payload */
591 nr = (rt->rta_len - sizeof(*rt)) / sizeof(*utmpl);
593 if (nr > XFRM_MAX_DEPTH)
596 copy_templates(pol, RTA_DATA(rt), nr);
601 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
603 xp->priority = p->priority;
604 xp->index = p->index;
605 memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
606 memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
607 xp->action = p->action;
608 xp->flags = p->flags;
609 xp->family = p->sel.family;
610 /* XXX xp->share = p->share; */
613 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
615 memcpy(&p->sel, &xp->selector, sizeof(p->sel));
616 memcpy(&p->lft, &xp->lft, sizeof(p->lft));
617 memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
618 p->priority = xp->priority;
619 p->index = xp->index;
620 p->sel.family = xp->family;
622 p->action = xp->action;
623 p->flags = xp->flags;
624 p->share = XFRM_SHARE_ANY; /* XXX xp->share */
/* Allocate an xfrm_policy and populate it from the user request
 * (scalar info + optional template attribute).  Returns NULL with *errp
 * set on failure.
 * NOTE(review): the allocation-failure check, template error handling and
 * the return statements are on lines missing from this excerpt.
 */
627 static struct xfrm_policy *xfrm_policy_construct(struct xfrm_userpolicy_info *p, struct rtattr **xfrma, int *errp)
629 struct xfrm_policy *xp = xfrm_policy_alloc(GFP_KERNEL);
637 copy_from_user_policy(xp, p);
638 err = copy_from_user_tmpl(xp, xfrma);
/* XFRM_MSG_NEWPOLICY / XFRM_MSG_UPDPOLICY handler: validate, construct
 * the policy, then insert it — exclusively for NEWPOLICY (NLM_F-style
 * semantics), replacing for UPDPOLICY.
 * NOTE(review): error returns and the failure-path xfrm_pol_put are on
 * lines missing from this excerpt.
 */
648 static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
650 struct xfrm_userpolicy_info *p = NLMSG_DATA(nlh);
651 struct xfrm_policy *xp;
655 err = verify_newpolicy_info(p);
659 xp = xfrm_policy_construct(p, (struct rtattr **) xfrma, &err);
/* NEWPOLICY must not overwrite an existing policy; UPDPOLICY may */
663 excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
664 err = xfrm_policy_insert(p->dir, xp, excl);
/* Append the policy's templates to skb as one XFRMA_TMPL attribute,
 * converting each kernel xfrm_tmpl to the userspace xfrm_user_tmpl
 * layout via an on-stack staging array (bounded by XFRM_MAX_DEPTH).
 * NOTE(review): the mode field copy, the rtattr_failure label and the
 * return statements are on lines missing from this excerpt.
 */
675 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
677 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
/* no templates -> nothing to emit */
680 if (xp->xfrm_nr == 0)
683 for (i = 0; i < xp->xfrm_nr; i++) {
684 struct xfrm_user_tmpl *up = &vec[i];
685 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
687 memcpy(&up->id, &kp->id, sizeof(up->id));
688 up->family = xp->family;
689 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
690 up->reqid = kp->reqid;
692 up->share = kp->share;
693 up->optional = kp->optional;
694 up->aalgos = kp->aalgos;
695 up->ealgos = kp->ealgos;
696 up->calgos = kp->calgos;
/* one attribute carries the whole array */
698 RTA_PUT(skb, XFRMA_TMPL,
699 (sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr),
/* Per-policy callback for policy dumps: skip until start_idx, then emit
 * one XFRM_MSG_NEWPOLICY message (userpolicy_info + template attribute).
 * On overflow the partial message is trimmed and an error returned so the
 * dump can resume.
 * NOTE(review): the nlmsg_failure label and success return are on lines
 * missing from this excerpt.
 */
708 static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
710 struct xfrm_dump_info *sp = ptr;
711 struct xfrm_userpolicy_info *p;
712 struct sk_buff *in_skb = sp->in_skb;
713 struct sk_buff *skb = sp->out_skb;
714 struct nlmsghdr *nlh;
/* rollback point in case the skb overflows */
715 unsigned char *b = skb->tail;
717 if (sp->this_idx < sp->start_idx)
720 nlh = NLMSG_PUT(skb, NETLINK_CB(in_skb).pid,
722 XFRM_MSG_NEWPOLICY, sizeof(*p));
724 nlh->nlmsg_flags = 0;
726 copy_to_user_policy(xp, p, dir);
727 if (copy_to_user_tmpl(xp, skb) < 0)
730 nlh->nlmsg_len = skb->tail - b;
/* failure: undo the partial message */
736 skb_trim(skb, b - skb->data);
/* Netlink dump entry point for policies: walk all policies via
 * dump_one_policy() and persist the resume position in cb->args[0].
 * NOTE(review): out_skb/this_idx initialization and the return statement
 * are on lines missing from this excerpt.
 */
740 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
742 struct xfrm_dump_info info;
744 info.in_skb = cb->skb;
746 info.nlmsg_seq = cb->nlh->nlmsg_seq;
748 info.start_idx = cb->args[0];
749 (void) xfrm_policy_walk(dump_one_policy, &info);
750 cb->args[0] = info.this_idx;
/* Render a single policy into a freshly allocated reply skb, addressed
 * back to the requesting pid.  Returns the skb or an ERR_PTR.
 * NOTE(review): the skb NULL check, the error path freeing the skb and
 * the success return are on lines missing from this excerpt.  Note this
 * path allocates with GFP_KERNEL while the state equivalent above uses
 * GFP_ATOMIC.
 */
755 static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
756 struct xfrm_policy *xp,
759 struct xfrm_dump_info info;
762 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
764 return ERR_PTR(-ENOMEM);
766 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
767 info.in_skb = in_skb;
769 info.nlmsg_seq = seq;
770 info.this_idx = info.start_idx = 0;
/* reuse the dump callback for a one-shot, single-policy message */
772 if (dump_one_policy(xp, dir, 0, &info) < 0) {
/* XFRM_MSG_GETPOLICY / XFRM_MSG_DELPOLICY handler (shared): look up the
 * policy by index or by selector, optionally deleting it, and for GET
 * unicast its serialized form back to the requester.
 * NOTE(review): p's NLMSG_DATA assignment, the by-index/by-selector
 * choice condition, the not-found return and the xfrm_pol_put path are
 * on lines missing from this excerpt.
 */
780 static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
782 struct xfrm_policy *xp;
783 struct xfrm_userpolicy_id *p;
/* same handler serves both; delete only for DELPOLICY */
788 delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
790 err = verify_policy_dir(p->dir);
795 xp = xfrm_policy_byid(p->dir, p->index, delete);
797 xp = xfrm_policy_bysel(p->dir, &p->sel, delete);
802 struct sk_buff *resp_skb;
804 resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
805 if (IS_ERR(resp_skb)) {
806 err = PTR_ERR(resp_skb);
808 err = netlink_unicast(xfrm_nl, resp_skb,
819 static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
821 struct xfrm_usersa_flush *p = NLMSG_DATA(nlh);
823 xfrm_state_flush(p->proto);
/* XFRM_MSG_FLUSHPOLICY handler.  NOTE(review): the entire body is on
 * lines missing from this excerpt — presumably it calls
 * xfrm_policy_flush() and returns 0; confirm against the canonical
 * source.
 */
827 static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **xfrma)
/* Minimum netlink message length for each XFRM message type, indexed by
 * (nlmsg_type - XFRM_MSG_BASE).  Used in xfrm_user_rcv_msg() both to
 * reject short messages and to locate the start of trailing rtattrs.
 * The entry order must match the XFRM_MSG_* numbering exactly.
 */
833 static const int xfrm_msg_min[(XFRM_MSG_MAX + 1 - XFRM_MSG_BASE)] = {
834 NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)), /* NEW SA */
835 NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)), /* DEL SA */
836 NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)), /* GET SA */
837 NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* NEW POLICY */
838 NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)), /* DEL POLICY */
839 NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)), /* GET POLICY */
840 NLMSG_LENGTH(sizeof(struct xfrm_userspi_info)), /* ALLOC SPI */
841 NLMSG_LENGTH(sizeof(struct xfrm_user_acquire)), /* ACQUIRE */
842 NLMSG_LENGTH(sizeof(struct xfrm_user_expire)), /* EXPIRE */
843 NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* UPD POLICY */
844 NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)), /* UPD SA */
845 NLMSG_LENGTH(sizeof(struct xfrm_user_polexpire)), /* POLEXPIRE */
846 NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)), /* FLUSH SA */
847 NLMSG_LENGTH(0), /* FLUSH POLICY */
/* Handler table indexed by (nlmsg_type - XFRM_MSG_BASE), parallel to
 * xfrm_msg_min above: .doit for single-message requests, .dump for
 * NLM_F_DUMP requests.  Note GET SA / GET POLICY entries (with .dump)
 * and the ACQUIRE/EXPIRE placeholder entries are partially elided from
 * this excerpt; the visible entries must stay in XFRM_MSG_* order.
 */
850 static struct xfrm_link {
851 int (*doit)(struct sk_buff *, struct nlmsghdr *, void **);
852 int (*dump)(struct sk_buff *, struct netlink_callback *);
853 } xfrm_dispatch[] = {
854 { .doit = xfrm_add_sa, },
855 { .doit = xfrm_del_sa, },
858 .dump = xfrm_dump_sa,
860 { .doit = xfrm_add_policy },
861 { .doit = xfrm_get_policy },
863 .doit = xfrm_get_policy,
864 .dump = xfrm_dump_policy,
866 { .doit = xfrm_alloc_userspi },
/* UPD POLICY / UPD SA reuse the add handlers (they branch on nlmsg_type) */
869 { .doit = xfrm_add_policy },
870 { .doit = xfrm_add_sa, },
872 { .doit = xfrm_flush_sa },
873 { .doit = xfrm_flush_policy },
/* Dump-completion callback passed to netlink_dump_start().
 * NOTE(review): body elided from this excerpt — presumably just
 * `return 0;`.
 */
876 static int xfrm_done(struct netlink_callback *cb)
/* Core per-message receive path: validate the message type, enforce
 * privilege, start a dump for GET+NLM_F_DUMP, otherwise length-check the
 * fixed header, parse trailing rtattrs into the xfrma[] index, and call
 * the type's doit handler.
 * NOTE(review): several returns/braces and the -EPERM/-EINVAL error
 * assignments are on lines missing from this excerpt.
 */
881 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
883 struct rtattr *xfrma[XFRMA_MAX];
884 struct xfrm_link *link;
887 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
890 type = nlh->nlmsg_type;
892 /* A control message: ignore them */
893 if (type < XFRM_MSG_BASE)
896 /* Unknown message: reply with EINVAL */
897 if (type > XFRM_MSG_MAX)
900 type -= XFRM_MSG_BASE;
901 link = &xfrm_dispatch[type];
903 /* All operations require privileges, even GET */
904 if (security_netlink_recv(skb)) {
/* 2 and 5 are the GETSA/GETPOLICY offsets from XFRM_MSG_BASE — magic
 * numbers; the symbolic names would be clearer here */
909 if ((type == 2 || type == 5) && (nlh->nlmsg_flags & NLM_F_DUMP)) {
912 if (link->dump == NULL)
915 if ((*errp = netlink_dump_start(xfrm_nl, skb, nlh,
920 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
927 memset(xfrma, 0, sizeof(xfrma));
929 if (nlh->nlmsg_len < (min_len = xfrm_msg_min[type]))
/* anything past the fixed header is a chain of rtattrs */
932 if (nlh->nlmsg_len > min_len) {
933 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
934 struct rtattr *attr = (void *) nlh + NLMSG_ALIGN(min_len);
936 while (RTA_OK(attr, attrlen)) {
937 unsigned short flavor = attr->rta_type;
/* flavor 0 and flavors above XFRMA_MAX are skipped; valid flavors
 * are stored at index flavor-1, matching the xfrma[...-1] reads above */
939 if (flavor > XFRMA_MAX)
941 xfrma[flavor - 1] = attr;
943 attr = RTA_NEXT(attr, attrlen);
947 if (link->doit == NULL)
949 *errp = link->doit(skb, nlh, (void **) &xfrma);
/* Walk every netlink message in one skb, dispatching each to
 * xfrm_user_rcv_msg() and acking errors or NLM_F_ACK requests.
 * NOTE(review): the skb_pull advancing past each message and the final
 * return are on lines missing from this excerpt.
 */
958 static int xfrm_user_rcv_skb(struct sk_buff *skb)
961 struct nlmsghdr *nlh;
963 while (skb->len >= NLMSG_SPACE(0)) {
966 nlh = (struct nlmsghdr *) skb->data;
/* reject malformed/truncated headers */
967 if (nlh->nlmsg_len < sizeof(*nlh) ||
968 skb->len < nlh->nlmsg_len)
970 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
973 if (xfrm_user_rcv_msg(skb, nlh, &err) < 0) {
976 netlink_ack(skb, nlh, err);
977 } else if (nlh->nlmsg_flags & NLM_F_ACK)
978 netlink_ack(skb, nlh, 0);
/* data_ready callback for the xfrm netlink socket: drain the receive
 * queue, re-queueing an skb that could not be fully processed, and loop
 * while more data arrived meanwhile.
 * NOTE(review): the mutex/semaphore guarding this loop and the kfree_skb
 * for consumed skbs are on lines missing from this excerpt.
 */
985 static void xfrm_netlink_rcv(struct sock *sk, int len)
992 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
993 if (xfrm_user_rcv_skb(skb)) {
/* partially processed: put it back for the next pass */
995 skb_queue_head(&sk->sk_receive_queue,
1006 } while (xfrm_nl && xfrm_nl->sk_receive_queue.qlen);
/* Build an XFRM_MSG_EXPIRE message for state x into skb; `hard` is
 * normalized to 0/1.  Returns <0 (after trimming the partial message) if
 * the skb is too small.
 * NOTE(review): the nlmsg_failure label and the success return are on
 * lines missing from this excerpt.
 */
1009 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, int hard)
1011 struct xfrm_user_expire *ue;
1012 struct nlmsghdr *nlh;
1013 unsigned char *b = skb->tail;
1015 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_EXPIRE,
1017 ue = NLMSG_DATA(nlh);
1018 nlh->nlmsg_flags = 0;
1020 copy_to_user_state(x, &ue->state);
1021 ue->hard = (hard != 0) ? 1 : 0;
1023 nlh->nlmsg_len = skb->tail - b;
1027 skb_trim(skb, b - skb->data);
/* km notify hook: broadcast an SA expire event to XFRMGRP_EXPIRE
 * listeners.
 * NOTE(review): the alloc-failure and build-failure error returns are on
 * lines missing from this excerpt; the "+ 16" headroom constant is
 * unexplained — presumably slack for the nlmsghdr; confirm.
 */
1031 static int xfrm_send_state_notify(struct xfrm_state *x, int hard)
1033 struct sk_buff *skb;
1035 skb = alloc_skb(sizeof(struct xfrm_user_expire) + 16, GFP_ATOMIC);
1039 if (build_expire(skb, x, hard) < 0)
1042 NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE;
1044 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
/* Build an XFRM_MSG_ACQUIRE message: the larval state's identity and
 * selector, the triggering policy, the template's algorithm masks, and a
 * fresh acquire sequence number that is also recorded in x->km.seq so a
 * later userspace reply can be matched to this state.
 * NOTE(review): the nlmsg_failure label and success return are on lines
 * missing from this excerpt.
 */
1047 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
1048 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
1051 struct xfrm_user_acquire *ua;
1052 struct nlmsghdr *nlh;
1053 unsigned char *b = skb->tail;
1054 __u32 seq = xfrm_get_acqseq();
1056 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE,
1058 ua = NLMSG_DATA(nlh);
1059 nlh->nlmsg_flags = 0;
1061 memcpy(&ua->id, &x->id, sizeof(ua->id));
1062 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
1063 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
1064 copy_to_user_policy(xp, &ua->policy, dir);
1065 ua->aalgos = xt->aalgos;
1066 ua->ealgos = xt->ealgos;
1067 ua->calgos = xt->calgos;
/* store the seq on the state so the userspace answer can be correlated */
1068 ua->seq = x->km.seq = seq;
1070 if (copy_to_user_tmpl(xp, skb) < 0)
1073 nlh->nlmsg_len = skb->tail - b;
1077 skb_trim(skb, b - skb->data);
/* km acquire hook: size an skb for the acquire message plus the policy's
 * template attribute, build it and broadcast to XFRMGRP_ACQUIRE.
 * NOTE(review): the alloc-failure and build-failure error returns are on
 * lines missing from this excerpt.
 */
1081 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
1082 struct xfrm_policy *xp, int dir)
1084 struct sk_buff *skb;
/* exact sizing: templates attribute + acquire header */
1087 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
1088 len += NLMSG_SPACE(sizeof(struct xfrm_user_acquire));
1089 skb = alloc_skb(len, GFP_ATOMIC);
1093 if (build_acquire(skb, x, xt, xp, dir) < 0)
1096 NETLINK_CB(skb).dst_groups = XFRMGRP_ACQUIRE;
1098 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_ACQUIRE, GFP_ATOMIC);
1101 /* User gives us xfrm_user_policy_info followed by an array of 0
1102 * or more templates.
/* km compile_policy hook for per-socket policy setsockopt: validate the
 * socket option matches the family, check lengths, then allocate a
 * policy and fill it from the user buffer (header + template array).
 * NOTE(review): the family switch structure, the *dir assignment from
 * p->dir, error returns and the final return xp are on lines missing from
 * this excerpt.
 */
1104 struct xfrm_policy *xfrm_compile_policy(u16 family, int opt,
1105 u8 *data, int len, int *dir)
1107 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
/* templates follow the fixed header back-to-back */
1108 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
1109 struct xfrm_policy *xp;
1114 if (opt != IP_XFRM_POLICY) {
1119 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1121 if (opt != IPV6_XFRM_POLICY) {
1134 if (len < sizeof(*p) ||
1135 verify_newpolicy_info(p))
1138 nr = ((len - sizeof(*p)) / sizeof(*ut));
1139 if (nr > XFRM_MAX_DEPTH)
1142 xp = xfrm_policy_alloc(GFP_KERNEL);
1148 copy_from_user_policy(xp, p);
1149 copy_templates(xp, ut, nr);
/* Build an XFRM_MSG_POLEXPIRE message: the policy, its templates, and —
 * on lines missing from this excerpt — the hard flag assignment, the
 * nlmsg_failure label and the success return.
 */
1156 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
1159 struct xfrm_user_polexpire *upe;
1160 struct nlmsghdr *nlh;
1161 unsigned char *b = skb->tail;
1163 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe));
1164 upe = NLMSG_DATA(nlh);
1165 nlh->nlmsg_flags = 0;
1167 copy_to_user_policy(xp, &upe->pol, dir);
1168 if (copy_to_user_tmpl(xp, skb) < 0)
1172 nlh->nlmsg_len = skb->tail - b;
1176 skb_trim(skb, b - skb->data);
/* km notify_policy hook: size an skb for the polexpire message plus the
 * template attribute, build it and broadcast to XFRMGRP_EXPIRE.
 * NOTE(review): the alloc-failure and build-failure error returns are on
 * lines missing from this excerpt.
 */
1180 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, int hard)
1182 struct sk_buff *skb;
1185 len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
1186 len += NLMSG_SPACE(sizeof(struct xfrm_user_polexpire));
1187 skb = alloc_skb(len, GFP_ATOMIC);
1191 if (build_polexpire(skb, xp, dir, hard) < 0)
1194 NETLINK_CB(skb).dst_groups = XFRMGRP_EXPIRE;
1196 return netlink_broadcast(xfrm_nl, skb, 0, XFRMGRP_EXPIRE, GFP_ATOMIC);
/* Key-manager registration: wires this netlink front-end's callbacks
 * into the xfrm core (registered in xfrm_user_init()).  An .id member is
 * on a line missing from this excerpt.
 */
1199 static struct xfrm_mgr netlink_mgr = {
1201 .notify = xfrm_send_state_notify,
1202 .acquire = xfrm_send_acquire,
1203 .compile_policy = xfrm_compile_policy,
1204 .notify_policy = xfrm_send_policy_notify,
/* Module init: create the NETLINK_XFRM kernel socket (panicking on
 * failure, as this cannot proceed without it) and register the key
 * manager.  The `return 0;` is on a line missing from this excerpt.
 */
1207 static int __init xfrm_user_init(void)
1209 printk(KERN_INFO "Initializing IPsec netlink socket\n");
1211 xfrm_nl = netlink_kernel_create(NETLINK_XFRM, xfrm_netlink_rcv);
1212 if (xfrm_nl == NULL)
1213 panic("xfrm_user_init: cannot initialize xfrm_nl\n");
1216 xfrm_register_km(&netlink_mgr);
/* Module exit: unregister the key manager, then release the netlink
 * socket.  NOTE(review): xfrm_nl is read by xfrm_netlink_rcv() and the
 * notify paths; whether it is cleared/synchronized before release is on
 * lines missing from this excerpt — verify teardown ordering.
 */
1221 static void __exit xfrm_user_exit(void)
1223 xfrm_unregister_km(&netlink_mgr);
1224 sock_release(xfrm_nl->sk_socket);
/* Standard module plumbing: entry/exit hooks and license declaration. */
1227 module_init(xfrm_user_init);
1228 module_exit(xfrm_user_exit);
1229 MODULE_LICENSE("GPL");