*
*/
+#include <asm/bug.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
+#include <linux/module.h>
#include <net/xfrm.h>
#include <net/ip.h>
DECLARE_MUTEX(xfrm_cfg_sem);
+EXPORT_SYMBOL(xfrm_cfg_sem);
-static rwlock_t xfrm_policy_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(xfrm_policy_lock);
struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
+EXPORT_SYMBOL(xfrm_policy_list);
-static rwlock_t xfrm_policy_afinfo_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
-kmem_cache_t *xfrm_dst_cache;
+static kmem_cache_t *xfrm_dst_cache;
static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
LIST_HEAD_INIT(xfrm_policy_gc_list);
-static spinlock_t xfrm_policy_gc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
+
+static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
+static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
xfrm_policy_put_afinfo(afinfo);
return err;
}
+EXPORT_SYMBOL(xfrm_register_type);
int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
xfrm_policy_put_afinfo(afinfo);
return err;
}
+EXPORT_SYMBOL(xfrm_unregister_type);
struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
xfrm_policy_put_afinfo(afinfo);
return type;
}
+EXPORT_SYMBOL(xfrm_get_type);
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
unsigned short family)
xfrm_policy_put_afinfo(afinfo);
return err;
}
+EXPORT_SYMBOL(xfrm_dst_lookup);
void xfrm_put_type(struct xfrm_type *type)
{
int warn = 0;
int dir;
+ read_lock(&xp->lock);
+
if (xp->dead)
goto out;
xfrm_pol_hold(xp);
out:
+ read_unlock(&xp->lock);
xfrm_pol_put(xp);
return;
expired:
+ read_unlock(&xp->lock);
km_policy_expired(xp, dir, 1);
xfrm_policy_delete(xp, dir);
xfrm_pol_put(xp);
if (policy) {
memset(policy, 0, sizeof(struct xfrm_policy));
atomic_set(&policy->refcnt, 1);
- policy->lock = RW_LOCK_UNLOCKED;
+ rwlock_init(&policy->lock);
init_timer(&policy->timer);
policy->timer.data = (unsigned long)policy;
policy->timer.function = xfrm_policy_timer;
}
return policy;
}
+EXPORT_SYMBOL(xfrm_policy_alloc);
/* Destroy xfrm_policy: descendant resources must be released to this moment. */
kfree(policy);
}
+EXPORT_SYMBOL(__xfrm_policy_destroy);
static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
* entry dead. The rule must be unlinked from lists to the moment.
*/
-void xfrm_policy_kill(struct xfrm_policy *policy)
+/*
+ * Mark a policy dead and hand it to the deferred GC work item for
+ * destruction.  The policy must already be unlinked from all lists
+ * (see comment above).  The dead flag is sampled and set under
+ * policy->lock so only the first killer queues it on the GC list;
+ * a second kill of the same policy is a caller bug and is flagged
+ * with WARN_ON instead of corrupting the GC list.
+ */
+static void xfrm_policy_kill(struct xfrm_policy *policy)
{
-	write_lock_bh(&policy->lock);
-	if (policy->dead)
-		goto out;
+	int dead;
+	write_lock_bh(&policy->lock);
+	dead = policy->dead;
	policy->dead = 1;
+	write_unlock_bh(&policy->lock);
+
+	/* Already dead: someone killed this policy twice — bug in caller. */
+	if (unlikely(dead)) {
+		WARN_ON(1);
+		return;
+	}
	spin_lock(&xfrm_policy_gc_lock);
	list_add(&policy->list, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);
-	schedule_work(&xfrm_policy_gc_work);
-out:
-	write_unlock_bh(&policy->lock);
+	/* Actual destruction happens later in xfrm_policy_gc_work context. */
+	schedule_work(&xfrm_policy_gc_work);
}
/* Generate new index... KAME seems to generate them ordered by cost
struct xfrm_policy **newpos = NULL;
write_lock_bh(&xfrm_policy_lock);
- for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
+ for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL;) {
if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0) {
if (excl) {
write_unlock_bh(&xfrm_policy_lock);
delpol = pol;
if (policy->priority > pol->priority)
continue;
- } else if (policy->priority >= pol->priority)
+ } else if (policy->priority >= pol->priority) {
+ p = &pol->next;
continue;
+ }
if (!newpos)
newpos = p;
if (delpol)
break;
+ p = &pol->next;
}
if (newpos)
p = newpos;
}
return 0;
}
+EXPORT_SYMBOL(xfrm_policy_insert);
struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
int delete)
}
return pol;
}
+EXPORT_SYMBOL(xfrm_policy_bysel);
struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
}
return pol;
}
+EXPORT_SYMBOL(xfrm_policy_byid);
void xfrm_policy_flush(void)
{
atomic_inc(&flow_cache_genid);
write_unlock_bh(&xfrm_policy_lock);
}
+EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
void *data)
read_unlock_bh(&xfrm_policy_lock);
return error;
}
-
+EXPORT_SYMBOL(xfrm_policy_walk);
/* Find policy to apply to this flow. */
*obj_refp = &pol->refcnt;
}
-struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
+static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
struct xfrm_policy *pol;
write_lock_bh(&xfrm_policy_lock);
pol = __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
- if (pol)
+ if (pol) {
+ if (dir < XFRM_POLICY_MAX)
+ atomic_inc(&flow_cache_genid);
xfrm_policy_kill(pol);
+ }
}
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
struct xfrm_policy *policy;
struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
- struct rtable *rt = (struct rtable*)*dst_p;
- struct dst_entry *dst;
+ struct dst_entry *dst, *dst_orig = *dst_p;
int nx = 0;
int err;
u32 genid;
- u16 family = (*dst_p)->ops->family;
-
- switch (family) {
- case AF_INET:
- if (!fl->fl4_src)
- fl->fl4_src = rt->rt_src;
- if (!fl->fl4_dst)
- fl->fl4_dst = rt->rt_dst;
- case AF_INET6:
- /* Still not clear... */
- default:
- /* nothing */;
- }
-
+ u16 family = dst_orig->ops->family;
restart:
genid = atomic_read(&flow_cache_genid);
policy = NULL;
if (!policy) {
/* To accelerate a bit... */
- if ((rt->u.dst.flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
+ if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
return 0;
policy = flow_cache_lookup(fl, family,
return 0;
}
- dst = &rt->u.dst;
+ dst = dst_orig;
err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);
if (unlikely(err)) {
write_unlock_bh(&policy->lock);
}
*dst_p = dst;
- ip_rt_put(rt);
+ dst_release(dst_orig);
xfrm_pol_put(policy);
return 0;
error:
- ip_rt_put(rt);
+ dst_release(dst_orig);
xfrm_pol_put(policy);
*dst_p = NULL;
return err;
}
+EXPORT_SYMBOL(xfrm_lookup);
/* When skb is transformed back to its "native" form, we have to
* check policy restrictions. At the moment we make this in maximally
return 0;
}
+/*
+ * Return 1 if any transform in sp->x[] from index k onward ran in
+ * tunnel mode (props.mode != 0), 0 otherwise.  Used by
+ * __xfrm_policy_check() to reject packets carrying unexpected
+ * tunnel-mode states not matched by the policy's templates.
+ */
+static inline int secpath_has_tunnel(struct sec_path *sp, int k)
+{
+	for (; k < sp->len; k++) {
+		if (sp->x[k].xvec->props.mode)
+			return 1;
+	}
+
+	return 0;
+}
+
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
unsigned short family)
{
xfrm_policy_lookup);
if (!pol)
- return !skb->sp;
+ return !skb->sp || !secpath_has_tunnel(skb->sp, 0);
pol->curlft.use_time = (unsigned long)xtime.tv_sec;
goto reject;
}
- for (; k < sp->len; k++) {
- if (sp->x[k].xvec->props.mode)
- goto reject;
- }
+ if (secpath_has_tunnel(sp, k))
+ goto reject;
xfrm_pol_put(pol);
return 1;
xfrm_pol_put(pol);
return 0;
}
+EXPORT_SYMBOL(__xfrm_policy_check);
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
+EXPORT_SYMBOL(__xfrm_route_forward);
/* Optimize later using cookies and generation ids. */
if (!stale_bundle(dst))
return dst;
- dst_release(dst);
return NULL;
}
static int stale_bundle(struct dst_entry *dst)
{
-	struct dst_entry *child = dst;
-
-	while (child) {
-		if (child->obsolete > 0 ||
-		    (child->dev && !netif_running(child->dev)) ||
-		    (child->xfrm && child->xfrm->km.state != XFRM_STATE_VALID)) {
-			return 1;
-		}
-		child = child->child;
-	}
-
-	return 0;
+	/* A bundle is stale iff xfrm_bundle_ok() rejects it; passing a
+	 * NULL flow and AF_UNSPEC skips the selector-match check and
+	 * validates only the dst chain and xfrm state validity. */
+	return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}
-static void xfrm_dst_destroy(struct dst_entry *dst)
+/*
+ * Called when a network device goes away: walk the bundle's child
+ * chain and re-point every xfrm dst that still references @dev at
+ * the loopback device, transferring the device reference
+ * (dev_hold on loopback, dev_put on the departing device) so the
+ * bundle stays valid until it is garbage collected.
+ */
+void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
-	xfrm_state_put(dst->xfrm);
-	dst->xfrm = NULL;
+	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
+		dst->dev = &loopback_dev;
+		dev_hold(&loopback_dev);
+		dev_put(dev);
+	}
}
+EXPORT_SYMBOL(xfrm_dst_ifdown);
static void xfrm_link_failure(struct sk_buff *skb)
{
return 0;
}
+/*
+ * Initialize the cached path-MTU values of a freshly built xfrm
+ * bundle.  For each xfrm_dst in the chain (walked via dst->next),
+ * cache the child's MTU and the route's MTU, then record the
+ * effective MTU as the state-adjusted child MTU clamped to the
+ * route MTU.  xfrm_bundle_ok() later compares against these caches
+ * to detect PMTU changes.
+ */
+void xfrm_init_pmtu(struct dst_entry *dst)
+{
+	do {
+		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+		u32 pmtu, route_mtu_cached;
+
+		pmtu = dst_mtu(dst->child);
+		xdst->child_mtu_cached = pmtu;
+
+		/* Shrink by this state's transform overhead (headers, padding). */
+		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
+
+		route_mtu_cached = dst_mtu(xdst->route);
+		xdst->route_mtu_cached = route_mtu_cached;
+
+		if (pmtu > route_mtu_cached)
+			pmtu = route_mtu_cached;
+
+		dst->metrics[RTAX_MTU-1] = pmtu;
+	} while ((dst = dst->next));
+}
+
+EXPORT_SYMBOL(xfrm_init_pmtu);
+
+/* Check that the bundle accepts the flow and its components are
+ * still valid.
+ *
+ * Returns 1 if the bundle rooted at @first is usable for @fl
+ * (@fl may be NULL to skip selector matching, as stale_bundle()
+ * does), 0 if it must be discarded.  As a side effect, any MTU
+ * changes detected along the chain are propagated back up so the
+ * cached RTAX_MTU metrics stay current.
+ */
+
+int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
+{
+	struct dst_entry *dst = &first->u.dst;
+	struct xfrm_dst *last;
+	u32 mtu;
+
+	/* Reject if the underlying path route went stale or the device
+	 * is down. */
+	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
+	    (dst->dev && !netif_running(dst->dev)))
+		return 0;
+
+	last = NULL;
+
+	/* First pass (top-down): validate each level and note the
+	 * deepest level whose child MTU changed. */
+	do {
+		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+
+		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
+			return 0;
+		if (dst->xfrm->km.state != XFRM_STATE_VALID)
+			return 0;
+
+		mtu = dst_mtu(dst->child);
+		if (xdst->child_mtu_cached != mtu) {
+			last = xdst;
+			xdst->child_mtu_cached = mtu;
+		}
+
+		if (!dst_check(xdst->route, xdst->route_cookie))
+			return 0;
+		mtu = dst_mtu(xdst->route);
+		if (xdst->route_mtu_cached != mtu) {
+			last = xdst;
+			xdst->route_mtu_cached = mtu;
+		}
+
+		dst = dst->child;
+	} while (dst->xfrm);
+
+	/* No MTU change anywhere: bundle valid as-is. */
+	if (likely(!last))
+		return 1;
+
+	/* Second pass (bottom-up from the deepest change): recompute
+	 * effective MTUs, applying each state's overhead and clamping
+	 * to the route MTU, and refresh the cached metrics. */
+	mtu = last->child_mtu_cached;
+	for (;;) {
+		dst = &last->u.dst;
+
+		mtu = xfrm_state_mtu(dst->xfrm, mtu);
+		if (mtu > last->route_mtu_cached)
+			mtu = last->route_mtu_cached;
+		dst->metrics[RTAX_MTU-1] = mtu;
+
+		if (last == first)
+			break;
+
+		last = last->u.next;
+		last->child_mtu_cached = mtu;
+	}
+
+	return 1;
+}
+
+EXPORT_SYMBOL(xfrm_bundle_ok);
+
+
/* Well... that's _TASK_. We need to scan through transformation
* list and figure out what mss tcp should generate in order to
* final datagram fit to mtu. Mama mia... :-)
dst_ops->kmem_cachep = xfrm_dst_cache;
if (likely(dst_ops->check == NULL))
dst_ops->check = xfrm_dst_check;
- if (likely(dst_ops->destroy == NULL))
- dst_ops->destroy = xfrm_dst_destroy;
if (likely(dst_ops->negative_advice == NULL))
dst_ops->negative_advice = xfrm_negative_advice;
if (likely(dst_ops->link_failure == NULL))
write_unlock(&xfrm_policy_afinfo_lock);
return err;
}
+EXPORT_SYMBOL(xfrm_policy_register_afinfo);
int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
xfrm_policy_afinfo[afinfo->family] = NULL;
dst_ops->kmem_cachep = NULL;
dst_ops->check = NULL;
- dst_ops->destroy = NULL;
dst_ops->negative_advice = NULL;
dst_ops->link_failure = NULL;
dst_ops->get_mss = NULL;
write_unlock(&xfrm_policy_afinfo_lock);
return err;
}
+EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
-struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
+static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
struct xfrm_policy_afinfo *afinfo;
if (unlikely(family >= NPROTO))
return afinfo;
}
-void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
+static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
if (unlikely(afinfo == NULL))
return;
return NOTIFY_DONE;
}
-struct notifier_block xfrm_dev_notifier = {
+static struct notifier_block xfrm_dev_notifier = {
xfrm_dev_event,
NULL,
0
};
-void __init xfrm_policy_init(void)
+static void __init xfrm_policy_init(void)
{
xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
sizeof(struct xfrm_dst),