#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
-#include <linux/mutex.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
int users;
};
-static DEFINE_MUTEX(ipcomp_resource_mutex);
+static DECLARE_MUTEX(ipcomp_resource_sem);
static void **ipcomp_scratches;
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);
return err;
}
-static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
+static int ipcomp_input(struct xfrm_state *x,
+ struct xfrm_decap_state *decap, struct sk_buff *skb)
{
u8 nexthdr;
- int err = -ENOMEM;
+ int err = 0;
struct iphdr *iph;
union {
struct iphdr iph;
} tmp_iph;
- if (skb_linearize_cow(skb))
+ if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
+ skb_linearize(skb, GFP_ATOMIC) != 0) {
+ err = -ENOMEM;
goto out;
+ }
skb->ip_summed = CHECKSUM_NONE;
goto out_ok;
}
- if (skb_linearize_cow(skb))
+ if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
+ skb_linearize(skb, GFP_ATOMIC) != 0) {
goto out_ok;
+ }
err = ipcomp_compress(x, skb);
iph = skb->nh.iph;
skb->h.icmph->code != ICMP_FRAG_NEEDED)
return;
- spi = htonl(ntohs(ipch->cpi));
+	spi = htonl(ntohs(ipch->cpi));
x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr,
spi, IPPROTO_COMP, AF_INET);
if (!x)
}
/*
- * Must be protected by xfrm_cfg_mutex. State and tunnel user references are
+ * Must be protected by xfrm_cfg_sem. State and tunnel user references are
* always incremented on success.
*/
static int ipcomp_tunnel_attach(struct xfrm_state *x)
if (!scratches)
return;
- for_each_possible_cpu(i)
- vfree(*per_cpu_ptr(scratches, i));
+ for_each_cpu(i) {
+ void *scratch = *per_cpu_ptr(scratches, i);
+ if (scratch)
+ vfree(scratch);
+ }
free_percpu(scratches);
}
ipcomp_scratches = scratches;
- for_each_possible_cpu(i) {
+ for_each_cpu(i) {
void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
if (!scratch)
return NULL;
if (!tfms)
return;
- for_each_possible_cpu(cpu) {
+ for_each_cpu(cpu) {
struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
crypto_free_tfm(tfm);
}
if (!tfms)
goto error;
- for_each_possible_cpu(cpu) {
+ for_each_cpu(cpu) {
struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
if (!tfm)
goto error;
if (!ipcd)
return;
xfrm_state_delete_tunnel(x);
- mutex_lock(&ipcomp_resource_mutex);
+ down(&ipcomp_resource_sem);
ipcomp_free_data(ipcd);
- mutex_unlock(&ipcomp_resource_mutex);
+ up(&ipcomp_resource_sem);
kfree(ipcd);
}
if (x->props.mode)
x->props.header_len += sizeof(struct iphdr);
- mutex_lock(&ipcomp_resource_mutex);
+ down(&ipcomp_resource_sem);
if (!ipcomp_alloc_scratches())
goto error;
ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
if (!ipcd->tfms)
goto error;
- mutex_unlock(&ipcomp_resource_mutex);
+ up(&ipcomp_resource_sem);
if (x->props.mode) {
err = ipcomp_tunnel_attach(x);
return err;
error_tunnel:
- mutex_lock(&ipcomp_resource_mutex);
+ down(&ipcomp_resource_sem);
error:
ipcomp_free_data(ipcd);
- mutex_unlock(&ipcomp_resource_mutex);
+ up(&ipcomp_resource_sem);
kfree(ipcd);
goto out;
}