X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fipv4%2Fipcomp.c;h=3839b706142e3816f73bbc9a6ff361a9f8467d44;hb=refs%2Fheads%2Fvserver;hp=6845207f2eddb64383c74957f997ed9a59c69b99;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git

diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 6845207f2..3839b7061 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -13,7 +13,6 @@
  *   - Compression stats.
  *   - Adaptive compression.
  */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <asm/scatterlist.h>
 #include <asm/semaphore.h>
@@ -24,18 +23,20 @@
 #include <linux/list.h>
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
+#include <linux/mutex.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/icmp.h>
 #include <net/ipcomp.h>
+#include <net/protocol.h>
 
 struct ipcomp_tfms {
 	struct list_head list;
-	struct crypto_tfm **tfms;
+	struct crypto_comp **tfms;
 	int users;
 };
 
-static DECLARE_MUTEX(ipcomp_resource_sem);
+static DEFINE_MUTEX(ipcomp_resource_mutex);
 static void **ipcomp_scratches;
 static int ipcomp_scratch_users;
 static LIST_HEAD(ipcomp_tfms_list);
@@ -43,10 +44,9 @@ static LIST_HEAD(ipcomp_tfms_list);
 static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err, plen, dlen;
-	struct iphdr *iph;
 	struct ipcomp_data *ipcd = x->data;
 	u8 *start, *scratch;
-	struct crypto_tfm *tfm;
+	struct crypto_comp *tfm;
 	int cpu;
 
 	plen = skb->len;
@@ -70,46 +70,31 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
 	if (err)
 		goto out;
 
-	skb_put(skb, dlen - plen);
+	skb->truesize += dlen - plen;
+	__skb_put(skb, dlen - plen);
 	memcpy(skb->data, scratch, dlen);
-	iph = skb->nh.iph;
-	iph->tot_len = htons(dlen + iph->ihl * 4);
 out:
 	put_cpu();
 	return err;
 }
 
-static int ipcomp_input(struct xfrm_state *x,
-                        struct xfrm_decap_state *decap, struct sk_buff *skb)
+static int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-	u8 nexthdr;
-	int err = 0;
+	int err = -ENOMEM;
 	struct iphdr *iph;
-	union {
-		struct iphdr	iph;
-		char		buf[60];
-	} tmp_iph;
-
+	struct ip_comp_hdr *ipch;
 
-	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
-	    skb_linearize(skb, GFP_ATOMIC) != 0) {
-		err = -ENOMEM;
+	if (skb_linearize_cow(skb))
 		goto out;
-	}
 
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Remove ipcomp header and decompress original payload */
 	iph = skb->nh.iph;
-	memcpy(&tmp_iph, iph, iph->ihl * 4);
-	nexthdr = *(u8 *)skb->data;
-	skb_pull(skb, sizeof(struct ip_comp_hdr));
-	skb->nh.raw += sizeof(struct ip_comp_hdr);
-	memcpy(skb->nh.raw, &tmp_iph, tmp_iph.iph.ihl * 4);
-	iph = skb->nh.iph;
-	iph->tot_len = htons(ntohs(iph->tot_len) - sizeof(struct ip_comp_hdr));
-	iph->protocol = nexthdr;
-	skb->h.raw = skb->data;
+	ipch = (void *)skb->data;
+	iph->protocol = ipch->nexthdr;
+	skb->h.raw = skb->nh.raw + sizeof(*ipch);
+	__skb_pull(skb, sizeof(*ipch));
 	err = ipcomp_decompress(x, skb);
 
 out:
@@ -122,7 +107,7 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
 	struct iphdr *iph = skb->nh.iph;
 	struct ipcomp_data *ipcd = x->data;
 	u8 *start, *scratch;
-	struct crypto_tfm *tfm;
+	struct crypto_comp *tfm;
 	int cpu;
 
 	ihlen = iph->ihl * 4;
@@ -154,11 +139,9 @@ out:
 	return err;
 }
 
-static int ipcomp_output(struct sk_buff *skb)
+static int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
-	struct dst_entry *dst = skb->dst;
-	struct xfrm_state *x = dst->xfrm;
 	struct iphdr *iph;
 	struct ip_comp_hdr *ipch;
 	struct ipcomp_data *ipcd = x->data;
@@ -169,32 +152,20 @@ static int ipcomp_output(struct sk_buff *skb)
 	hdr_len = iph->ihl * 4;
 	if ((skb->len - hdr_len) < ipcd->threshold) {
 		/* Don't bother compressing */
-		if (x->props.mode) {
-			ip_send_check(iph);
-		}
 		goto out_ok;
 	}
 
-	if ((skb_is_nonlinear(skb) || skb_cloned(skb)) &&
-	    skb_linearize(skb, GFP_ATOMIC) != 0) {
-		err = -ENOMEM;
-		goto error;
-	}
+	if (skb_linearize_cow(skb))
+		goto out_ok;
 
 	err = ipcomp_compress(x, skb);
+	iph = skb->nh.iph;
+
 	if (err) {
-		if (err == -EMSGSIZE) {
-			if (x->props.mode) {
-				iph = skb->nh.iph;
-				ip_send_check(iph);
-			}
-			goto out_ok;
-		}
-		goto error;
+		goto out_ok;
 	}
 
 	/* Install ipcomp header, convert into ipcomp datagram. */
-	iph = skb->nh.iph;
 	iph->tot_len = htons(skb->len);
 	ipch = (struct ip_comp_hdr *)((char *)iph + iph->ihl * 4);
 	ipch->nexthdr = iph->protocol;
@@ -202,17 +173,17 @@ static int ipcomp_output(struct sk_buff *skb)
 	ipch->cpi = htons((u16 )ntohl(x->id.spi));
 	iph->protocol = IPPROTO_COMP;
 	ip_send_check(iph);
+	return 0;
 
 out_ok:
-	err = 0;
-
-error:
-	return err;
+	if (x->props.mode == XFRM_MODE_TUNNEL)
+		ip_send_check(iph);
+	return 0;
 }
 
 static void ipcomp4_err(struct sk_buff *skb, u32 info)
 {
-	u32 spi;
+	__be32 spi;
 	struct iphdr *iph = (struct iphdr *)skb->data;
 	struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
 	struct xfrm_state *x;
@@ -221,13 +192,13 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
 	    skb->h.icmph->code != ICMP_FRAG_NEEDED)
 		return;
 
-	spi = ntohl(ntohs(ipch->cpi));
+	spi = htonl(ntohs(ipch->cpi));
 	x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr,
 	                      spi, IPPROTO_COMP, AF_INET);
 	if (!x)
 		return;
-	NETDEBUG(printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n",
-		 spi, NIPQUAD(iph->daddr)));
+	NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%u.%u.%u.%u\n",
+		 spi, NIPQUAD(iph->daddr));
 	xfrm_state_put(x);
 }
 
@@ -235,6 +206,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
 static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
 {
 	struct xfrm_state *t;
+	u8 mode = XFRM_MODE_TUNNEL;
 
 	t = xfrm_state_alloc();
 	if (t == NULL)
@@ -245,18 +217,15 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
 	t->id.daddr.a4 = x->id.daddr.a4;
 	memcpy(&t->sel, &x->sel, sizeof(t->sel));
 	t->props.family = AF_INET;
-	t->props.mode = 1;
+	if (x->props.mode == XFRM_MODE_BEET)
+		mode = x->props.mode;
+	t->props.mode = mode;
 	t->props.saddr.a4 = x->props.saddr.a4;
 	t->props.flags = x->props.flags;
-
-	t->type = xfrm_get_type(IPPROTO_IPIP, t->props.family);
-	if (t->type == NULL)
-		goto error;
-
-	if (t->type->init_state(t, NULL))
+
+	if (xfrm_init_state(t))
 		goto error;
 
-	t->km.state = XFRM_STATE_VALID;
 	atomic_set(&t->tunnel_users, 1);
 out:
 	return t;
@@ -269,7 +238,7 @@ error:
 }
 
 /*
- * Must be protected by xfrm_cfg_sem. State and tunnel user references are
+ * Must be protected by xfrm_cfg_mutex. State and tunnel user references are
  * always incremented on success.
  */
 static int ipcomp_tunnel_attach(struct xfrm_state *x)
@@ -306,11 +275,8 @@ static void ipcomp_free_scratches(void)
 	if (!scratches)
 		return;
 
-	for_each_cpu(i) {
-		void *scratch = *per_cpu_ptr(scratches, i);
-		if (scratch)
-			vfree(scratch);
-	}
+	for_each_possible_cpu(i)
+		vfree(*per_cpu_ptr(scratches, i));
 
 	free_percpu(scratches);
 }
@@ -329,7 +295,7 @@ static void **ipcomp_alloc_scratches(void)
 
 	ipcomp_scratches = scratches;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
 		if (!scratch)
 			return NULL;
@@ -339,7 +305,7 @@ static void **ipcomp_alloc_scratches(void)
 	return scratches;
 }
 
-static void ipcomp_free_tfms(struct crypto_tfm **tfms)
+static void ipcomp_free_tfms(struct crypto_comp **tfms)
 {
 	struct ipcomp_tfms *pos;
 	int cpu;
@@ -360,30 +326,29 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
 	if (!tfms)
 		return;
 
-	for_each_cpu(cpu) {
-		struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
-		if (tfm)
-			crypto_free_tfm(tfm);
+	for_each_possible_cpu(cpu) {
+		struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
+		crypto_free_comp(tfm);
 	}
 	free_percpu(tfms);
 }
 
-static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
+static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
 {
 	struct ipcomp_tfms *pos;
-	struct crypto_tfm **tfms;
+	struct crypto_comp **tfms;
 	int cpu;
 
 	/* This can be any valid CPU ID so we don't need locking. */
-	cpu = smp_processor_id();
+	cpu = raw_smp_processor_id();
 
 	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
-		struct crypto_tfm *tfm;
+		struct crypto_comp *tfm;
 
 		tfms = pos->tfms;
 		tfm = *per_cpu_ptr(tfms, cpu);
-		if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) {
+		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
 			pos->users++;
 			return tfms;
 		}
 	}
@@ -397,12 +362,13 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
 	INIT_LIST_HEAD(&pos->list);
 	list_add(&pos->list, &ipcomp_tfms_list);
 
-	pos->tfms = tfms = alloc_percpu(struct crypto_tfm *);
+	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
 	if (!tfms)
 		goto error;
 
-	for_each_cpu(cpu) {
-		struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
+	for_each_possible_cpu(cpu) {
+		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
+							    CRYPTO_ALG_ASYNC);
 		if (!tfm)
 			goto error;
 		*per_cpu_ptr(tfms, cpu) = tfm;
@@ -428,13 +394,13 @@ static void ipcomp_destroy(struct xfrm_state *x)
 	if (!ipcd)
 		return;
 	xfrm_state_delete_tunnel(x);
-	down(&ipcomp_resource_sem);
+	mutex_lock(&ipcomp_resource_mutex);
 	ipcomp_free_data(ipcd);
-	up(&ipcomp_resource_sem);
+	mutex_unlock(&ipcomp_resource_mutex);
 	kfree(ipcd);
 }
 
-static int ipcomp_init_state(struct xfrm_state *x, void *args)
+static int ipcomp_init_state(struct xfrm_state *x)
 {
 	int err;
 	struct ipcomp_data *ipcd;
@@ -448,31 +414,30 @@ static int ipcomp_init_state(struct xfrm_state *x, void *args)
 		goto out;
 
 	err = -ENOMEM;
-	ipcd = kmalloc(sizeof(*ipcd), GFP_KERNEL);
+	ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL);
 	if (!ipcd)
 		goto out;
 
-	memset(ipcd, 0, sizeof(*ipcd));
 	x->props.header_len = 0;
-	if (x->props.mode)
+	if (x->props.mode == XFRM_MODE_TUNNEL)
 		x->props.header_len += sizeof(struct iphdr);
 
-	down(&ipcomp_resource_sem);
+	mutex_lock(&ipcomp_resource_mutex);
 	if (!ipcomp_alloc_scratches())
 		goto error;
 
 	ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
 	if (!ipcd->tfms)
 		goto error;
-	up(&ipcomp_resource_sem);
+	mutex_unlock(&ipcomp_resource_mutex);
 
-	if (x->props.mode) {
+	if (x->props.mode == XFRM_MODE_TUNNEL) {
 		err = ipcomp_tunnel_attach(x);
 		if (err)
 			goto error_tunnel;
 	}
 
-	calg_desc = xfrm_calg_get_byname(x->calg->alg_name);
+	calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0);
 	BUG_ON(!calg_desc);
 	ipcd->threshold = calg_desc->uinfo.comp.threshold;
 	x->data = ipcd;
@@ -481,10 +446,10 @@ out:
 	return err;
 
 error_tunnel:
-	down(&ipcomp_resource_sem);
+	mutex_lock(&ipcomp_resource_mutex);
 error:
 	ipcomp_free_data(ipcd);
-	up(&ipcomp_resource_sem);
+	mutex_unlock(&ipcomp_resource_mutex);
 	kfree(ipcd);
 	goto out;
 }