#include <linux/config.h>
#include <linux/module.h>
-#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>
-#define MAX_SG_ONSTACK 4
-
-int esp6_output(struct sk_buff *skb)
+int esp6_output(struct sk_buff **pskb)
{
int err;
- int hdr_len = 0;
- struct dst_entry *dst = skb->dst;
+ int hdr_len;
+ struct dst_entry *dst = (*pskb)->dst;
struct xfrm_state *x = dst->xfrm;
- struct ipv6hdr *iph = NULL, *top_iph;
+ struct ipv6hdr *top_iph;
struct ipv6_esp_hdr *esph;
struct crypto_tfm *tfm;
struct esp_data *esp;
int clen;
int alen;
int nfrags;
- u8 *prevhdr;
- u8 nexthdr = 0;
-
- /* First, if the skb is not checksummed, complete checksum. */
- if (skb->ip_summed == CHECKSUM_HW && skb_checksum_help(skb) == NULL) {
- err = -EINVAL;
- goto error_nolock;
- }
-
- spin_lock_bh(&x->lock);
- err = xfrm_check_output(x, skb, AF_INET6);
- if (err)
- goto error;
- err = -ENOMEM;
- /* Strip IP header in transport mode. Save it. */
+ esp = x->data;
+ hdr_len = (*pskb)->h.raw - (*pskb)->data +
+ sizeof(*esph) + esp->conf.ivlen;
- if (!x->props.mode) {
- hdr_len = ip6_find_1stfragopt(skb, &prevhdr);
- nexthdr = *prevhdr;
- *prevhdr = IPPROTO_ESP;
- iph = kmalloc(hdr_len, GFP_ATOMIC);
- if (!iph) {
- err = -ENOMEM;
- goto error;
- }
- memcpy(iph, skb->nh.raw, hdr_len);
- __skb_pull(skb, hdr_len);
- }
+ /* Strip IP+ESP header. */
+ __skb_pull(*pskb, hdr_len);
/* Now skb is pure payload to encrypt */
+ err = -ENOMEM;
/* Round to block size */
- clen = skb->len;
+ clen = (*pskb)->len;
- esp = x->data;
alen = esp->auth.icv_trunc_len;
tfm = esp->conf.tfm;
blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
if (esp->conf.padlen)
clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);
- if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
- if (!x->props.mode && iph) kfree(iph);
+ if ((nfrags = skb_cow_data(*pskb, clen-(*pskb)->len+alen, &trailer)) < 0) {
goto error;
}
/* Fill padding... */
do {
int i;
- for (i=0; i<clen-skb->len - 2; i++)
+ for (i=0; i<clen-(*pskb)->len - 2; i++)
*(u8*)(trailer->tail + i) = i+1;
} while (0);
- *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
- pskb_put(skb, trailer, clen - skb->len);
+ *(u8*)(trailer->tail + clen-(*pskb)->len - 2) = (clen - (*pskb)->len)-2;
+ pskb_put(*pskb, trailer, clen - (*pskb)->len);
- if (x->props.mode) {
- iph = skb->nh.ipv6h;
- top_iph = (struct ipv6hdr*)skb_push(skb, x->props.header_len);
- esph = (struct ipv6_esp_hdr*)(top_iph+1);
- *(u8*)(trailer->tail - 1) = IPPROTO_IPV6;
- top_iph->version = 6;
- top_iph->priority = iph->priority;
- top_iph->flow_lbl[0] = iph->flow_lbl[0];
- top_iph->flow_lbl[1] = iph->flow_lbl[1];
- top_iph->flow_lbl[2] = iph->flow_lbl[2];
- if (x->props.flags & XFRM_STATE_NOECN)
- IP6_ECN_clear(top_iph);
- top_iph->nexthdr = IPPROTO_ESP;
- top_iph->payload_len = htons(skb->len + alen - sizeof(struct ipv6hdr));
- top_iph->hop_limit = iph->hop_limit;
- ipv6_addr_copy(&top_iph->saddr,
- (struct in6_addr *)&x->props.saddr);
- ipv6_addr_copy(&top_iph->daddr,
- (struct in6_addr *)&x->id.daddr);
- } else {
- esph = (struct ipv6_esp_hdr*)skb_push(skb, x->props.header_len);
- skb->h.raw = (unsigned char*)esph;
- top_iph = (struct ipv6hdr*)skb_push(skb, hdr_len);
- memcpy(top_iph, iph, hdr_len);
- kfree(iph);
- top_iph->payload_len = htons(skb->len + alen - sizeof(struct ipv6hdr));
- *(u8*)(trailer->tail - 1) = nexthdr;
- }
+ top_iph = (struct ipv6hdr *)__skb_push(*pskb, hdr_len);
+ esph = (struct ipv6_esp_hdr *)(*pskb)->h.raw;
+ top_iph->payload_len = htons((*pskb)->len + alen - sizeof(*top_iph));
+ *(u8*)(trailer->tail - 1) = *(*pskb)->nh.raw;
+ *(*pskb)->nh.raw = IPPROTO_ESP;
esph->spi = x->id.spi;
esph->seq_no = htonl(++x->replay.oseq);
crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
do {
- struct scatterlist sgbuf[nfrags>MAX_SG_ONSTACK ? 0 : nfrags];
- struct scatterlist *sg = sgbuf;
+ struct scatterlist *sg = &esp->sgbuf[0];
- if (unlikely(nfrags > MAX_SG_ONSTACK)) {
+ if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
if (!sg)
goto error;
}
- skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
+ skb_to_sgvec(*pskb, sg, esph->enc_data+esp->conf.ivlen-(*pskb)->data, clen);
crypto_cipher_encrypt(tfm, sg, sg, clen);
- if (unlikely(sg != sgbuf))
+ if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
} while (0);
}
if (esp->auth.icv_full_len) {
- esp->auth.icv(esp, skb, (u8*)esph-skb->data,
+ esp->auth.icv(esp, *pskb, (u8*)esph-(*pskb)->data,
sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
- pskb_put(skb, trailer, alen);
+ pskb_put(*pskb, trailer, alen);
}
- skb->nh.raw = skb->data;
-
- x->curlft.bytes += skb->len;
- x->curlft.packets++;
- spin_unlock_bh(&x->lock);
- if ((skb->dst = dst_pop(dst)) == NULL) {
- err = -EHOSTUNREACH;
- goto error_nolock;
- }
- return NET_XMIT_BYPASS;
+ err = 0;
error:
- spin_unlock_bh(&x->lock);
-error_nolock:
- kfree_skb(skb);
return err;
}
/*
 * NOTE(review): diff hunk from the ESP6 decrypt path (esp6_input).
 * The function header, the @@ hunk headers, and several context lines
 * are missing from this chunk, which is why `ret', `elen', `alen',
 * `tmp_hdr' and `hdr_len' appear undeclared and the skb_copy_bits()
 * branch at the bottom has no visible body.  Annotations only; the
 * diff lines themselves are untouched.
 */
{
u8 nexthdr[2];
/* Old code sized a scatterlist VLA on the stack (zero-length when the
 * kmalloc fallback was taken); new code reuses the per-state fast
 * buffer esp->sgbuf, matching the output path. */
- struct scatterlist sgbuf[nfrags>MAX_SG_ONSTACK ? 0 : nfrags];
- struct scatterlist *sg = sgbuf;
+ struct scatterlist *sg = &esp->sgbuf[0];
u8 padlen;
- u8 *prevhdr;
- if (unlikely(nfrags > MAX_SG_ONSTACK)) {
+ if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
if (!sg) {
ret = -ENOMEM;
}
/* Decrypt everything after the ESP header + IV in place. */
skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
- if (unlikely(sg != sgbuf))
+ if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
/* Read the 2-byte ESP trailer (pad length, next header) from just
 * before the ICV. */
if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
skb->nh.raw += sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
memcpy(skb->nh.raw, tmp_hdr, hdr_len);
skb->nh.ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
/* Old code patched the inner next-header byte in place; the rework
 * leaves that to the caller and just returns the protocol number. */
- ip6_find_1stfragopt(skb, &prevhdr);
- ret = *prevhdr = nexthdr[1];
+ ret = nexthdr[1];
}
out:
/*
 * NOTE(review): opening hunk of the ICMPv6 error handler (esp6_err);
 * the signature and remainder of the function are outside this chunk.
 */
struct ipv6_esp_hdr *esph = (struct ipv6_esp_hdr*)(skb->data+offset);
struct xfrm_state *x;
/* Real bug fix: with `||' the condition is a tautology (type cannot
 * equal both constants), so the handler returned for EVERY ICMPv6
 * message.  `&&' correctly proceeds only for DEST_UNREACH and
 * PKT_TOOBIG. */
- if (type != ICMPV6_DEST_UNREACH ||
+ if (type != ICMPV6_DEST_UNREACH &&
type != ICMPV6_PKT_TOOBIG)
return;