X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fipv6%2Fip6_output.c;h=7b7bd44fbf47f3a4b66af6a5bfef62ee8bff4d99;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=81eed71ddfe3fd5a82755306b2506b5392615427;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 81eed71dd..7b7bd44fb 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -28,7 +28,6 @@ * for datagram xmit */ -#include #include #include #include @@ -39,6 +38,7 @@ #include #include #include +#include #include #include @@ -54,13 +54,14 @@ #include #include #include +#include -static int ip6_fragment(struct sk_buff **pskb, int (*output)(struct sk_buff**)); +static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)); static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr) { static u32 ipv6_fragmentation_id = 1; - static spinlock_t ip6_id_lock = SPIN_LOCK_UNLOCKED; + static DEFINE_SPINLOCK(ip6_id_lock); spin_lock_bh(&ip6_id_lock); fhdr->identification = htonl(ipv6_fragmentation_id); @@ -71,23 +72,14 @@ static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *f static inline int ip6_output_finish(struct sk_buff *skb) { - struct dst_entry *dst = skb->dst; - struct hh_cache *hh = dst->hh; - - if (hh) { - int hh_alen; - - read_lock_bh(&hh->hh_lock); - hh_alen = HH_DATA_ALIGN(hh->hh_len); - memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); - read_unlock_bh(&hh->hh_lock); - skb_push(skb, hh->hh_len); - return hh->hh_output(skb); - } else if (dst->neighbour) + + if (dst->hh) + return neigh_hh_output(dst->hh, skb); + else if (dst->neighbour) return dst->neighbour->output(skb); - IP6_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); + IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EINVAL; @@ -107,9 +99,8 @@ static int ip6_dev_loopback_xmit(struct sk_buff *newskb) } -static int ip6_output2(struct sk_buff **pskb) +static int ip6_output2(struct sk_buff *skb) { - struct sk_buff *skb = *pskb; struct dst_entry *dst = skb->dst; struct net_device *dev = dst->dev; @@ -118,6 +109,7 @@ static int ip6_output2(struct sk_buff **pskb) if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) { struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL; + struct inet6_dev *idev = ip6_dst_idev(skb->dst); if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr, @@ -133,71 +125,25 @@ static int ip6_output2(struct sk_buff **pskb) ip6_dev_loopback_xmit); if (skb->nh.ipv6h->hop_limit == 0) { - IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); + IP6_INC_STATS(idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; } } - IP6_INC_STATS(IPSTATS_MIB_OUTMCASTPKTS); + IP6_INC_STATS(idev, IPSTATS_MIB_OUTMCASTPKTS); } return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb,NULL, skb->dev,ip6_output_finish); } -int ip6_output(struct sk_buff **pskb) +int ip6_output(struct sk_buff *skb) { - struct sk_buff *skb = *pskb; - - if ((skb->len > dst_pmtu(skb->dst) || skb_shinfo(skb)->frag_list)) - return ip6_fragment(pskb, ip6_output2); + if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) || + dst_allfrag(skb->dst)) + return ip6_fragment(skb, ip6_output2); else - return ip6_output2(pskb); -} - -#ifdef CONFIG_NETFILTER -int ip6_route_me_harder(struct sk_buff *skb) -{ - struct ipv6hdr *iph = skb->nh.ipv6h; - struct dst_entry *dst; - struct flowi fl = { - .oif = skb->sk ? 
skb->sk->sk_bound_dev_if : 0, - .nl_u = - { .ip6_u = - { .daddr = iph->daddr, - .saddr = iph->saddr, } }, - .proto = iph->nexthdr, - }; - - dst = ip6_route_output(skb->sk, &fl); - - if (dst->error) { - IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES); - LIMIT_NETDEBUG( - printk(KERN_DEBUG "ip6_route_me_harder: No more route.\n")); - dst_release(dst); - return -EINVAL; - } - - /* Drop old route. */ - dst_release(skb->dst); - - skb->dst = dst; - return 0; -} -#endif - -static inline int ip6_maybe_reroute(struct sk_buff *skb) -{ -#ifdef CONFIG_NETFILTER - if (skb->nfcache & NFC_ALTERED){ - if (ip6_route_me_harder(skb) != 0){ - kfree_skb(skb); - return -EINVAL; - } - } -#endif /* CONFIG_NETFILTER */ - return dst_output(skb); + return ip6_output2(skb); } /* @@ -207,13 +153,13 @@ static inline int ip6_maybe_reroute(struct sk_buff *skb) int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, struct ipv6_txoptions *opt, int ipfragok) { - struct ipv6_pinfo *np = sk ? inet6_sk(sk) : NULL; + struct ipv6_pinfo *np = inet6_sk(sk); struct in6_addr *first_hop = &fl->fl6_dst; struct dst_entry *dst = skb->dst; struct ipv6hdr *hdr; u8 proto = fl->proto; int seg_len = skb->len; - int hlimit; + int hlimit, tclass; u32 mtu; if (opt) { @@ -228,12 +174,14 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, if (skb_headroom(skb) < head_room) { struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); - kfree_skb(skb); - skb = skb2; - if (skb == NULL) { - IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); + if (skb2 == NULL) { + IP6_INC_STATS(ip6_dst_idev(skb->dst), + IPSTATS_MIB_OUTDISCARDS); + kfree_skb(skb); return -ENOBUFS; } + kfree_skb(skb); + skb = skb2; if (sk) skb_set_owner_w(skb, sk); } @@ -249,12 +197,21 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, * Fill in the IPv6 header */ - *(u32*)hdr = htonl(0x60000000) | fl->fl6_flowlabel; hlimit = -1; if (np) hlimit = np->hop_limit; if (hlimit < 0) hlimit = dst_metric(dst, RTAX_HOPLIMIT); + if (hlimit < 0) + hlimit = ipv6_get_hoplimit(dst->dev); + + tclass = -1; + if (np) + tclass = np->tclass; + if (tclass < 0) + tclass = 0; + + *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel; hdr->payload_len = htons(seg_len); hdr->nexthdr = proto; @@ -263,17 +220,21 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); ipv6_addr_copy(&hdr->daddr, first_hop); - mtu = dst_pmtu(dst); - if ((skb->len <= mtu) || ipfragok) { - IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); - return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute); + skb->priority = sk->sk_priority; + + mtu = dst_mtu(dst); + if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) { + IP6_INC_STATS(ip6_dst_idev(skb->dst), + IPSTATS_MIB_OUTREQUESTS); + return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, + dst_output); } if (net_ratelimit()) printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n"); skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); - IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); + IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } @@ -301,7 +262,7 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev, hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr)); skb->nh.ipv6h = hdr; - *(u32*)hdr = htonl(0x60000000); + *(__be32*)hdr = htonl(0x60000000); hdr->payload_len = htons(len); hdr->nexthdr = proto; @@ -313,7 +274,7 @@ int ip6_nd_hdr(struct sock *sk, 
struct sk_buff *skb, struct net_device *dev, return 0; } -int ip6_call_ra_chain(struct sk_buff *skb, int sel) +static int ip6_call_ra_chain(struct sk_buff *skb, int sel) { struct ip6_ra_chain *ra; struct sock *last = NULL; @@ -321,7 +282,9 @@ int ip6_call_ra_chain(struct sk_buff *skb, int sel) read_lock(&ip6_ra_lock); for (ra = ip6_ra_chain; ra; ra = ra->next) { struct sock *sk = ra->sk; - if (sk && ra->sel == sel) { + if (sk && ra->sel == sel && + (!sk->sk_bound_dev_if || + sk->sk_bound_dev_if == skb->dev->ifindex)) { if (last) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) @@ -340,6 +303,56 @@ int ip6_call_ra_chain(struct sk_buff *skb, int sel) return 0; } +static int ip6_forward_proxy_check(struct sk_buff *skb) +{ + struct ipv6hdr *hdr = skb->nh.ipv6h; + u8 nexthdr = hdr->nexthdr; + int offset; + + if (ipv6_ext_hdr(nexthdr)) { + offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr); + if (offset < 0) + return 0; + } else + offset = sizeof(struct ipv6hdr); + + if (nexthdr == IPPROTO_ICMPV6) { + struct icmp6hdr *icmp6; + + if (!pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data)) + return 0; + + icmp6 = (struct icmp6hdr *)(skb->nh.raw + offset); + + switch (icmp6->icmp6_type) { + case NDISC_ROUTER_SOLICITATION: + case NDISC_ROUTER_ADVERTISEMENT: + case NDISC_NEIGHBOUR_SOLICITATION: + case NDISC_NEIGHBOUR_ADVERTISEMENT: + case NDISC_REDIRECT: + /* For reaction involving unicast neighbor discovery + * message destined to the proxied address, pass it to + * input function. + */ + return 1; + default: + break; + } + } + + /* + * The proxying router can't forward traffic sent to a link-local + * address, so signal the sender and discard the packet. This + * behavior is clarified by the MIPv6 specification. + */ + if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) { + dst_link_failure(skb); + return -1; + } + + return 0; +} + static inline int ip6_forward_finish(struct sk_buff *skb) { return dst_output(skb); @@ -355,7 +368,7 @@ int ip6_forward(struct sk_buff *skb) goto error; if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { - IP6_INC_STATS(IPSTATS_MIB_INDISCARDS); + IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } @@ -388,15 +401,29 @@ int ip6_forward(struct sk_buff *skb) skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0, skb->dev); + IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -ETIMEDOUT; } + /* XXX: idev->cnf.proxy_ndp? */ + if (ipv6_devconf.proxy_ndp && + pneigh_lookup(&nd_tbl, &hdr->daddr, skb->dev, 0)) { + int proxied = ip6_forward_proxy_check(skb); + if (proxied > 0) + return ip6_input(skb); + else if (proxied < 0) { + IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); + goto drop; + } + } + if (!xfrm6_route_forward(skb)) { - IP6_INC_STATS(IPSTATS_MIB_INDISCARDS); + IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } + dst = skb->dst; /* IPv6 specs say nothing about it, but it is clear that we cannot send redirects to source routed frames. 
@@ -428,18 +455,18 @@ int ip6_forward(struct sk_buff *skb) goto error; } - if (skb->len > dst_pmtu(dst)) { + if (skb->len > dst_mtu(dst)) { /* Again, force OUTPUT device used as source address */ skb->dev = dst->dev; - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_pmtu(dst), skb->dev); - IP6_INC_STATS_BH(IPSTATS_MIB_INTOOBIGERRORS); - IP6_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS); + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev); + IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); + IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (skb_cow(skb, dst->dev->hard_header_len)) { - IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); + IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS); goto drop; } @@ -449,11 +476,11 @@ int ip6_forward(struct sk_buff *skb) hdr->hop_limit--; - IP6_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS); + IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); return NF_HOOK(PF_INET6,NF_IP6_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish); error: - IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS); + IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS); drop: kfree_skb(skb); return -EINVAL; @@ -464,27 +491,32 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->pkt_type = from->pkt_type; to->priority = from->priority; to->protocol = from->protocol; - to->security = from->security; + dst_release(to->dst); to->dst = dst_clone(from->dst); to->dev = from->dev; + to->mark = from->mark; #ifdef CONFIG_NET_SCHED to->tc_index = from->tc_index; #endif #ifdef CONFIG_NETFILTER - to->nfmark = from->nfmark; /* Connection association is same as pre-frag packet */ + nf_conntrack_put(to->nfct); to->nfct = from->nfct; nf_conntrack_get(to->nfct); + to->nfctinfo = from->nfctinfo; +#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put_reasm(to->nfct_reasm); + to->nfct_reasm = from->nfct_reasm; + nf_conntrack_get_reasm(to->nfct_reasm); +#endif #ifdef CONFIG_BRIDGE_NETFILTER nf_bridge_put(to->nf_bridge); to->nf_bridge = from->nf_bridge; nf_bridge_get(to->nf_bridge); #endif -#ifdef CONFIG_NETFILTER_DEBUG - to->nf_debug = from->nf_debug; -#endif #endif + skb_copy_secmark(to, from); } int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) @@ -500,31 +532,41 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) switch (**nexthdr) { case NEXTHDR_HOP: + break; case NEXTHDR_ROUTING: + found_rhdr = 1; + break; case NEXTHDR_DEST: - if (**nexthdr == NEXTHDR_ROUTING) found_rhdr = 1; - if (**nexthdr == NEXTHDR_DEST && found_rhdr) return offset; - offset += ipv6_optlen(exthdr); - *nexthdr = &exthdr->nexthdr; - exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); +#ifdef CONFIG_IPV6_MIP6 + if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) + break; +#endif + if (found_rhdr) + return offset; break; default : return offset; } + + offset += ipv6_optlen(exthdr); + *nexthdr = &exthdr->nexthdr; + exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); } return offset; } +EXPORT_SYMBOL_GPL(ip6_find_1stfragopt); -static int ip6_fragment(struct sk_buff **pskb, int (*output)(struct sk_buff**)) +static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct net_device *dev; - struct sk_buff *frag, *skb = *pskb; + struct sk_buff *frag; struct rt6_info *rt = (struct rt6_info*)skb->dst; + struct ipv6_pinfo *np = skb->sk ? 
inet6_sk(skb->sk) : NULL; struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; - u32 frag_id = 0; + __be32 frag_id = 0; int ptr, offset = 0, err=0; u8 *prevhdr, nexthdr = 0; @@ -532,7 +574,12 @@ static int ip6_fragment(struct sk_buff **pskb, int (*output)(struct sk_buff**)) hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; - mtu = dst_pmtu(&rt->u.dst) - hlen - sizeof(struct frag_hdr); + mtu = dst_mtu(&rt->u.dst); + if (np && np->frag_size < mtu) { + if (np->frag_size) + mtu = np->frag_size; + } + mtu -= hlen + sizeof(struct frag_hdr); if (skb_shinfo(skb)->frag_list) { int first_len = skb_pagelen(skb); @@ -549,13 +596,17 @@ static int ip6_fragment(struct sk_buff **pskb, int (*output)(struct sk_buff**)) skb_headroom(frag) < hlen) goto slow_path; - /* Correct socket ownership. */ - if (frag->sk == NULL) - goto slow_path; - /* Partially cloned skb? */ if (skb_shared(frag)) goto slow_path; + + BUG_ON(frag->sk); + if (skb->sk) { + sock_hold(skb->sk); + frag->sk = skb->sk; + frag->destructor = sock_wfree; + skb->truesize -= frag->truesize; + } } err = 0; @@ -564,14 +615,13 @@ static int ip6_fragment(struct sk_buff **pskb, int (*output)(struct sk_buff**)) skb_shinfo(skb)->frag_list = NULL; /* BUILD HEADER */ - tmp_hdr = kmalloc(hlen, GFP_ATOMIC); + *prevhdr = NEXTHDR_FRAGMENT; + tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC); if (!tmp_hdr) { - IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); + IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); return -ENOMEM; } - *prevhdr = NEXTHDR_FRAGMENT; - memcpy(tmp_hdr, skb->nh.raw, hlen); __skb_pull(skb, hlen); fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); skb->nh.raw = __skb_push(skb, hlen); @@ -587,12 +637,14 @@ static int ip6_fragment(struct sk_buff **pskb, int (*output)(struct sk_buff**)) skb->data_len = first_len - skb_headlen(skb); skb->len = first_len; skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr)); - + + dst_hold(&rt->u.dst); for (;;) { /* Prepare header of the next frame, * before previous one went down. 
*/ if (frag) { + frag->ip_summed = CHECKSUM_NONE; frag->h.raw = frag->data; fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); frag->nh.raw = __skb_push(frag, hlen); @@ -608,7 +660,10 @@ static int ip6_fragment(struct sk_buff **pskb, int (*output)(struct sk_buff**)) ip6_copy_metadata(frag, skb); } - err = output(&skb); + err = output(skb); + if(!err) + IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGCREATES); + if (err || !frag) break; @@ -617,11 +672,11 @@ static int ip6_fragment(struct sk_buff **pskb, int (*output)(struct sk_buff**)) skb->next = NULL; } - if (tmp_hdr) - kfree(tmp_hdr); + kfree(tmp_hdr); if (err == 0) { - IP6_INC_STATS(IPSTATS_MIB_FRAGOKS); + IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGOKS); + dst_release(&rt->u.dst); return 0; } @@ -631,7 +686,8 @@ static int ip6_fragment(struct sk_buff **pskb, int (*output)(struct sk_buff**)) frag = skb; } - IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); + IP6_INC_STATS(ip6_dst_idev(&rt->u.dst), IPSTATS_MIB_FRAGFAILS); + dst_release(&rt->u.dst); return err; } @@ -663,8 +719,9 @@ slow_path: */ if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { - NETDEBUG(printk(KERN_INFO "IPv6: frag: no memory for new fragment!\n")); - IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); + NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); + IP6_INC_STATS(ip6_dst_idev(skb->dst), + IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; goto fail; } @@ -697,7 +754,7 @@ slow_path: */ fh->nexthdr = nexthdr; fh->reserved = 0; - if (frag_id) { + if (!frag_id) { ipv6_select_ident(skb, fh); frag_id = fh->identification; } else @@ -721,64 +778,77 @@ slow_path: /* * Put this fragment into the sending queue. */ - - IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES); - - err = output(&frag); + err = output(frag); if (err) goto fail; + + IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGCREATES); } + IP6_INC_STATS(ip6_dst_idev(skb->dst), + IPSTATS_MIB_FRAGOKS); kfree_skb(skb); - IP6_INC_STATS(IPSTATS_MIB_FRAGOKS); return err; fail: + IP6_INC_STATS(ip6_dst_idev(skb->dst), + IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); - IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS); return err; } -int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) +static inline int ip6_rt_check(struct rt6key *rt_key, + struct in6_addr *fl_addr, + struct in6_addr *addr_cache) { - int err = 0; + return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) && + (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache))); +} - *dst = NULL; - if (sk) { - struct ipv6_pinfo *np = inet6_sk(sk); - - *dst = __sk_dst_check(sk, np->dst_cookie); - if (*dst) { - struct rt6_info *rt = (struct rt6_info*)*dst; - - /* Yes, checking route validity in not connected - case is not very simple. Take into account, - that we do not support routing by source, TOS, - and MSG_DONTROUTE --ANK (980726) - - 1. If route was host route, check that - cached destination is current. - If it is network route, we still may - check its validity using saved pointer - to the last used address: daddr_cache. - We do not want to save whole address now, - (because main consumer of this service - is tcp, which has not this problem), - so that the last trick works only on connected - sockets. - 2. oif also should be the same. 
- */ - - if (((rt->rt6i_dst.plen != 128 || - ipv6_addr_cmp(&fl->fl6_dst, &rt->rt6i_dst.addr)) - && (np->daddr_cache == NULL || - ipv6_addr_cmp(&fl->fl6_dst, np->daddr_cache))) - || (fl->oif && fl->oif != (*dst)->dev->ifindex)) { - *dst = NULL; - } else - dst_hold(*dst); - } +static struct dst_entry *ip6_sk_dst_check(struct sock *sk, + struct dst_entry *dst, + struct flowi *fl) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + struct rt6_info *rt = (struct rt6_info *)dst; + + if (!dst) + goto out; + + /* Yes, checking route validity in not connected + * case is not very simple. Take into account, + * that we do not support routing by source, TOS, + * and MSG_DONTROUTE --ANK (980726) + * + * 1. ip6_rt_check(): If route was host route, + * check that cached destination is current. + * If it is network route, we still may + * check its validity using saved pointer + * to the last used address: daddr_cache. + * We do not want to save whole address now, + * (because main consumer of this service + * is tcp, which has not this problem), + * so that the last trick works only on connected + * sockets. + * 2. oif also should be the same. + */ + if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) || +#ifdef CONFIG_IPV6_SUBTREES + ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) || +#endif + (fl->oif && fl->oif != dst->dev->ifindex)) { + dst_release(dst); + dst = NULL; } +out: + return dst; +} + +static int ip6_dst_lookup_tail(struct sock *sk, + struct dst_entry **dst, struct flowi *fl) +{ + int err; + if (*dst == NULL) *dst = ip6_route_output(sk, fl); @@ -787,19 +857,9 @@ int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) if (ipv6_addr_any(&fl->fl6_src)) { err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src); - - if (err) { -#if IP6_DEBUG >= 2 - printk(KERN_DEBUG "ip6_dst_lookup: " - "no available source address\n"); -#endif + if (err) goto out_err_release; - } } - if ((err = xfrm_lookup(dst, fl, sk, 0)) < 0) { - err = -ENETUNREACH; - goto out_err_release; - } return 0; @@ -809,19 +869,123 @@ out_err_release: return err; } -int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, - int hlimit, struct ipv6_txoptions *opt, struct flowi *fl, struct rt6_info *rt, - unsigned int flags) +/** + * ip6_dst_lookup - perform route lookup on flow + * @sk: socket which provides route info + * @dst: pointer to dst_entry * for result + * @fl: flow to lookup + * + * This function performs a route lookup on the given flow. + * + * It returns zero on success, or a standard errno code on error. + */ +int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) +{ + *dst = NULL; + return ip6_dst_lookup_tail(sk, dst, fl); +} +EXPORT_SYMBOL_GPL(ip6_dst_lookup); + +/** + * ip6_sk_dst_lookup - perform socket cached route lookup on flow + * @sk: socket which provides the dst cache and route info + * @dst: pointer to dst_entry * for result + * @fl: flow to lookup + * + * This function performs a route lookup on the given flow with the + * possibility of using the cached route in the socket if it is valid. + * It will take the socket dst lock when operating on the dst cache. + * As a result, this function can only be used in process context. + * + * It returns zero on success, or a standard errno code on error. 
+ */ +int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl) +{ + *dst = NULL; + if (sk) { + *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); + *dst = ip6_sk_dst_check(sk, *dst, fl); + } + + return ip6_dst_lookup_tail(sk, dst, fl); +} +EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup); + +static inline int ip6_ufo_append_data(struct sock *sk, + int getfrag(void *from, char *to, int offset, int len, + int odd, struct sk_buff *skb), + void *from, int length, int hh_len, int fragheaderlen, + int transhdrlen, int mtu,unsigned int flags) + { - struct inet_opt *inet = inet_sk(sk); + struct sk_buff *skb; + int err; + + /* There is support for UDP large send offload by network + * device, so create one single skb packet containing complete + * udp datagram + */ + if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { + skb = sock_alloc_send_skb(sk, + hh_len + fragheaderlen + transhdrlen + 20, + (flags & MSG_DONTWAIT), &err); + if (skb == NULL) + return -ENOMEM; + + /* reserve space for Hardware header */ + skb_reserve(skb, hh_len); + + /* create space for UDP/IP header */ + skb_put(skb,fragheaderlen + transhdrlen); + + /* initialize network header pointer */ + skb->nh.raw = skb->data; + + /* initialize protocol header pointer */ + skb->h.raw = skb->data + fragheaderlen; + + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum = 0; + sk->sk_sndmsg_off = 0; + } + + err = skb_append_datato_frags(sk,skb, getfrag, from, + (length - transhdrlen)); + if (!err) { + struct frag_hdr fhdr; + + /* specify the length of each IP datagram fragment*/ + skb_shinfo(skb)->gso_size = mtu - fragheaderlen - + sizeof(struct frag_hdr); + skb_shinfo(skb)->gso_type = SKB_GSO_UDP; + ipv6_select_ident(skb, &fhdr); + skb_shinfo(skb)->ip6_frag_id = fhdr.identification; + __skb_queue_tail(&sk->sk_write_queue, skb); + + return 0; + } + /* There is not enough support do UPD LSO, + * so follow normal path + */ + kfree_skb(skb); + + return err; +} + +int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, + int offset, int len, int odd, struct sk_buff *skb), + void *from, int length, int transhdrlen, + int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl, + struct rt6_info *rt, unsigned int flags) +{ + struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *skb; unsigned int maxfraglen, fragheaderlen; int exthdrlen; int hh_len; int mtu; - int copy = 0; + int copy; int err; int offset = 0; int csummode = CHECKSUM_NONE; @@ -850,7 +1014,15 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offse np->cork.rt = rt; inet->cork.fl = *fl; np->cork.hop_limit = hlimit; - inet->cork.fragsize = mtu = dst_pmtu(&rt->u.dst); + np->cork.tclass = tclass; + mtu = dst_mtu(rt->u.dst.path); + if (np->frag_size < mtu) { + if (np->frag_size) + mtu = np->frag_size; + } + inet->cork.fragsize = mtu; + if (dst_allfrag(rt->u.dst.path)) + inet->cork.flags |= IPCORK_ALLFRAG; inet->cork.length = 0; sk->sk_sndmsg_page = NULL; sk->sk_sndmsg_off = 0; @@ -869,7 +1041,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offse hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); - fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0); + fragheaderlen = sizeof(struct ipv6hdr) + rt->u.dst.nfheader_len + (opt ? 
opt->opt_nflen : 0); maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { @@ -879,29 +1051,89 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offse } } + /* + * Let's try using as much space as possible. + * Use MTU if total length of the message fits into the MTU. + * Otherwise, we need to reserve fragment header and + * fragment alignment (= 8-15 octects, in total). + * + * Note that we may need to "move" the data from the tail of + * of the buffer to the new fragment when we split + * the message. + * + * FIXME: It may be fragmented into multiple chunks + * at once if non-fragmentable extension headers + * are too large. + * --yoshfuji + */ + inet->cork.length += length; + if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) && + (rt->u.dst.dev->features & NETIF_F_UFO)) { + + err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len, + fragheaderlen, transhdrlen, mtu, + flags); + if (err) + goto error; + return 0; + } if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) goto alloc_new_skb; while (length > 0) { - if ((copy = maxfraglen - skb->len) <= 0) { + /* Check if the remaining data fits into current packet. */ + copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len; + if (copy < length) + copy = maxfraglen - skb->len; + + if (copy <= 0) { char *data; unsigned int datalen; unsigned int fraglen; + unsigned int fraggap; unsigned int alloclen; - BUG_TRAP(copy == 0); + struct sk_buff *skb_prev; alloc_new_skb: - datalen = maxfraglen - fragheaderlen; - if (datalen > length) - datalen = length; + skb_prev = skb; + + /* There's no room in the current skb */ + if (skb_prev) + fraggap = skb_prev->len - maxfraglen; + else + fraggap = 0; + + /* + * If remaining data exceeds the mtu, + * we know we need more fragment(s). + */ + datalen = length + fraggap; + if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen) + datalen = maxfraglen - fragheaderlen; + fraglen = datalen + fragheaderlen; if ((flags & MSG_MORE) && !(rt->u.dst.dev->features&NETIF_F_SG)) - alloclen = maxfraglen; + alloclen = mtu; else - alloclen = fraglen; + alloclen = datalen + fragheaderlen; + + /* + * The last fragment gets additional space at tail. + * Note: we overallocate on fragments with MSG_MODE + * because we have no idea if we're the last one. + */ + if (datalen == length + fraggap) + alloclen += rt->u.dst.trailer_len; + + /* + * We just reserve space for fragment header. + * Note: this may be overallocation if the message + * (without MSG_MORE) fits into the MTU. 
+ */ alloclen += sizeof(struct frag_hdr); + if (transhdrlen) { skb = sock_alloc_send_skb(sk, alloclen + hh_len, @@ -923,7 +1155,7 @@ alloc_new_skb: */ skb->ip_summed = csummode; skb->csum = 0; - /* reserve 8 byte for fragmentation */ + /* reserve for fragmentation */ skb_reserve(skb, hh_len+sizeof(struct frag_hdr)); /* @@ -933,15 +1165,29 @@ alloc_new_skb: skb->nh.raw = data + exthdrlen; data += fragheaderlen; skb->h.raw = data + exthdrlen; - copy = datalen - transhdrlen; - if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, 0, skb) < 0) { + + if (fraggap) { + skb->csum = skb_copy_and_csum_bits( + skb_prev, maxfraglen, + data + transhdrlen, fraggap, 0); + skb_prev->csum = csum_sub(skb_prev->csum, + skb->csum); + data += fraggap; + pskb_trim_unique(skb_prev, maxfraglen); + } + copy = datalen - transhdrlen - fraggap; + if (copy < 0) { + err = -EINVAL; + kfree_skb(skb); + goto error; + } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { err = -EFAULT; kfree_skb(skb); goto error; } offset += copy; - length -= datalen; + length -= datalen - fraggap; transhdrlen = 0; exthdrlen = 0; csummode = CHECKSUM_NONE; @@ -1019,7 +1265,7 @@ alloc_new_skb: return 0; error: inet->cork.length -= length; - IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); + IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); return err; } @@ -1028,7 +1274,7 @@ int ip6_push_pending_frames(struct sock *sk) struct sk_buff *skb, *tmp_skb; struct sk_buff **tail_skb; struct in6_addr final_dst_buf, *final_dst = &final_dst_buf; - struct inet_opt *inet = inet_sk(sk); + struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *hdr; struct ipv6_txoptions *opt = np->cork.opt; @@ -1050,12 +1296,10 @@ int ip6_push_pending_frames(struct sock *sk) tail_skb = &(tmp_skb->next); skb->len += tmp_skb->len; skb->data_len += tmp_skb->len; -#if 0 /* Logically correct, but useless work, ip_fragment() will have to undo */ skb->truesize += tmp_skb->truesize; __sock_put(tmp_skb->sk); tmp_skb->destructor = NULL; tmp_skb->sk = NULL; -#endif } ipv6_addr_copy(final_dst, &fl->fl6_dst); @@ -1067,7 +1311,8 @@ int ip6_push_pending_frames(struct sock *sk) skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr)); - *(u32*)hdr = fl->fl6_flowlabel | htonl(0x60000000); + *(__be32*)hdr = fl->fl6_flowlabel | + htonl(0x60000000 | ((int)np->cork.tclass << 20)); if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); @@ -1078,25 +1323,26 @@ int ip6_push_pending_frames(struct sock *sk) ipv6_addr_copy(&hdr->saddr, &fl->fl6_src); ipv6_addr_copy(&hdr->daddr, final_dst); + skb->priority = sk->sk_priority; + skb->dst = dst_clone(&rt->u.dst); - IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); + IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output); if (err) { if (err > 0) - err = inet->recverr ? net_xmit_errno(err) : 0; + err = np->recverr ? 
net_xmit_errno(err) : 0; if (err) goto error; } out: inet->cork.flags &= ~IPCORK_OPT; - if (np->cork.opt) { - kfree(np->cork.opt); - np->cork.opt = NULL; - } + kfree(np->cork.opt); + np->cork.opt = NULL; if (np->cork.rt) { dst_release(&np->cork.rt->u.dst); np->cork.rt = NULL; + inet->cork.flags &= ~IPCORK_ALLFRAG; } memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); return err; @@ -1106,24 +1352,24 @@ error: void ip6_flush_pending_frames(struct sock *sk) { - struct inet_opt *inet = inet_sk(sk); + struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *skb; while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { - IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS); + IP6_INC_STATS(ip6_dst_idev(skb->dst), + IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); } inet->cork.flags &= ~IPCORK_OPT; - if (np->cork.opt) { - kfree(np->cork.opt); - np->cork.opt = NULL; - } + kfree(np->cork.opt); + np->cork.opt = NULL; if (np->cork.rt) { dst_release(&np->cork.rt->u.dst); np->cork.rt = NULL; + inet->cork.flags &= ~IPCORK_ALLFRAG; } memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); }
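
For reference, here is a minimal userspace sketch (not part of the patch) of how the first 32-bit word of the IPv6 header is assembled from the version, traffic class and flow label, mirroring the htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel construction that the patch introduces in ip6_xmit() and ip6_push_pending_frames(). The helper name build_v6_word() and the standalone main() are illustrative assumptions only, not kernel code.

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>   /* htonl(), ntohl() */

    /* Hypothetical helper: pack version (6), an 8-bit traffic class and a
     * 20-bit flow label into the first word of an IPv6 header, in network
     * byte order, following the same expression used in the patch above.
     */
    static uint32_t build_v6_word(uint8_t tclass, uint32_t flowlabel_be)
    {
            /* 0x60000000 puts version 6 in the top 4 bits; tclass occupies
             * bits 20-27; the flow label (passed here already in network
             * byte order, low 20 bits) is OR-ed in afterwards, exactly as
             * the kernel expression does with fl->fl6_flowlabel. */
            return htonl(0x60000000 | ((uint32_t)tclass << 20)) | flowlabel_be;
    }

    int main(void)
    {
            uint32_t w = build_v6_word(0x2e, htonl(0x12345));
            printf("first IPv6 word: 0x%08x\n", ntohl(w)); /* 0x62e12345 */
            return 0;
    }

The printed value 0x62e12345 decomposes as version 6, traffic class 0x2e and flow label 0x12345, which is why the patch can OR the precomputed big-endian flow label directly into the htonl() result without further shifting.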