-static bool check_ipv4_address(__be32 addr)
-{
- if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
- ipv4_is_loopback(addr) || ipv4_is_zeronet(addr))
- return false;
-
- return true;
-}
-
-static bool ipv4_should_icmp(struct sk_buff *skb)
-{
- struct iphdr *old_iph = ip_hdr(skb);
-
- /* Don't respond to L2 broadcast or multicast. */
- if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
- return false;
-
- /* Don't respond to L3 broadcast or invalid addresses. */
- if (!check_ipv4_address(old_iph->daddr) ||
- !check_ipv4_address(old_iph->saddr))
- return false;
-
- /* Only respond to the first fragment. */
- if (old_iph->frag_off & htons(IP_OFFSET))
- return false;
-
- /* Don't respond to ICMP error messages. */
- if (old_iph->protocol == IPPROTO_ICMP) {
- u8 icmp_type, *icmp_typep;
-
- icmp_typep = skb_header_pointer(skb, (u8 *)old_iph +
- (old_iph->ihl << 2) +
- offsetof(struct icmphdr, type) -
- skb->data, sizeof(icmp_type),
- &icmp_type);
-
- if (!icmp_typep)
- return false;
-
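- /* Of the types up to ICMP_PARAMETERPROB, only echo and echo
- * reply are informational; the rest are error messages.
- */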
- if (*icmp_typep > NR_ICMP_TYPES ||
- (*icmp_typep <= ICMP_PARAMETERPROB &&
- *icmp_typep != ICMP_ECHOREPLY &&
- *icmp_typep != ICMP_ECHO))
- return false;
- }
-
- return true;
-}
-
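-/*
- * Builds an ICMP "fragmentation needed" reply in nskb, addressed back at
- * the sender and quoting payload_length bytes of the original packet.
- */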
-static void ipv4_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
- unsigned int mtu, unsigned int payload_length)
-{
- struct iphdr *iph, *old_iph = ip_hdr(skb);
- struct icmphdr *icmph;
- u8 *payload;
-
- iph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
- icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
- payload = skb_put(nskb, payload_length);
-
- /* IP */
- iph->version = 4;
- iph->ihl = sizeof(struct iphdr) >> 2;
- iph->tos = (old_iph->tos & IPTOS_TOS_MASK) |
- IPTOS_PREC_INTERNETCONTROL;
- iph->tot_len = htons(sizeof(struct iphdr)
- + sizeof(struct icmphdr)
- + payload_length);
- get_random_bytes(&iph->id, sizeof(iph->id));
- iph->frag_off = 0;
- iph->ttl = IPDEFTTL;
- iph->protocol = IPPROTO_ICMP;
- iph->daddr = old_iph->saddr;
- iph->saddr = old_iph->daddr;
-
- ip_send_check(iph);
-
- /* ICMP */
- icmph->type = ICMP_DEST_UNREACH;
- icmph->code = ICMP_FRAG_NEEDED;
- icmph->un.gateway = htonl(mtu);
- icmph->checksum = 0;
-
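- /* Checksum the ICMP header, then fold in the quoted part of the
- * original packet while copying it into place.
- */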
- nskb->csum = csum_partial((u8 *)icmph, sizeof(struct icmphdr), 0);
- nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_iph - skb->data,
- payload, payload_length,
- nskb->csum);
- icmph->checksum = csum_fold(nskb->csum);
-}
-
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static bool ipv6_should_icmp(struct sk_buff *skb)
-{
- struct ipv6hdr *old_ipv6h = ipv6_hdr(skb);
- int addr_type;
- int payload_off = (u8 *)(old_ipv6h + 1) - skb->data;
- u8 nexthdr = ipv6_hdr(skb)->nexthdr;
- __be16 frag_off;
-
- /* Check that the source address is valid. */
- addr_type = ipv6_addr_type(&old_ipv6h->saddr);
- if ((addr_type & IPV6_ADDR_MULTICAST) || addr_type == IPV6_ADDR_ANY)
- return false;
-
- /* Don't reply if the destination address is unspecified. */
- if (ipv6_addr_type(&old_ipv6h->daddr) == IPV6_ADDR_ANY)
- return false;
-
- /* Don't respond to ICMP error messages. */
- payload_off = ipv6_skip_exthdr(skb, payload_off, &nexthdr, &frag_off);
- if (payload_off < 0)
- return false;
-
- if (nexthdr == NEXTHDR_ICMP) {
- u8 icmp_type, *icmp_typep;
-
- icmp_typep = skb_header_pointer(skb, payload_off +
- offsetof(struct icmp6hdr,
- icmp6_type),
- sizeof(icmp_type), &icmp_type);
-
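- /* ICMPv6 informational types have the high bit set (>= 128);
- * anything else is an error message.
- */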
- if (!icmp_typep || !(*icmp_typep & ICMPV6_INFOMSG_MASK))
- return false;
- }
-
- return true;
-}
-
-static void ipv6_build_icmp(struct sk_buff *skb, struct sk_buff *nskb,
- unsigned int mtu, unsigned int payload_length)
-{
- struct ipv6hdr *ipv6h, *old_ipv6h = ipv6_hdr(skb);
- struct icmp6hdr *icmp6h;
- u8 *payload;
-
- ipv6h = (struct ipv6hdr *)skb_put(nskb, sizeof(struct ipv6hdr));
- icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
- payload = skb_put(nskb, payload_length);
-
- /* IPv6 */
- ipv6h->version = 6;
- ipv6h->priority = 0;
- memset(&ipv6h->flow_lbl, 0, sizeof(ipv6h->flow_lbl));
- ipv6h->payload_len = htons(sizeof(struct icmp6hdr)
- + payload_length);
- ipv6h->nexthdr = NEXTHDR_ICMP;
- ipv6h->hop_limit = IPV6_DEFAULT_HOPLIMIT;
- ipv6h->daddr = old_ipv6h->saddr;
- ipv6h->saddr = old_ipv6h->daddr;
-
- /* ICMPv6 */
- icmp6h->icmp6_type = ICMPV6_PKT_TOOBIG;
- icmp6h->icmp6_code = 0;
- icmp6h->icmp6_cksum = 0;
- icmp6h->icmp6_mtu = htonl(mtu);
-
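- /* Unlike ICMPv4, the ICMPv6 checksum also covers a pseudo-header,
- * which csum_ipv6_magic() folds in at the end.
- */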
- nskb->csum = csum_partial((u8 *)icmp6h, sizeof(struct icmp6hdr), 0);
- nskb->csum = skb_copy_and_csum_bits(skb, (u8 *)old_ipv6h - skb->data,
- payload, payload_length,
- nskb->csum);
- icmp6h->icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
- sizeof(struct icmp6hdr)
- + payload_length,
- ipv6h->nexthdr, nskb->csum);
-}
-#endif /* IPv6 */
-
-bool ovs_tnl_frag_needed(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
-{
- unsigned int eth_hdr_len = ETH_HLEN;
- unsigned int total_length = 0, header_length = 0, payload_length;
- struct ethhdr *eh, *old_eh = eth_hdr(skb);
- struct sk_buff *nskb;
-
- /* Sanity check */
- if (skb->protocol == htons(ETH_P_IP)) {
- if (mtu < IP_MIN_MTU)
- return false;
-
- if (!ipv4_should_icmp(skb))
- return true;
- }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- else if (skb->protocol == htons(ETH_P_IPV6)) {
- if (mtu < IPV6_MIN_MTU)
- return false;
-
- /*
- * In theory we should do PMTUD on IPv6 multicast messages, but
- * we don't have an address to send from, so just fragment.
- */
- if (ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST)
- return false;
-
- if (!ipv6_should_icmp(skb))
- return true;
- }
-#endif
- else
- return false;
-
- /* Allocate */
- if (old_eh->h_proto == htons(ETH_P_8021Q))
- eth_hdr_len = VLAN_ETH_HLEN;
-
- payload_length = skb->len - eth_hdr_len;
- if (skb->protocol == htons(ETH_P_IP)) {
- header_length = sizeof(struct iphdr) + sizeof(struct icmphdr);
- total_length = min_t(unsigned int, header_length +
- payload_length, 576);
- }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- else {
- header_length = sizeof(struct ipv6hdr) +
- sizeof(struct icmp6hdr);
- total_length = min_t(unsigned int, header_length +
- payload_length, IPV6_MIN_MTU);
- }
-#endif
-
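- /* Trim the quoted payload to fit the cap applied above. */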
- payload_length = total_length - header_length;
-
- nskb = dev_alloc_skb(NET_IP_ALIGN + eth_hdr_len + header_length +
- payload_length);
- if (!nskb)
- return false;
-
- skb_reserve(nskb, NET_IP_ALIGN);
-
- /* Ethernet / VLAN */
- eh = (struct ethhdr *)skb_put(nskb, eth_hdr_len);
- memcpy(eh->h_dest, old_eh->h_source, ETH_ALEN);
- memcpy(eh->h_source, mutable->eth_addr, ETH_ALEN);
- nskb->protocol = eh->h_proto = old_eh->h_proto;
- if (old_eh->h_proto == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *vh = (struct vlan_ethhdr *)eh;
-
- vh->h_vlan_TCI = vlan_eth_hdr(skb)->h_vlan_TCI;
- vh->h_vlan_encapsulated_proto = skb->protocol;
- } else {
- vlan_set_tci(nskb, vlan_get_tci(skb));
- }
- skb_reset_mac_header(nskb);
-
- /* Protocol */
- if (skb->protocol == htons(ETH_P_IP))
- ipv4_build_icmp(skb, nskb, mtu, payload_length);
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- else
- ipv6_build_icmp(skb, nskb, mtu, payload_length);
-#endif
-
- /*
- * Assume that flow-based keys are symmetric with respect to input
- * and output, and use the key that we were going to put on the
- * outgoing packet for the fake received packet. If the keys are
- * not symmetric then PMTUD needs to be disabled, since we won't
- * have any way of synthesizing packets.
- */
- if ((mutable->flags & (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION)) ==
- (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
- OVS_CB(nskb)->tun_id = flow_key;
-
- if (unlikely(compute_ip_summed(nskb, false))) {
- kfree_skb(nskb);
- return false;
- }
-
- ovs_vport_receive(vport, nskb);
-
- return true;
-}
-
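-/*
- * Returns false when a "fragmentation needed" reply has been generated
- * and the packet should not be tunneled; otherwise stores the DF bit to
- * use for the outer header in *frag_offp and returns true.
- */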
-static bool check_mtu(struct sk_buff *skb,
- struct vport *vport,
- const struct tnl_mutable_config *mutable,
- const struct rtable *rt, __be16 *frag_offp)
-{
- bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
- bool pmtud = mutable->flags & TNL_F_PMTUD;
- __be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
- int mtu = 0;
- unsigned int packet_length = skb->len - ETH_HLEN;
-
- /* Allow for one level of tagging in the packet length. */
- if (!vlan_tx_tag_present(skb) &&
- eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
- packet_length -= VLAN_HLEN;
-
- if (pmtud) {
- int vlan_header = 0;
-
- /* The tag needs to go in the packet regardless of where it
- * currently is, so subtract it from the MTU.
- */
- if (vlan_tx_tag_present(skb) ||
- eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
- vlan_header = VLAN_HLEN;
-
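- /* The usable MTU is whatever remains of the route MTU once
- * the tunnel header, inner Ethernet header, and any VLAN tag
- * have been accounted for.
- */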
- mtu = dst_mtu(&rt_dst(rt))
- - ETH_HLEN
- - mutable->tunnel_hlen
- - vlan_header;
- }
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
-
- if (df_inherit)
- frag_off = iph->frag_off & htons(IP_DF);
-
- if (pmtud && iph->frag_off & htons(IP_DF)) {
- mtu = max(mtu, IP_MIN_MTU);
-
- if (packet_length > mtu &&
- ovs_tnl_frag_needed(vport, mutable, skb, mtu,
- OVS_CB(skb)->tun_id))
- return false;
- }
- }
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- else if (skb->protocol == htons(ETH_P_IPV6)) {
- /* IPv6 requires end hosts to do fragmentation
- * if the packet is above the minimum MTU, so
- * treat such packets as having DF set.
- */
- if (df_inherit && packet_length > IPV6_MIN_MTU)
- frag_off = htons(IP_DF);
-
- if (pmtud) {
- mtu = max(mtu, IPV6_MIN_MTU);
-
- if (packet_length > mtu &&
- ovs_tnl_frag_needed(vport, mutable, skb, mtu,
- OVS_CB(skb)->tun_id))
- return false;
- }
- }
-#endif
-
- *frag_offp = frag_off;
- return true;
-}
-
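-/*
- * Writes the outer IP header, then lets the protocol-specific hook
- * append its own encapsulation header after it.
- */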
-static void create_tunnel_header(const struct vport *vport,
- const struct tnl_mutable_config *mutable,
- const struct rtable *rt, void *header)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct iphdr *iph = header;
-
- iph->version = 4;
- iph->ihl = sizeof(struct iphdr) >> 2;
- iph->frag_off = htons(IP_DF);
- iph->protocol = tnl_vport->tnl_ops->ipproto;
- iph->tos = mutable->tos;
- iph->daddr = rt->rt_dst;
- iph->saddr = rt->rt_src;
- iph->ttl = mutable->ttl;
- if (!iph->ttl)
- iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));
-
- tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
-}
-
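-/*
- * The cached header bytes live immediately after the tnl_cache struct,
- * aligned to CACHE_DATA_ALIGN.
- */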
-static void *get_cached_header(const struct tnl_cache *cache)
-{
- return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
-}
-
-#ifdef HAVE_RT_GENID
-static inline int rt_genid(struct net *net)
-{
- return atomic_read(&net->ipv4.rt_genid);
-}
-#endif
-
-static bool check_cache_valid(const struct tnl_cache *cache,
- const struct tnl_mutable_config *mutable)
-{
- struct hh_cache *hh;
-
- if (!cache)
- return false;
-
- hh = rt_hh(cache->rt);
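- /* The entry is stale if the neighbour entry, route generation,
- * hard header sequence, or port configuration has changed, or if
- * a cached flow has died.
- */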
- return hh &&
-#ifdef NEED_CACHE_TIMEOUT
- time_before(jiffies, cache->expiration) &&
-#endif
-#ifdef HAVE_RT_GENID
- rt_genid(dev_net(rt_dst(cache->rt).dev)) == cache->rt->rt_genid &&
-#endif
-#ifdef HAVE_HH_SEQ
- hh->hh_lock.sequence == cache->hh_seq &&
-#endif
- mutable->seq == cache->mutable_seq &&
- (!ovs_is_internal_dev(rt_dst(cache->rt).dev) ||
- (cache->flow && !cache->flow->dead));
-}
-
-static void __cache_cleaner(struct tnl_vport *tnl_vport)
-{
- const struct tnl_mutable_config *mutable =
- rcu_dereference(tnl_vport->mutable);
- const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
-
- if (cache && !check_cache_valid(cache, mutable) &&
- spin_trylock_bh(&tnl_vport->cache_lock)) {
- assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
- spin_unlock_bh(&tnl_vport->cache_lock);
- }
-}
-
-static void cache_cleaner(struct work_struct *work)
-{
- int i;
-
- schedule_cache_cleaner();
-
- rcu_read_lock();
- for (i = 0; i < PORT_TABLE_SIZE; i++) {
- struct hlist_node *n;
- struct hlist_head *bucket;
- struct tnl_vport *tnl_vport;
-
- bucket = &port_table[i];
- hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
- __cache_cleaner(tnl_vport);
- }
- rcu_read_unlock();
-}
-
-static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
-{
- void *cache_data = get_cached_header(cache);
- int hh_off;
-
-#ifdef HAVE_HH_SEQ
- unsigned int hh_seq;
-
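- /* Retry the copy if the hard header changes underneath us. */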
- do {
- hh_seq = read_seqbegin(&hh->hh_lock);
- hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
- memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
- cache->hh_len = hh->hh_len;
- } while (read_seqretry(&hh->hh_lock, hh_seq));
-
- cache->hh_seq = hh_seq;
-#else
- read_lock(&hh->hh_lock);
- hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
- memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
- cache->hh_len = hh->hh_len;
- read_unlock(&hh->hh_lock);
-#endif
-}
-
-static struct tnl_cache *build_cache(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- struct rtable *rt)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct tnl_cache *cache;
- void *cache_data;
- int cache_len;
- struct hh_cache *hh;
-
- if (!(mutable->flags & TNL_F_HDR_CACHE))
- return NULL;
-
- /*
- * If there is no entry in the ARP cache or if this device does not
- * support hard header caching, just fall back to the IP stack.
- */
-
- hh = rt_hh(rt);
- if (!hh)
- return NULL;
-
- /*
- * If the lock is contended, fall back to directly building the header.
- * We're not going to help performance by sitting here spinning.
- */
- if (!spin_trylock(&tnl_vport->cache_lock))
- return NULL;
-
- cache = cache_dereference(tnl_vport);
- if (check_cache_valid(cache, mutable))
- goto unlock;
- cache = NULL;
-
- cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + mutable->tunnel_hlen;
-
- cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
- cache_len, GFP_ATOMIC);
- if (!cache)
- goto unlock;
-
- create_eth_hdr(cache, hh);
- cache_data = get_cached_header(cache) + cache->hh_len;
- cache->len = cache->hh_len + mutable->tunnel_hlen;
-
- create_tunnel_header(vport, mutable, rt, cache_data);
-
- cache->mutable_seq = mutable->seq;
- cache->rt = rt;
-#ifdef NEED_CACHE_TIMEOUT
- cache->expiration = jiffies + tnl_vport->cache_exp_interval;
-#endif
-
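- /* If the tunnel egresses through an OVS internal device, look up
- * the flow that the encapsulated packet will hit so that cache
- * validity can track the flow's liveness.
- */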
- if (ovs_is_internal_dev(rt_dst(rt).dev)) {
- struct sw_flow_key flow_key;
- struct vport *dst_vport;
- struct sk_buff *skb;
- int err;
- int flow_key_len;
- struct sw_flow *flow;
-
- dst_vport = ovs_internal_dev_get_vport(rt_dst(rt).dev);
- if (!dst_vport)
- goto done;
-
- skb = alloc_skb(cache->len, GFP_ATOMIC);
- if (!skb)
- goto done;
-
- __skb_put(skb, cache->len);
- memcpy(skb->data, get_cached_header(cache), cache->len);
-
- err = ovs_flow_extract(skb, dst_vport->port_no, &flow_key,
- &flow_key_len);
-
- consume_skb(skb);
- if (err)
- goto done;
-
- flow = ovs_flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
- &flow_key, flow_key_len);
- if (flow) {
- cache->flow = flow;
- ovs_flow_hold(flow);
- }
- }
-
-done:
- assign_cache_rcu(vport, cache);
-
-unlock:
- spin_unlock(&tnl_vport->cache_lock);
-
- return cache;
-}
-
-static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
- u8 ipproto, u8 tos)