- /* Set up checksumming pointers for checksum-deferred packets
- * on Xen. Otherwise, dev_queue_xmit() will try to do this
- * when we send the packet out on the wire, and it will fail at
- * that point because skb_checksum_setup() will not look inside
- * an 802.1Q header. */
- err = vswitch_skb_checksum_setup(skb);
- if (unlikely(err)) {
- kfree_skb(skb);
- return ERR_PTR(err);
- }
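Since the function returns a pointer, the error is propagated with the kernel's ERR_PTR convention. A minimal sketch of that convention, outside the patch itself (checked_clone() is a hypothetical helper, not an OVS function):

#include <linux/err.h>
#include <linux/skbuff.h>

/* Encode an allocation failure in the returned pointer itself. */
static struct sk_buff *checked_clone(struct sk_buff *skb, gfp_t gfp)
{
	struct sk_buff *clone = skb_clone(skb, gfp);

	return clone ? clone : ERR_PTR(-ENOMEM);
}

A caller tests the result with IS_ERR() and recovers the errno with PTR_ERR(), exactly as the GSO path below does with the return value of skb_gso_segment().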
-
- /* GSO is not implemented for packets with an 802.1Q header, so
- * we have to do segmentation before we add that header.
- *
- * GSO does work with hardware-accelerated VLAN tagging, but we
- * can't use hardware-accelerated VLAN tagging since it
- * requires the device to have a VLAN group configured (with
- * e.g. vconfig(8)) and we don't do that.
- *
- * Having to do this here may be a performance loss, since we
- * can't take advantage of TSO hardware support, although it
- * does not make a measurable network performance difference
- * for 1G Ethernet. Fixing that would require patching the
- * kernel (either to add GSO support to the VLAN protocol or to
- * support hardware-accelerated VLAN tagging without VLAN
- * groups configured). */
- if (skb_is_gso(skb)) {
- struct sk_buff *segs;
-
- segs = skb_gso_segment(skb, 0);
- kfree_skb(skb);
- if (unlikely(IS_ERR(segs)))
- return ERR_CAST(segs);
-
- do {
- struct sk_buff *nskb = segs->next;
- int err;
-
- segs->next = NULL;
-
-			/* GSO can change the checksum type, so update it. */
- compute_ip_summed(segs, true);
-
- segs = __vlan_put_tag(segs, tci);
- err = -ENOMEM;
- if (segs) {
- err = execute_actions(dp, segs,
- key, a + 1,
- n_actions - 1,
- gfp);
- }
-
- if (unlikely(err)) {
- while ((segs = nskb)) {
- nskb = segs->next;
- segs->next = NULL;
- kfree_skb(segs);
- }
- return ERR_PTR(err);
- }
-
- segs = nskb;
- } while (segs->next);
-
- skb = segs;
- compute_ip_summed(skb, true);
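The block above boils down to a segment-then-tag loop. Below is a hedged sketch of that pattern, assuming the two-argument __vlan_put_tag() of the kernels this code targeted and a hypothetical handle_segment() callback; unlike the original, it processes every segment inside the loop rather than handing the last one back to the caller:

#include <linux/err.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int segment_then_tag(struct sk_buff *skb, u16 tci,
			    int (*handle_segment)(struct sk_buff *))
{
	struct sk_buff *segs, *next;
	int err;

	segs = skb_gso_segment(skb, 0);	/* software segmentation */
	kfree_skb(skb);			/* each segment holds its own ref */
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while (segs) {
		next = segs->next;
		segs->next = NULL;

		/* __vlan_put_tag() consumes the skb and returns NULL on
		 * allocation failure. */
		segs = __vlan_put_tag(segs, tci);
		err = segs ? handle_segment(segs) : -ENOMEM;
		if (unlikely(err)) {
			/* Free the segments we have not handled yet. */
			while ((segs = next) != NULL) {
				next = segs->next;
				segs->next = NULL;
				kfree_skb(segs);
			}
			return err;
		}
		segs = next;
	}
	return 0;
}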
+ }
+	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid,
+			       ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
+ return 0;
+}
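The ~VLAN_TAG_PRESENT mask is needed because, in kernels of this era, OVS encoded "a VLAN tag is present" by setting the VLAN_TAG_PRESENT bit (the CFI bit position) inside the TCI passed down from userspace, so the bit must be stripped before the real TCI is stored in the skb. A sketch of how such a TCI might be composed (make_ovs_tci() is a hypothetical helper, not an OVS API):

#include <linux/if_vlan.h>

static __be16 make_ovs_tci(u16 vid, u8 pcp)
{
	u16 tci = (vid & VLAN_VID_MASK) | ((u16)pcp << VLAN_PRIO_SHIFT);

	/* Mark the tag as present in the CFI bit position. */
	return htons(tci | VLAN_TAG_PRESENT);
}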
+
+static int set_eth_addr(struct sk_buff *skb,
+ const struct ovs_key_ethernet *eth_key)
+{
+ int err;
+ err = make_writable(skb, ETH_HLEN);
+ if (unlikely(err))
+ return err;
+
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+
+ ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
+ ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);
+
+ ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+
+ return 0;
+}
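set_eth_addr() brackets the rewrite with skb_postpull_rcsum() and ovs_skb_postpush_rcsum() so that a CHECKSUM_COMPLETE skb->csum stays valid: the old header bytes are subtracted from the running sum before the copy and the new ones are added back afterwards. A minimal sketch of the same pattern with a hypothetical rewrite_region() helper; the push step is essentially what ovs_skb_postpush_rcsum() does in the OVS tree:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/checksum.h>

static void rewrite_region(struct sk_buff *skb, void *start,
			   const void *new_bytes, unsigned int len)
{
	skb_postpull_rcsum(skb, start, len);	/* csum -= old bytes */
	memcpy(start, new_bytes, len);
	if (skb->ip_summed == CHECKSUM_COMPLETE)	/* csum += new bytes */
		skb->csum = csum_add(skb->csum,
				     csum_partial(start, len, 0));
}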
+
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+ __be32 *addr, __be32 new_addr)
+{
+ int transport_len = skb->len - skb_transport_offset(skb);
+
+ if (nh->protocol == IPPROTO_TCP) {
+ if (likely(transport_len >= sizeof(struct tcphdr)))
+ inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
+ *addr, new_addr, 1);
+ } else if (nh->protocol == IPPROTO_UDP) {
+ if (likely(transport_len >= sizeof(struct udphdr))) {
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ inet_proto_csum_replace4(&uh->check, skb,
+ *addr, new_addr, 1);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }