		struct tbl *new_table;
		new_table = tbl_expand(cur_table);
-		if (IS_ERR(new_table))
-			return PTR_ERR(new_table);
-
-		rcu_assign_pointer(port_table, new_table);
-		tbl_deferred_destroy(cur_table, NULL);
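+		/* tbl_expand() returns ERR_PTR(-ENOSPC) once the table is at
+		 * its maximum size; treat that as non-fatal and fall through
+		 * to insert into the current table instead of failing.
+		 */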
+		if (IS_ERR(new_table)) {
+			if (PTR_ERR(new_table) != -ENOSPC)
+				return PTR_ERR(new_table);
+		} else {
+			rcu_assign_pointer(port_table, new_table);
+			tbl_deferred_destroy(cur_table, NULL);
+		}
	}

	err = tbl_insert(rtnl_dereference(port_table), &tnl_vport->tbl_node,
	secpath_reset(skb);
	ecn_decapsulate(skb, tos);
-	compute_ip_summed(skb, false);
	vlan_set_tci(skb, 0);
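+	/* compute_ip_summed() can now fail; drop the packet here rather
+	 * than handing a bad checksum state to vport_receive().
+	 */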
+	if (unlikely(compute_ip_summed(skb, false))) {
+		kfree_skb(skb);
+		return;
+	}
+
	vport_receive(vport, skb);
}
	    (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
		OVS_CB(nskb)->tun_id = flow_key;
-	compute_ip_summed(nskb, false);
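+	/* Same handling as the receive path above: a compute_ip_summed()
+	 * failure means the packet must be dropped.
+	 */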
+	if (unlikely(compute_ip_summed(nskb, false))) {
+		kfree_skb(nskb);
+		return false;
+	}
+
	vport_receive(vport, nskb);
	return true;
		err = flow_extract(skb, dst_vport->port_no, &flow_key,
				   &flow_key_len, &is_frag);
-		kfree_skb(skb);
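+		/* The skb is no longer needed once the flow key has been
+		 * extracted; freeing it with consume_skb() keeps it off the
+		 * drop statistics.
+		 */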
+		consume_skb(skb);
		if (err || is_frag)
			goto done;
	}
}
-static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
-{
-	if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
-		struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
-		if (unlikely(!nskb)) {
-			kfree_skb(skb);
-			return ERR_PTR(-ENOMEM);
-		}
-
-		set_skb_csum_bits(skb, nskb);
-
-		if (skb->sk)
-			skb_set_owner_w(nskb, skb->sk);
-
-		kfree_skb(skb);
-		return nskb;
-	}
-
-	return skb;
-}
-
static inline bool need_linearize(const struct sk_buff *skb)
{
	int i;
	int min_headroom;
	int err;
-	forward_ip_summed(skb);
-
-	err = vswitch_skb_checksum_setup(skb);
-	if (unlikely(err))
-		goto error_free;
-
	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ mutable->tunnel_hlen
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
-	skb = check_headroom(skb, min_headroom);
-	if (IS_ERR(skb)) {
-		err = PTR_ERR(skb);
-		goto error;
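+	/* Expand the headroom in place rather than allocating a new skb
+	 * as check_headroom() did; the extra 16 bytes of slack match the
+	 * old behaviour.
+	 */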
+	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+		int head_delta = SKB_DATA_ALIGN(min_headroom -
+						skb_headroom(skb) +
+						16);
+		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+					0, GFP_ATOMIC);
+		if (unlikely(err))
+			goto error_free;
	}
+	forward_ip_summed(skb, true);
+
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;
		nskb = skb_gso_segment(skb, 0);
-		kfree_skb(skb);
		if (IS_ERR(nskb)) {
+			kfree_skb(skb);
			err = PTR_ERR(nskb);
			goto error;
		}
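+		/* Segmentation succeeded and the original skb is no longer
+		 * needed, so release it with consume_skb() instead of
+		 * kfree_skb().
+		 */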
+		consume_skb(skb);
		skb = nskb;
-	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong.  We linearize now to avoid
		 * this problem.
		 */
		if (unlikely(need_linearize(skb))) {
			err = __skb_linearize(skb);
			if (unlikely(err))
				goto error_free;
		}

		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error_free;
-	} else if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->ip_summed = CHECKSUM_NONE;
+	}
+
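+	/* All checksum work is finished by this point, so the state is
+	 * unconditionally recorded as OVS_CSUM_NONE instead of only
+	 * clearing CHECKSUM_COMPLETE as before.
+	 */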
+	set_ip_summed(skb, OVS_CSUM_NONE);

	return skb;
		ip_send_check(iph);

		if (cache_vport) {
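+			/* Check the checksum state up front so the packet can
+			 * still be dropped cleanly before it is handed back
+			 * into the datapath.
+			 */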
+			if (unlikely(compute_ip_summed(skb, true))) {
+				kfree_skb(skb);
+				goto next;
+			}
+
			OVS_CB(skb)->flow = cache->flow;
-			compute_ip_summed(skb, true);
			vport_receive(cache_vport, skb);
			sent_len += orig_len;
		} else {