}
EXPORT_SYMBOL_GPL(get_dp);
-struct datapath *get_dp_locked(int dp_idx)
+static struct datapath *get_dp_locked(int dp_idx)
{
struct datapath *dp;
kfree(dp);
}
-struct kobj_type dp_ktype = {
+static struct kobj_type dp_ktype = {
.release = release_dp
};
kfree(p);
}
-struct kobj_type brport_ktype = {
+static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
.sysfs_ops = &brport_sysfs_ops,
#endif
* in dp_frame_hook(). In turn dp_frame_hook() can reject them
* back to network stack, but that's a waste of time. */
}
+ dev_disable_lro(dev);
rcu_assign_pointer(dp->ports[port_no], p);
list_add_rcu(&p->node, &dp->port_list);
dp->n_ports++;
set_dp_devs_mtu(dp, dev);
dp_sysfs_add_if(dp->ports[port_no]);
- err = __put_user(port_no, &port.port);
+ err = __put_user(port_no, &portp->port);
out_put:
dev_put(dev);
static void
do_port_input(struct net_bridge_port *p, struct sk_buff *skb)
{
+ /* LRO isn't suitable for bridging. We turn it off but make sure
+ * that it wasn't reactivated. */
+ if (skb_warn_if_lro(skb))
+ return;
+
/* Make our own copy of the packet. Otherwise we will mangle the
* packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
* (No one comes after us, since we tell handle_bridge() that we took
WARN_ON_ONCE(skb_shared(skb));
+ compute_ip_summed(skb, false);
+
/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
stats = percpu_ptr(dp->stats_percpu, smp_processor_id());
if (skb->protocol != htons(ETH_P_IP))
goto out;
- if (!skb_pull_up_to(skb, skb_network_header(skb) + 1))
+ if (!skb_pull_up_to(skb, skb_network_header(skb) + sizeof(struct iphdr)))
goto out;
iph = ip_hdr(skb);
* skb_forward_csum(). It is slightly different because we are only concerned
* with bridging and not other types of forwarding and can get away with
* slightly more optimal behavior.*/
+/* Translate skb->ip_summed into one of our kernel-version-independent
+ * OVS_CSUM_* values and record it in OVS_CB(skb)->ip_summed.
+ *
+ * @skb:  packet whose checksum state is recorded.
+ * @xmit: true on the transmit path, where a CHECKSUM_HW value has already
+ *        been converted (by skb_checksum_setup()) and so means "partial";
+ *        false on receive, where it is treated as "complete".
+ *
+ * Unrecognized ip_summed values are logged and conservatively mapped to
+ * OVS_CSUM_NONE. */
+void
+compute_ip_summed(struct sk_buff *skb, bool xmit)
+{
+ /* For our convenience these defines change repeatedly between kernel
+ * versions, so we can't just copy them over... */
+ switch (skb->ip_summed) {
+ case CHECKSUM_NONE:
+ OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
+ break;
+ case CHECKSUM_UNNECESSARY:
+ OVS_CB(skb)->ip_summed = OVS_CSUM_UNNECESSARY;
+ break;
+#ifdef CHECKSUM_HW
+ /* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
+ * However, we should only get CHECKSUM_PARTIAL packets from Xen, which
+ * uses some special fields to represent this (see below). Since we
+ * can only make one type work, pick the one that actually happens in
+ * practice.
+ *
+ * The one exception to this is if we are on the transmit path
+ * (basically after skb_checksum_setup() has been run) the type has
+ * already been converted, so we should stay with that. */
+ case CHECKSUM_HW:
+ if (!xmit)
+ OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
+ else
+ OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
+
+ break;
+#else
+ case CHECKSUM_COMPLETE:
+ OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
+ break;
+ case CHECKSUM_PARTIAL:
+ OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
+ break;
+#endif
+ default:
+ printk(KERN_ERR "openvswitch: unknown checksum type %d\n",
+ skb->ip_summed);
+ /* None seems the safest... */
+ OVS_CB(skb)->ip_summed = OVS_CSUM_NONE;
+ }
+
+#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
+ /* Xen has a special way of representing CHECKSUM_PARTIAL on older
+ * kernels. It should not be set on the transmit path though. */
+ if (skb->proto_csum_blank)
+ OVS_CB(skb)->ip_summed = OVS_CSUM_PARTIAL;
+
+ WARN_ON_ONCE(skb->proto_csum_blank && xmit);
+#endif
+}
+
+/* Downgrade a hardware-completed checksum (OVS_CSUM_COMPLETE, as recorded
+ * by compute_ip_summed()) back to CHECKSUM_NONE before the packet is
+ * forwarded.  NOTE(review): presumably because forwarding can modify the
+ * packet and invalidate the stored complete checksum -- confirm against
+ * skb_forward_csum(), which the surrounding comment says this mirrors. */
void
forward_ip_summed(struct sk_buff *skb)
{
#ifdef CHECKSUM_HW
- if (skb->ip_summed == CHECKSUM_HW)
+ if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
skb->ip_summed = CHECKSUM_NONE;
#endif
}
error = -EFAULT;
if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
goto error;
- uf.flow.key.reserved = 0;
+ memset(uf.flow.key.reserved, 0, sizeof uf.flow.key.reserved);
table = rcu_dereference(dp->table);
flow = dp_table_lookup(table, &uf.flow.key);
error = -EFAULT;
if (copy_from_user(&uf, ufp, sizeof uf))
goto error;
- uf.key.reserved = 0;
+ memset(uf.key.reserved, 0, sizeof uf.key.reserved);
flow = dp_table_lookup(table, &uf.key);
error = -ENOENT;
if (__copy_from_user(&uf, ufp, sizeof uf))
return -EFAULT;
- uf.key.reserved = 0;
+ memset(uf.key.reserved, 0, sizeof uf.key.reserved);
flow = dp_table_lookup(table, &uf.key);
if (!flow)