+/* Generic Netlink family for OVS_PACKET_* commands (packet upcalls to
+ * userspace).  GENL_ID_GENERATE asks the kernel to assign a free family
+ * id at registration time. */
+static struct genl_family dp_packet_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = sizeof(struct ovs_header),
+ .name = OVS_PACKET_FAMILY,
+ .version = 1,
+ .maxattr = OVS_PACKET_ATTR_MAX
+};
+
+/* Generic Netlink multicast groups for upcalls.
+ *
+ * We really want three unique multicast groups per datapath, but we can't even
+ * get one, because genl_register_mc_group() takes genl_lock, which is also
+ * held during Generic Netlink message processing, so trying to acquire
+ * multicast groups during OVS_DP_NEW processing deadlocks. Instead, we
+ * preallocate a few groups and use them round-robin for datapaths. Collision
+ * isn't fatal--multicast listeners should check that the family is the one
+ * that they want and discard others--but it wastes time and memory to receive
+ * unwanted messages.
+ */
+/* Number of preallocated upcall multicast groups.  Must be a power of 2
+ * so a hash can be reduced to an index with a simple mask. */
+#define PACKET_N_MC_GROUPS 16
+static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
+
+/* Returns the Generic Netlink multicast group id to use for upcalls of
+ * type 'cmd' on datapath 'dp'.  Hashes (dp_ifindex, cmd) into one of the
+ * preallocated groups; collisions between datapaths are harmless because
+ * listeners filter received messages by family and command anyway. */
+static u32 packet_mc_group(struct datapath *dp, u8 cmd)
+{
+ u32 hash;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
+
+ hash = jhash_2words(dp->dp_ifindex, cmd, 0);
+ return packet_mc_groups[hash & (PACKET_N_MC_GROUPS - 1)].id;
+}
+
+/* Registers the PACKET_N_MC_GROUPS preallocated multicast groups
+ * ("packet0" .. "packet15") with the packet Generic Netlink family.
+ *
+ * Returns 0 on success or a negative errno value on failure; groups
+ * registered before a failure remain registered (the caller is expected
+ * to unregister the family, which drops them). */
+static int packet_register_mc_groups(void)
+{
+ int i;
+
+ for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
+ struct genl_multicast_group *group = &packet_mc_groups[i];
+ int error;
+
+ /* Bounded write: group->name is a fixed GENL_NAMSIZ buffer.  Plain
+  * sprintf() would silently overflow it if PACKET_N_MC_GROUPS ever
+  * grew enough to lengthen the name. */
+ snprintf(group->name, sizeof(group->name), "packet%d", i);
+ error = genl_register_mc_group(&dp_packet_genl_family, group);
+ if (error)
+ return error;
+ }
+ return 0;
+}
+
+/* Sends 'skb' to userspace for 'dp' as directed by 'upcall_info'.
+ *
+ * Takes ownership of 'skb': on every path it is consumed, freed, or
+ * replaced by its GSO segments before being handed to
+ * queue_userspace_packets().
+ *
+ * Returns 0 on success or a negative errno value; on failure the
+ * datapath's per-CPU n_lost counter is incremented. */
+int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
+{
+ struct dp_stats_percpu *stats;
+ int err;
+
+ /* Upcalls need a private skb; a shared one here is a caller bug. */
+ WARN_ON_ONCE(skb_shared(skb));
+
+ /* NOTE(review): presumably normalizes the skb's checksum state for the
+  * trip to userspace -- confirm against forward_ip_summed()'s definition. */
+ forward_ip_summed(skb, true);
+
+ /* Break apart GSO packets into their component pieces. Otherwise
+ * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
+ if (skb_is_gso(skb)) {
+ struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
+
+ if (IS_ERR(nskb)) {
+ /* Segmentation failed; nothing was queued, drop the original. */
+ kfree_skb(skb);
+ err = PTR_ERR(nskb);
+ goto err;
+ }
+ /* The segment list replaces the original super-packet. */
+ consume_skb(skb);
+ skb = nskb;
+ }
+
+ err = queue_userspace_packets(dp, skb, upcall_info);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ /* Count the lost upcall.  BHs are disabled so smp_processor_id() stays
+  * stable and the seqcount write section cannot be interrupted by a
+  * reader/writer in softirq context on this CPU. */
+ local_bh_disable();
+ stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+
+ write_seqcount_begin(&stats->seqlock);
+ stats->n_lost++;
+ write_seqcount_end(&stats->seqlock);
+
+ local_bh_enable();
+
+ return err;
+}
+
+/* Send each packet in the 'skb' list to userspace for 'dp' as directed by
+ * 'upcall_info'. There will be only one packet unless we broke up a GSO
+ * packet.
+ */
+static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,