+ struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
+ return &ofproto->up;
+}
+
+/* Implementation of the ofproto ->dealloc() hook: frees the ofproto_dpif
+ * wrapper that alloc() obtained with xmalloc(). */
+static void
+dealloc(struct ofproto *ofproto_)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+    free(ofproto);
+}
+
+/* Releases one reference to 'backer'.  When the last reference is dropped,
+ * tears the backer down: destroys its odp-to-ofport map, removes it from
+ * the global 'all_dpif_backers' table, closes its dpif, and frees it. */
+static void
+close_dpif_backer(struct dpif_backer *backer)
+{
+    struct shash_node *node;
+
+    assert(backer->refcount > 0);
+
+    if (--backer->refcount) {
+        /* Other users still hold references; keep the backer alive. */
+        return;
+    }
+
+    hmap_destroy(&backer->odp_to_ofport_map);
+    /* Look the node up before freeing 'backer->type': the string being freed
+     * is the key used for the lookup, so this order must not change. */
+    node = shash_find(&all_dpif_backers, backer->type);
+    free(backer->type);
+    shash_delete(&all_dpif_backers, node);
+    dpif_close(backer->dpif);
+
+    free(backer);
+}
+
+/* Datapath port slated for removal from datapath. */
+struct odp_garbage {
+    struct list list_node;      /* Element in a list of ports to delete. */
+    uint32_t odp_port;          /* Datapath port number to delete. */
+};
+
+/* Opens (or shares) the dpif backer for datapath type 'type' and stores it
+ * in '*backerp'.  If a backer of this type already exists, bumps its
+ * reference count and reuses it.  Otherwise creates a datapath named
+ * "ovs-<type>" (deleting any other stale datapaths of the same type first),
+ * initializes the backer, garbage-collects datapath ports that are neither
+ * expected by 'init_ofp_ports' nor the datapath's own local port, and
+ * enables upcall reception.
+ *
+ * Returns 0 on success, otherwise a positive errno value. */
+static int
+open_dpif_backer(const char *type, struct dpif_backer **backerp)
+{
+    struct dpif_backer *backer;
+    struct dpif_port_dump port_dump;
+    struct dpif_port port;
+    struct shash_node *node;
+    struct list garbage_list;
+    struct odp_garbage *garbage, *next;
+    struct sset names;
+    char *backer_name;
+    const char *name;
+    int error;
+
+    /* Reuse an existing backer for this datapath type if there is one. */
+    backer = shash_find_data(&all_dpif_backers, type);
+    if (backer) {
+        backer->refcount++;
+        *backerp = backer;
+        return 0;
+    }
+
+    backer_name = xasprintf("ovs-%s", type);
+
+    /* Remove any existing datapaths, since we assume we're the only
+     * userspace controlling the datapath. */
+    sset_init(&names);
+    dp_enumerate_names(type, &names);
+    SSET_FOR_EACH(name, &names) {
+        struct dpif *old_dpif;
+
+        /* Don't remove our backer if it exists. */
+        if (!strcmp(name, backer_name)) {
+            continue;
+        }
+
+        if (dpif_open(name, type, &old_dpif)) {
+            VLOG_WARN("couldn't open old datapath %s to remove it", name);
+        } else {
+            dpif_delete(old_dpif);
+            dpif_close(old_dpif);
+        }
+    }
+    sset_destroy(&names);
+
+    backer = xmalloc(sizeof *backer);
+
+    error = dpif_create_and_open(backer_name, type, &backer->dpif);
+    free(backer_name);
+    if (error) {
+        VLOG_ERR("failed to open datapath of type %s: %s", type,
+                 strerror(error));
+        free(backer);           /* Was leaked on this error path. */
+        return error;
+    }
+
+    backer->type = xstrdup(type);
+    backer->refcount = 1;
+    hmap_init(&backer->odp_to_ofport_map);
+    timer_set_duration(&backer->next_expiration, 1000);
+    *backerp = backer;
+
+    dpif_flow_flush(backer->dpif);
+
+    /* Loop through the ports already on the datapath and remove any
+     * that we don't need anymore. */
+    list_init(&garbage_list);
+    dpif_port_dump_start(&port_dump, backer->dpif);
+    while (dpif_port_dump_next(&port_dump, &port)) {
+        node = shash_find(&init_ofp_ports, port.name);
+        if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
+            garbage = xmalloc(sizeof *garbage);
+            garbage->odp_port = port.port_no;
+            list_push_front(&garbage_list, &garbage->list_node);
+        }
+    }
+    dpif_port_dump_done(&port_dump);
+
+    LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) {
+        dpif_port_del(backer->dpif, garbage->odp_port);
+        list_remove(&garbage->list_node);
+        free(garbage);
+    }
+
+    shash_add(&all_dpif_backers, type, backer);
+
+    /* Start listening for upcalls; on failure, close_dpif_backer() undoes
+     * all of the initialization above (the backer is already registered in
+     * 'all_dpif_backers' so the teardown path can find and remove it). */
+    error = dpif_recv_set(backer->dpif, true);
+    if (error) {
+        VLOG_ERR("failed to listen on datapath of type %s: %s",
+                 type, strerror(error));
+        close_dpif_backer(backer);
+        return error;
+    }
+
+    return error;
+}
+
+/* Implementation of the ofproto ->construct() hook: initializes the
+ * ofproto_dpif returned by alloc(), opening (or sharing) a dpif backer for
+ * its datapath type and setting up all of its internal data structures.
+ *
+ * Returns 0 on success, otherwise an error from open_dpif_backer() or
+ * add_internal_flows(). */
+static int
+construct(struct ofproto *ofproto_)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+    struct shash_node *node, *next;
+    int max_ports;
+    int error;
+    int i;
+
+    error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
+    if (error) {
+        return error;
+    }
+
+    /* Advertise no more OpenFlow ports than the datapath supports. */
+    max_ports = dpif_get_max_ports(ofproto->backer->dpif);
+    ofproto_init_max_ports(ofproto_, MIN(max_ports, OFPP_MAX));
+
+    ofproto->n_matches = 0;
+
+    ofproto->netflow = NULL;
+    ofproto->sflow = NULL;
+    ofproto->stp = NULL;
+    hmap_init(&ofproto->bundles);
+    ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
+    for (i = 0; i < MAX_MIRRORS; i++) {
+        ofproto->mirrors[i] = NULL;
+    }
+    ofproto->has_bonded_bundles = false;
+
+    hmap_init(&ofproto->facets);
+    hmap_init(&ofproto->subfacets);
+    ofproto->governor = NULL;
+
+    /* Seed each flow table's hashing basis randomly. */
+    for (i = 0; i < N_TABLES; i++) {
+        struct table_dpif *table = &ofproto->tables[i];
+
+        table->catchall_table = NULL;
+        table->other_table = NULL;
+        table->basis = random_uint32();
+    }
+    ofproto->need_revalidate = 0;
+    tag_set_init(&ofproto->revalidate_set);
+
+    list_init(&ofproto->completions);
+
+    ofproto_dpif_unixctl_init();
+
+    ofproto->has_mirrors = false;
+    ofproto->has_bundle_action = false;
+
+    hmap_init(&ofproto->vlandev_map);
+    hmap_init(&ofproto->realdev_vid_map);
+
+    sset_init(&ofproto->ports);
+    sset_init(&ofproto->port_poll_set);
+    ofproto->port_poll_errno = 0;
+
+    /* Claim the interface hints recorded for this bridge: if the datapath
+     * already has such a port, remember it in 'ofproto->ports', then free
+     * the consumed hint. */
+    SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
+        const struct iface_hint *iface_hint = node->data;
+
+        if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
+            /* Check if the datapath already has this port. */
+            if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
+                sset_add(&ofproto->ports, node->name);
+            }
+
+            free(iface_hint->br_name);
+            free(iface_hint->br_type);
+            shash_delete(&init_ofp_ports, node);
+        }
+    }
+
+    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
+                hash_string(ofproto->up.name, 0));
+    memset(&ofproto->stats, 0, sizeof ofproto->stats);
+
+    ofproto_init_tables(ofproto_, N_TABLES);
+    error = add_internal_flows(ofproto);
+    /* Keep the internal table invisible and immutable to OpenFlow clients. */
+    ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;
+
+    return error;
+}
+
+/* Adds a flow to the hidden TBL_INTERNAL table that matches register 0
+ * against 'id' (with an otherwise catch-all match at priority 0) and
+ * executes 'ofpacts'.  On success, stores the installed rule in '*rulep'
+ * and returns 0; on failure, logs and returns the ofproto_flow_mod()
+ * error. */
+static int
+add_internal_flow(struct ofproto_dpif *ofproto, int id,
+                  const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
+{
+    struct ofputil_flow_mod fm;
+    int error;
+
+    match_init_catchall(&fm.match);
+    fm.priority = 0;
+    match_set_reg(&fm.match, 0, id);
+    fm.new_cookie = htonll(0);
+    fm.cookie = htonll(0);
+    fm.cookie_mask = htonll(0);
+    fm.table_id = TBL_INTERNAL;
+    fm.command = OFPFC_ADD;
+    fm.idle_timeout = 0;        /* Never expire. */
+    fm.hard_timeout = 0;
+    fm.buffer_id = 0;
+    fm.out_port = 0;
+    fm.flags = 0;
+    fm.ofpacts = ofpacts->data;
+    fm.ofpacts_len = ofpacts->size;
+
+    error = ofproto_flow_mod(&ofproto->up, &fm);
+    if (error) {
+        VLOG_ERR_RL(&rl, "failed to add internal flow %d (%s)",
+                    id, ofperr_to_string(error));
+        return error;
+    }
+
+    /* The flow was just added, so the lookup must find it. */
+    *rulep = rule_dpif_lookup__(ofproto, &fm.match.flow, TBL_INTERNAL);
+    assert(*rulep != NULL);
+
+    return 0;
+}
+
+/* Installs 'ofproto''s internal flows: a "miss" rule (id 1) that sends
+ * unmatched packets to the controller with reason OFPR_NO_MATCH, and a
+ * "no packet-in" rule (id 2) with an empty action list, i.e. drop.
+ * Returns 0 on success, otherwise an add_internal_flow() error. */
+static int
+add_internal_flows(struct ofproto_dpif *ofproto)
+{
+    struct ofpact_controller *controller;
+    uint64_t ofpacts_stub[128 / 8];
+    struct ofpbuf ofpacts;
+    int error;
+    int id;
+
+    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
+    id = 1;
+
+    /* Miss rule: punt the full packet to controller id 0. */
+    controller = ofpact_put_CONTROLLER(&ofpacts);
+    controller->max_len = UINT16_MAX;
+    controller->controller_id = 0;
+    controller->reason = OFPR_NO_MATCH;
+    ofpact_pad(&ofpacts);
+
+    error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->miss_rule);
+    if (error) {
+        return error;
+    }
+
+    /* No-packet-in rule: empty action list. */
+    ofpbuf_clear(&ofpacts);
+    error = add_internal_flow(ofproto, id++, &ofpacts,
+                              &ofproto->no_packet_in_rule);
+    return error;
+}
+
+/* Completes, with status 0, every ofoperation queued on
+ * 'ofproto->completions', unlinking and freeing each bookkeeping node as it
+ * goes (hence the _SAFE iteration). */
+static void
+complete_operations(struct ofproto_dpif *ofproto)
+{
+    struct dpif_completion *c, *next;
+
+    LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) {
+        ofoperation_complete(c->op, 0);
+        list_remove(&c->list_node);
+        free(c);
+    }
+}
+
+static void
+destruct(struct ofproto *ofproto_)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);