X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fdpif-netdev.c;h=85ccaac80bb784ab39a7319b1f0d170f1173a900;hb=093f56c5c5c8b0891e837beb1defd84bc165ac6a;hp=8763e5c673ef99f37253843c4a1b70d770be2532;hpb=5279f8fdf0d424b2b90a65ffc382d58409728d57;p=sliver-openvswitch.git

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 8763e5c67..85ccaac80 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -49,9 +49,11 @@
 #include "packets.h"
 #include "poll-loop.h"
 #include "random.h"
+#include "seq.h"
 #include "shash.h"
 #include "sset.h"
 #include "timeval.h"
+#include "unixctl.h"
 #include "util.h"
 #include "vlog.h"
 
@@ -91,6 +93,7 @@ struct dp_netdev {
 
     struct dp_netdev_queue queues[N_QUEUES];
     struct hmap flow_table;     /* Flow table. */
+    struct seq *queue_seq;      /* Incremented whenever a packet is queued. */
 
     /* Statistics. */
     long long int n_hit;        /* Number of flow table matches. */
@@ -100,7 +103,7 @@ struct dp_netdev {
     /* Ports. */
     struct dp_netdev_port *ports[MAX_PORTS];
     struct list port_list;
-    unsigned int serial;
+    struct seq *port_seq;       /* Incremented whenever a port changes. */
 };
 
 /* A port in a netdev-based datapath. */
@@ -133,14 +136,14 @@ struct dp_netdev_flow {
 struct dpif_netdev {
     struct dpif dpif;
     struct dp_netdev *dp;
-    unsigned int dp_serial;
+    uint64_t last_port_seq;
 };
 
 /* All netdev-based datapaths. */
 static struct shash dp_netdevs = SHASH_INITIALIZER(&dp_netdevs);
 
 /* Global lock for all data. */
-static pthread_mutex_t dp_netdev_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
 
 static int get_port_by_number(struct dp_netdev *, odp_port_t port_no,
                               struct dp_netdev_port **portp);
@@ -163,7 +166,7 @@ static void dp_netdev_execute_actions(struct dp_netdev *,
 static void dp_netdev_port_input(struct dp_netdev *dp,
                                  struct dp_netdev_port *port,
                                  struct ofpbuf *packet, uint32_t skb_priority,
-                                 uint32_t skb_mark, const struct flow_tnl *tnl);
+                                 uint32_t pkt_mark, const struct flow_tnl *tnl);
 
 static struct dpif_netdev *
 dpif_netdev_cast(const struct dpif *dpif)
@@ -183,11 +186,11 @@ dpif_netdev_enumerate(struct sset *all_dps)
 {
     struct shash_node *node;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     SHASH_FOR_EACH(node, &dp_netdevs) {
         sset_add(all_dps, node->name);
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return 0;
 }
@@ -198,10 +201,17 @@ dpif_netdev_class_is_dummy(const struct dpif_class *class)
     return class != &dpif_netdev_class;
 }
 
+static bool
+dpif_netdev_class_is_planetlab(const struct dpif_class *class)
+{
+    return class == &dpif_planetlab_class;
+}
+
 static const char *
 dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
 {
     return strcmp(type, "internal") ? type
+                  : dpif_netdev_class_is_planetlab(class) ? "pltap"
                   : dpif_netdev_class_is_dummy(class) ? "dummy"
                  : "tap";
 }
@@ -217,7 +227,7 @@ create_dpif_netdev(struct dp_netdev *dp)
     dpif = xmalloc(sizeof *dpif);
     dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
     dpif->dp = dp;
-    dpif->dp_serial = dp->serial;
+    dpif->last_port_seq = seq_read(dp->port_seq);
 
     return &dpif->dpif;
 }
@@ -229,7 +239,8 @@ choose_port(struct dp_netdev *dp, const char *name)
 {
     uint32_t port_no;
 
-    if (dp->class != &dpif_netdev_class) {
+    if (dp->class != &dpif_netdev_class &&
+        dp->class != &dpif_planetlab_class) {
         const char *p;
         int start_no = 0;
 
@@ -279,8 +290,10 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
     for (i = 0; i < N_QUEUES; i++) {
         dp->queues[i].head = dp->queues[i].tail = 0;
     }
+    dp->queue_seq = seq_create();
     hmap_init(&dp->flow_table);
     list_init(&dp->port_list);
+    dp->port_seq = seq_create();
 
     error = do_add_port(dp, name, "internal", ODPP_LOCAL);
     if (error) {
@@ -301,7 +314,7 @@ dpif_netdev_open(const struct dpif_class *class, const char *name,
     struct dp_netdev *dp;
     int error;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     dp = shash_find_data(&dp_netdevs, name);
     if (!dp) {
         error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
@@ -313,7 +326,7 @@ dpif_netdev_open(const struct dpif_class *class, const char *name,
     if (!error) {
         *dpifp = create_dpif_netdev(dp);
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return error;
 }
@@ -343,7 +356,9 @@ dp_netdev_free(struct dp_netdev *dp)
         do_del_port(dp, port->port_no);
     }
     dp_netdev_purge_queues(dp);
+    seq_destroy(dp->queue_seq);
     hmap_destroy(&dp->flow_table);
+    seq_destroy(dp->port_seq);
     free(dp->name);
     free(dp);
 }
@@ -353,7 +368,7 @@ dpif_netdev_close(struct dpif *dpif)
 {
     struct dp_netdev *dp = get_dp_netdev(dpif);
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
 
     ovs_assert(dp->open_cnt > 0);
     if (--dp->open_cnt == 0 && dp->destroyed) {
@@ -362,7 +377,7 @@ dpif_netdev_close(struct dpif *dpif)
     }
     free(dpif);
 
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static int
@@ -370,9 +385,9 @@ dpif_netdev_destroy(struct dpif *dpif)
 {
     struct dp_netdev *dp = get_dp_netdev(dpif);
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     dp->destroyed = true;
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return 0;
 }
@@ -382,12 +397,12 @@ dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
 {
     struct dp_netdev *dp = get_dp_netdev(dpif);
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     stats->n_flows = hmap_count(&dp->flow_table);
     stats->n_hit = dp->n_hit;
     stats->n_missed = dp->n_missed;
     stats->n_lost = dp->n_lost;
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return 0;
 }
@@ -400,6 +415,7 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
     struct dp_netdev_port *port;
     struct netdev *netdev;
     struct netdev_rx *rx;
+    enum netdev_flags flags;
     const char *open_type;
     int mtu;
     int error;
@@ -412,9 +428,15 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
     if (error) {
         return error;
     }
-    /* XXX reject loopback devices */
     /* XXX reject non-Ethernet devices */
 
+    netdev_get_flags(netdev, &flags);
+    if (flags & NETDEV_LOOPBACK) {
+        VLOG_ERR("%s: cannot add a loopback device", devname);
+        netdev_close(netdev);
+        return EINVAL;
+    }
+
     error = netdev_rx_open(netdev, &rx);
     if (error
         && !(error == EOPNOTSUPP && dpif_netdev_class_is_dummy(dp->class))) {
@@ -445,7 +467,7 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
 
     list_push_back(&dp->port_list, &port->node);
     dp->ports[odp_to_u32(port_no)] = port;
-    dp->serial++;
+    seq_change(dp->port_seq);
 
     return 0;
 }
@@ -460,7 +482,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
     odp_port_t port_no;
     int error;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
     if (*port_nop != ODPP_NONE) {
         uint32_t port_idx = odp_to_u32(*port_nop);
@@ -480,7 +502,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
         *port_nop = port_no;
         error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return error;
 }
@@ -491,9 +513,9 @@ dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
     struct dp_netdev *dp = get_dp_netdev(dpif);
     int error;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     error = port_no == ODPP_LOCAL ? EINVAL : do_del_port(dp, port_no);
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return error;
 }
@@ -545,7 +567,7 @@ do_del_port(struct dp_netdev *dp, odp_port_t port_no)
 
     list_remove(&port->node);
     dp->ports[odp_to_u32(port_no)] = NULL;
-    dp->serial++;
+    seq_change(dp->port_seq);
 
     netdev_close(port->netdev);
     netdev_restore_flags(port->sf);
@@ -573,12 +595,12 @@ dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
     struct dp_netdev_port *port;
     int error;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     error = get_port_by_number(dp, port_no, &port);
     if (!error && dpif_port) {
         answer_port_query(port, dpif_port);
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return error;
 }
@@ -591,20 +613,20 @@ dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
     struct dp_netdev_port *port;
     int error;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     error = get_port_by_name(dp, devname, &port);
     if (!error && dpif_port) {
         answer_port_query(port, dpif_port);
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return error;
 }
 
-static odp_port_t
+static uint32_t
 dpif_netdev_get_max_ports(const struct dpif *dpif OVS_UNUSED)
 {
-    return u32_to_odp(MAX_PORTS);
+    return MAX_PORTS;
 }
 
 static void
@@ -630,9 +652,9 @@ dpif_netdev_flow_flush(struct dpif *dpif)
 {
     struct dp_netdev *dp = get_dp_netdev(dpif);
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     dp_netdev_flow_flush(dp);
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return 0;
 }
@@ -657,7 +679,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
     struct dp_netdev *dp = get_dp_netdev(dpif);
     uint32_t port_idx;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     for (port_idx = odp_to_u32(state->port_no); port_idx < MAX_PORTS;
          port_idx++) {
         struct dp_netdev_port *port = dp->ports[port_idx];
@@ -668,12 +690,12 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
             dpif_port->type = port->type;
             dpif_port->port_no = port->port_no;
             state->port_no = u32_to_odp(port_idx + 1);
-            xpthread_mutex_unlock(&dp_netdev_mutex);
+            ovs_mutex_unlock(&dp_netdev_mutex);
 
             return 0;
         }
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return EOF;
 }
@@ -691,16 +713,18 @@ static int
 dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
 {
     struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
+    uint64_t new_port_seq;
     int error;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
-    if (dpif->dp_serial != dpif->dp->serial) {
-        dpif->dp_serial = dpif->dp->serial;
+    ovs_mutex_lock(&dp_netdev_mutex);
+    new_port_seq = seq_read(dpif->dp->port_seq);
+    if (dpif->last_port_seq != new_port_seq) {
+        dpif->last_port_seq = new_port_seq;
         error = ENOBUFS;
     } else {
         error = EAGAIN;
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return error;
 }
@@ -710,15 +734,9 @@ dpif_netdev_port_poll_wait(const struct dpif *dpif_)
 {
     struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
 
-    /* XXX In a multithreaded process, there is a race window between this
-     * function and the poll_block() in one thread and a change in
-     * dpif->dp->serial in another thread. */
-
-    xpthread_mutex_lock(&dp_netdev_mutex);
-    if (dpif->dp_serial != dpif->dp->serial) {
-        poll_immediate_wake();
-    }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
+    seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static struct dp_netdev_flow *
@@ -791,7 +809,7 @@ dpif_netdev_flow_get(const struct dpif *dpif,
         return error;
     }
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     flow = dp_netdev_lookup_flow(dp, &key);
     if (flow) {
         if (stats) {
@@ -803,7 +821,7 @@ dpif_netdev_flow_get(const struct dpif *dpif,
     } else {
         error = ENOENT;
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return error;
 }
@@ -860,7 +878,7 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
         return error;
     }
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     flow = dp_netdev_lookup_flow(dp, &key);
     if (!flow) {
         if (put->flags & DPIF_FP_CREATE) {
@@ -891,7 +909,7 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
             error = EEXIST;
         }
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return error;
 }
@@ -909,7 +927,7 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
         return error;
     }
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     flow = dp_netdev_lookup_flow(dp, &key);
     if (flow) {
         if (del->stats) {
@@ -919,7 +937,7 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
     } else {
         error = ENOENT;
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return error;
 }
@@ -956,10 +974,10 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
     struct dp_netdev_flow *flow;
    struct hmap_node *node;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     node = hmap_at_position(&dp->flow_table, &state->bucket, &state->offset);
     if (!node) {
-        xpthread_mutex_unlock(&dp_netdev_mutex);
+        ovs_mutex_unlock(&dp_netdev_mutex);
         return EOF;
     }
 
@@ -993,7 +1011,7 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
         *stats = &state->stats;
     }
 
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return 0;
 }
@@ -1029,10 +1047,10 @@ dpif_netdev_execute(struct dpif *dpif, const struct dpif_execute *execute)
     error = dpif_netdev_flow_from_nlattrs(execute->key, execute->key_len,
                                           &key);
     if (!error) {
-        xpthread_mutex_lock(&dp_netdev_mutex);
+        ovs_mutex_lock(&dp_netdev_mutex);
         dp_netdev_execute_actions(dp, &copy, &key,
                                   execute->actions, execute->actions_len);
-        xpthread_mutex_unlock(&dp_netdev_mutex);
+        ovs_mutex_unlock(&dp_netdev_mutex);
     }
 
     ofpbuf_uninit(&copy);
@@ -1075,7 +1093,7 @@ dpif_netdev_recv(struct dpif *dpif, struct dpif_upcall *upcall,
     struct dp_netdev_queue *q;
     int error;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     q = find_nonempty_queue(dpif);
     if (q) {
         struct dp_netdev_upcall *u = &q->upcalls[q->tail++ & QUEUE_MASK];
@@ -1090,7 +1108,7 @@ dpif_netdev_recv(struct dpif *dpif, struct dpif_upcall *upcall,
     } else {
         error = EAGAIN;
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
     return error;
 }
@@ -1098,24 +1116,26 @@ dpif_netdev_recv(struct dpif *dpif, struct dpif_upcall *upcall,
 static void
 dpif_netdev_recv_wait(struct dpif *dpif)
 {
-    /* XXX In a multithreaded process, there is a race window between this
-     * function and the poll_block() in one thread and a packet being queued in
-     * another thread. */
+    struct dp_netdev *dp = get_dp_netdev(dpif);
+    uint64_t seq;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
+    seq = seq_read(dp->queue_seq);
     if (find_nonempty_queue(dpif)) {
         poll_immediate_wake();
+    } else {
+        seq_wait(dp->queue_seq, seq);
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static void
 dpif_netdev_recv_purge(struct dpif *dpif)
 {
     struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     dp_netdev_purge_queues(dpif_netdev->dp);
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static void
@@ -1130,7 +1150,7 @@ dp_netdev_flow_used(struct dp_netdev_flow *flow, const struct ofpbuf *packet)
 static void
 dp_netdev_port_input(struct dp_netdev *dp, struct dp_netdev_port *port,
                      struct ofpbuf *packet, uint32_t skb_priority,
-                     uint32_t skb_mark, const struct flow_tnl *tnl)
+                     uint32_t pkt_mark, const struct flow_tnl *tnl)
 {
     struct dp_netdev_flow *flow;
     struct flow key;
@@ -1140,7 +1160,7 @@ dp_netdev_port_input(struct dp_netdev *dp, struct dp_netdev_port *port,
         return;
     }
     in_port_.odp_port = port->port_no;
-    flow_extract(packet, skb_priority, skb_mark, tnl, &in_port_, &key);
+    flow_extract(packet, skb_priority, pkt_mark, tnl, &in_port_, &key);
     flow = dp_netdev_lookup_flow(dp, &key);
     if (flow) {
         dp_netdev_flow_used(flow, packet);
@@ -1160,7 +1180,7 @@ dpif_netdev_run(struct dpif *dpif)
     struct dp_netdev *dp;
     struct ofpbuf packet;
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     dp = get_dp_netdev(dpif);
     ofpbuf_init(&packet,
                 DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + dp->max_mtu);
@@ -1183,7 +1203,7 @@ dpif_netdev_run(struct dpif *dpif)
         }
     }
     ofpbuf_uninit(&packet);
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static void
@@ -1204,20 +1224,21 @@ dpif_netdev_wait(struct dpif *dpif)
      *   - In the dpif_port_remove() case, A might wake up spuriously, but
      *     that is harmless. */
 
-    xpthread_mutex_lock(&dp_netdev_mutex);
+    ovs_mutex_lock(&dp_netdev_mutex);
     LIST_FOR_EACH (port, node, &get_dp_netdev(dpif)->port_list) {
         if (port->rx) {
             netdev_rx_wait(port->rx);
         }
     }
-    xpthread_mutex_unlock(&dp_netdev_mutex);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static void
-dp_netdev_output_port(void *dp_, struct ofpbuf *packet, uint32_t out_port)
+dp_netdev_output_port(void *dp_, struct ofpbuf *packet,
+                      const struct flow *flow OVS_UNUSED, odp_port_t out_port)
 {
     struct dp_netdev *dp = dp_;
-    struct dp_netdev_port *p = dp->ports[out_port];
+    struct dp_netdev_port *p = dp->ports[odp_to_u32(out_port)];
     if (p) {
         netdev_send(p->netdev, packet);
     }
@@ -1265,6 +1286,8 @@ dp_netdev_output_userspace(struct dp_netdev *dp, const struct ofpbuf *packet,
         buf->size = packet->size;
         upcall->packet = buf;
 
+        seq_change(dp->queue_seq);
+
         return 0;
     } else {
         dp->n_lost++;
@@ -1275,8 +1298,11 @@ dp_netdev_output_userspace(struct dp_netdev *dp, const struct ofpbuf *packet,
 static void
 dp_netdev_action_userspace(void *dp, struct ofpbuf *packet,
                            const struct flow *key,
-                           const struct nlattr *userdata)
+                           const struct nlattr *a)
 {
+    const struct nlattr *userdata;
+
+    userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
     dp_netdev_output_userspace(dp, packet, DPIF_UC_ACTION, key, userdata);
 }
 
@@ -1290,43 +1316,86 @@ dp_netdev_execute_actions(struct dp_netdev *dp,
                         dp_netdev_output_port, dp_netdev_action_userspace);
 }
 
+#define DPIF_NETDEV_CLASS_FUNCTIONS                  \
+    dpif_netdev_enumerate,                           \
+    dpif_netdev_port_open_type,                      \
+    dpif_netdev_open,                                \
+    dpif_netdev_close,                               \
+    dpif_netdev_destroy,                             \
+    dpif_netdev_run,                                 \
+    dpif_netdev_wait,                                \
+    dpif_netdev_get_stats,                           \
+    dpif_netdev_port_add,                            \
+    dpif_netdev_port_del,                            \
+    dpif_netdev_port_query_by_number,                \
+    dpif_netdev_port_query_by_name,                  \
+    dpif_netdev_get_max_ports,                       \
+    NULL,                       /* port_get_pid */   \
+    dpif_netdev_port_dump_start,                     \
+    dpif_netdev_port_dump_next,                      \
+    dpif_netdev_port_dump_done,                      \
+    dpif_netdev_port_poll,                           \
+    dpif_netdev_port_poll_wait,                      \
+    dpif_netdev_flow_get,                            \
+    dpif_netdev_flow_put,                            \
+    dpif_netdev_flow_del,                            \
+    dpif_netdev_flow_flush,                          \
+    dpif_netdev_flow_dump_start,                     \
+    dpif_netdev_flow_dump_next,                      \
+    dpif_netdev_flow_dump_done,                      \
+    dpif_netdev_execute,                             \
+    NULL,                       /* operate */        \
+    dpif_netdev_recv_set,                            \
+    dpif_netdev_queue_to_priority,                   \
+    dpif_netdev_recv,                                \
+    dpif_netdev_recv_wait,                           \
+    dpif_netdev_recv_purge,                          \
+
 const struct dpif_class dpif_netdev_class = {
     "netdev",
-    dpif_netdev_enumerate,
-    dpif_netdev_port_open_type,
-    dpif_netdev_open,
-    dpif_netdev_close,
-    dpif_netdev_destroy,
-    dpif_netdev_run,
-    dpif_netdev_wait,
-    dpif_netdev_get_stats,
-    dpif_netdev_port_add,
-    dpif_netdev_port_del,
-    dpif_netdev_port_query_by_number,
-    dpif_netdev_port_query_by_name,
-    dpif_netdev_get_max_ports,
-    NULL,                       /* port_get_pid */
-    dpif_netdev_port_dump_start,
-    dpif_netdev_port_dump_next,
-    dpif_netdev_port_dump_done,
-    dpif_netdev_port_poll,
-    dpif_netdev_port_poll_wait,
-    dpif_netdev_flow_get,
-    dpif_netdev_flow_put,
-    dpif_netdev_flow_del,
-    dpif_netdev_flow_flush,
-    dpif_netdev_flow_dump_start,
-    dpif_netdev_flow_dump_next,
-    dpif_netdev_flow_dump_done,
-    dpif_netdev_execute,
-    NULL,                       /* operate */
-    dpif_netdev_recv_set,
-    dpif_netdev_queue_to_priority,
-    dpif_netdev_recv,
-    dpif_netdev_recv_wait,
-    dpif_netdev_recv_purge,
+    DPIF_NETDEV_CLASS_FUNCTIONS
 };
+
+const struct dpif_class dpif_planetlab_class = {
+    "planetlab",
+    DPIF_NETDEV_CLASS_FUNCTIONS
+};
 
+static void
+dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
+                              const char *argv[], void *aux OVS_UNUSED)
+{
+    struct dp_netdev_port *port;
+    struct dp_netdev *dp;
+    int port_no;
+
+    dp = shash_find_data(&dp_netdevs, argv[1]);
+    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
+        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
+        return;
+    }
+
+    if (get_port_by_name(dp, argv[2], &port)) {
+        unixctl_command_reply_error(conn, "unknown port");
+        return;
+    }
+
+    port_no = atoi(argv[3]);
+    if (port_no <= 0 || port_no >= MAX_PORTS) {
+        unixctl_command_reply_error(conn, "bad port number");
+        return;
+    }
+    if (dp->ports[port_no]) {
+        unixctl_command_reply_error(conn, "port number already in use");
+        return;
+    }
+    dp->ports[odp_to_u32(port->port_no)] = NULL;
+    dp->ports[port_no] = port;
+    port->port_no = u32_to_odp(port_no);
+    seq_change(dp->port_seq);
+    unixctl_command_reply(conn, NULL);
+}
+
 static void
 dpif_dummy_register__(const char *type)
 {
@@ -1356,4 +1425,9 @@ dpif_dummy_register(bool override)
     }
 
     dpif_dummy_register__("dummy");
+
+    unixctl_command_register("dpif-dummy/change-port-number",
+                             "DP PORT NEW-NUMBER",
+                             3, 3, dpif_dummy_change_port_number, NULL);
 }
+
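
The recurring change in this patch is the replacement of the hand-maintained "serial" counters (and their racy poll_immediate_wake() checks, flagged by the deleted XXX comments) with OVS seq change-notification objects from lib/seq.h. The sketch below is not part of the patch: it is a minimal illustration, assuming only the seq calls that appear above (seq_create, seq_read, seq_change, seq_wait) plus poll_block() from lib/poll-loop.h, of the writer/poller/wait pattern that dpif_netdev_port_poll() and dpif_netdev_port_poll_wait() now follow. The "pollable" names are hypothetical.

/* Illustrative sketch only; not from lib/dpif-netdev.c. */
#include <stdbool.h>
#include <stdint.h>

#include "seq.h"

struct pollable {
    struct seq *change_seq;     /* Bumped by the writer on every change. */
    uint64_t last_seq;          /* Last value this poller has consumed. */
};

static void
pollable_init(struct pollable *p)
{
    p->change_seq = seq_create();
    p->last_seq = seq_read(p->change_seq);
}

/* Writer side: call whenever the protected state changes (compare
 * seq_change(dp->port_seq) in do_add_port() and do_del_port()). */
static void
pollable_changed(struct pollable *p)
{
    seq_change(p->change_seq);
}

/* Poller side: reports each change exactly once, mirroring the
 * ENOBUFS/EAGAIN convention of dpif_netdev_port_poll(). */
static bool
pollable_poll(struct pollable *p)
{
    uint64_t new_seq = seq_read(p->change_seq);

    if (new_seq != p->last_seq) {
        p->last_seq = new_seq;
        return true;
    }
    return false;
}

/* Wait side: arranges for the caller's next poll_block() to wake as soon as
 * the seq value differs from the last one we saw (immediately, if it already
 * does).  Because the wakeup condition is recorded before sleeping, this
 * closes the check-then-sleep race that the old "unsigned int serial"
 * scheme had. */
static void
pollable_wait(const struct pollable *p)
{
    seq_wait(p->change_seq, p->last_seq);
}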