X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fdpif-netdev.c;h=36b6d4a0552ce93922d41374f00eb8a8a1ab6c65;hb=2431be1b68d386bd616378d2c528242775c4d54a;hp=8e5e6df752da9fff5e0ba137e246ff7c6b42d9af;hpb=4e022ec09e14ac89add74c1b4b8e3ff3873edbf0;p=sliver-openvswitch.git

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 8e5e6df75..36b6d4a05 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -49,9 +49,11 @@
 #include "packets.h"
 #include "poll-loop.h"
 #include "random.h"
+#include "seq.h"
 #include "shash.h"
 #include "sset.h"
 #include "timeval.h"
+#include "unixctl.h"
 #include "util.h"
 #include "vlog.h"
 
@@ -87,9 +89,11 @@ struct dp_netdev {
     char *name;
     int open_cnt;
     bool destroyed;
+    int max_mtu;                /* Maximum MTU of any port added so far. */
 
     struct dp_netdev_queue queues[N_QUEUES];
     struct hmap flow_table;     /* Flow table. */
+    struct seq *queue_seq;      /* Incremented whenever a packet is queued. */
 
     /* Statistics. */
     long long int n_hit;        /* Number of flow table matches. */
@@ -99,7 +103,7 @@ struct dp_netdev {
     /* Ports. */
     struct dp_netdev_port *ports[MAX_PORTS];
     struct list port_list;
-    unsigned int serial;
+    struct seq *port_seq;       /* Incremented whenever a port changes. */
 };
 
 /* A port in a netdev-based datapath. */
@@ -132,14 +136,14 @@ struct dp_netdev_flow {
 struct dpif_netdev {
     struct dpif dpif;
     struct dp_netdev *dp;
-    unsigned int dp_serial;
+    uint64_t last_port_seq;
 };
 
 /* All netdev-based datapaths. */
 static struct shash dp_netdevs = SHASH_INITIALIZER(&dp_netdevs);
 
-/* Maximum port MTU seen so far. */
-static int max_mtu = ETH_PAYLOAD_MAX;
+/* Global lock for all data. */
+static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
 
 static int get_port_by_number(struct dp_netdev *, odp_port_t port_no,
                               struct dp_netdev_port **portp);
@@ -162,7 +166,7 @@ static void dp_netdev_execute_actions(struct dp_netdev *,
 static void dp_netdev_port_input(struct dp_netdev *dp,
                                  struct dp_netdev_port *port,
                                  struct ofpbuf *packet, uint32_t skb_priority,
-                                 uint32_t skb_mark, const struct flow_tnl *tnl);
+                                 uint32_t pkt_mark, const struct flow_tnl *tnl);
 
 static struct dpif_netdev *
 dpif_netdev_cast(const struct dpif *dpif)
@@ -182,9 +186,12 @@ dpif_netdev_enumerate(struct sset *all_dps)
 {
     struct shash_node *node;
 
+    ovs_mutex_lock(&dp_netdev_mutex);
     SHASH_FOR_EACH(node, &dp_netdevs) {
         sset_add(all_dps, node->name);
     }
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
     return 0;
 }
 
@@ -194,10 +201,17 @@ dpif_netdev_class_is_dummy(const struct dpif_class *class)
     return class != &dpif_netdev_class;
 }
 
+static bool
+dpif_netdev_class_is_planetlab(const struct dpif_class *class)
+{
+    return class == &dpif_planetlab_class;
+}
+
 static const char *
 dpif_netdev_port_open_type(const struct dpif_class *class, const char *type)
 {
     return strcmp(type, "internal") ? type
+                  : dpif_netdev_class_is_planetlab(class) ? "pltap"
                   : dpif_netdev_class_is_dummy(class) ? "dummy"
                   : "tap";
 }
@@ -213,7 +227,7 @@ create_dpif_netdev(struct dp_netdev *dp)
     dpif = xmalloc(sizeof *dpif);
     dpif_init(&dpif->dpif, dp->class, dp->name, netflow_id >> 8, netflow_id);
     dpif->dp = dp;
-    dpif->dp_serial = dp->serial;
+    dpif->last_port_seq = seq_read(dp->port_seq);
 
     return &dpif->dpif;
 }
@@ -225,7 +239,8 @@ choose_port(struct dp_netdev *dp, const char *name)
 {
     uint32_t port_no;
 
-    if (dp->class != &dpif_netdev_class) {
+    if (dp->class != &dpif_netdev_class &&
+        dp->class != &dpif_planetlab_class) {
         const char *p;
         int start_no = 0;
 
@@ -271,11 +286,14 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
     dp->class = class;
     dp->name = xstrdup(name);
     dp->open_cnt = 0;
+    dp->max_mtu = ETH_PAYLOAD_MAX;
     for (i = 0; i < N_QUEUES; i++) {
         dp->queues[i].head = dp->queues[i].tail = 0;
     }
+    dp->queue_seq = seq_create();
     hmap_init(&dp->flow_table);
     list_init(&dp->port_list);
+    dp->port_seq = seq_create();
 
     error = do_add_port(dp, name, "internal", ODPP_LOCAL);
     if (error) {
@@ -294,28 +312,23 @@ dpif_netdev_open(const struct dpif_class *class, const char *name,
                  bool create, struct dpif **dpifp)
 {
     struct dp_netdev *dp;
+    int error;
 
+    ovs_mutex_lock(&dp_netdev_mutex);
     dp = shash_find_data(&dp_netdevs, name);
     if (!dp) {
-        if (!create) {
-            return ENODEV;
-        } else {
-            int error = create_dp_netdev(name, class, &dp);
-            if (error) {
-                return error;
-            }
-            ovs_assert(dp != NULL);
-        }
+        error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
     } else {
-        if (dp->class != class) {
-            return EINVAL;
-        } else if (create) {
-            return EEXIST;
-        }
+        error = (dp->class != class ? EINVAL
+                 : create ? EEXIST
+                 : 0);
     }
+    if (!error) {
+        *dpifp = create_dpif_netdev(dp);
+    }
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
-    *dpifp = create_dpif_netdev(dp);
-    return 0;
+    return error;
 }
 
 static void
@@ -343,7 +356,9 @@ dp_netdev_free(struct dp_netdev *dp)
         do_del_port(dp, port->port_no);
     }
     dp_netdev_purge_queues(dp);
+    seq_destroy(dp->queue_seq);
     hmap_destroy(&dp->flow_table);
+    seq_destroy(dp->port_seq);
     free(dp->name);
     free(dp);
 }
@@ -352,19 +367,28 @@ static void
 dpif_netdev_close(struct dpif *dpif)
 {
     struct dp_netdev *dp = get_dp_netdev(dpif);
+
+    ovs_mutex_lock(&dp_netdev_mutex);
+
     ovs_assert(dp->open_cnt > 0);
     if (--dp->open_cnt == 0 && dp->destroyed) {
         shash_find_and_delete(&dp_netdevs, dp->name);
         dp_netdev_free(dp);
     }
     free(dpif);
+
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static int
 dpif_netdev_destroy(struct dpif *dpif)
 {
     struct dp_netdev *dp = get_dp_netdev(dpif);
+
+    ovs_mutex_lock(&dp_netdev_mutex);
     dp->destroyed = true;
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
     return 0;
 }
 
@@ -372,10 +396,16 @@ static int
 dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
 {
     struct dp_netdev *dp = get_dp_netdev(dpif);
+
+    ovs_mutex_lock(&dp_netdev_mutex);
     stats->n_flows = hmap_count(&dp->flow_table);
     stats->n_hit = dp->n_hit;
     stats->n_missed = dp->n_missed;
     stats->n_lost = dp->n_lost;
+    stats->n_masks = UINT64_MAX;
+    stats->n_mask_hit = UINT64_MAX;
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
     return 0;
 }
 
@@ -387,6 +417,7 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
     struct dp_netdev_port *port;
     struct netdev *netdev;
     struct netdev_rx *rx;
+    enum netdev_flags flags;
    const char *open_type;
     int mtu;
     int error;
@@ -399,14 +430,20 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
     if (error) {
         return error;
     }
-    /* XXX reject loopback devices */
     /* XXX reject non-Ethernet devices */
 
+    netdev_get_flags(netdev, &flags);
+    if (flags & NETDEV_LOOPBACK) {
+        VLOG_ERR("%s: cannot add a loopback device", devname);
+        netdev_close(netdev);
+        return EINVAL;
+    }
+
     error = netdev_rx_open(netdev, &rx);
     if (error
         && !(error == EOPNOTSUPP && dpif_netdev_class_is_dummy(dp->class))) {
         VLOG_ERR("%s: cannot receive packets on this network device (%s)",
-                 devname, strerror(errno));
+                 devname, ovs_strerror(errno));
         netdev_close(netdev);
         return error;
     }
@@ -426,13 +463,13 @@ do_add_port(struct dp_netdev *dp, const char *devname, const char *type,
     port->type = xstrdup(type);
 
     error = netdev_get_mtu(netdev, &mtu);
-    if (!error && mtu > max_mtu) {
-        max_mtu = mtu;
+    if (!error && mtu > dp->max_mtu) {
+        dp->max_mtu = mtu;
     }
 
     list_push_back(&dp->port_list, &port->node);
     dp->ports[odp_to_u32(port_no)] = port;
-    dp->serial++;
+    seq_change(dp->port_seq);
 
     return 0;
 }
@@ -445,32 +482,44 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
     char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
     const char *dpif_port;
     odp_port_t port_no;
+    int error;
 
+    ovs_mutex_lock(&dp_netdev_mutex);
     dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
     if (*port_nop != ODPP_NONE) {
         uint32_t port_idx = odp_to_u32(*port_nop);
         if (port_idx >= MAX_PORTS) {
-            return EFBIG;
+            error = EFBIG;
         } else if (dp->ports[port_idx]) {
-            return EBUSY;
+            error = EBUSY;
+        } else {
+            error = 0;
+            port_no = *port_nop;
         }
-        port_no = *port_nop;
     } else {
         port_no = choose_port(dp, dpif_port);
+        error = port_no == ODPP_NONE ? EFBIG : 0;
     }
-    if (port_no != ODPP_NONE) {
+    if (!error) {
         *port_nop = port_no;
-        return do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
+        error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
     }
-    return EFBIG;
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
+    return error;
 }
 
 static int
 dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
 {
     struct dp_netdev *dp = get_dp_netdev(dpif);
-    return (port_no == ODPP_LOCAL ?
-            EINVAL : do_del_port(dp, port_no));
+    int error;
+
+    ovs_mutex_lock(&dp_netdev_mutex);
+    error = port_no == ODPP_LOCAL ? EINVAL : do_del_port(dp, port_no);
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
+    return error;
 }
 
 static bool
@@ -520,7 +569,7 @@ do_del_port(struct dp_netdev *dp, odp_port_t port_no)
 
     list_remove(&port->node);
     dp->ports[odp_to_u32(port_no)] = NULL;
-    dp->serial++;
+    seq_change(dp->port_seq);
 
     netdev_close(port->netdev);
     netdev_restore_flags(port->sf);
@@ -548,10 +597,13 @@ dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
     struct dp_netdev_port *port;
     int error;
 
+    ovs_mutex_lock(&dp_netdev_mutex);
     error = get_port_by_number(dp, port_no, &port);
     if (!error && dpif_port) {
         answer_port_query(port, dpif_port);
     }
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
     return error;
 }
 
@@ -563,17 +615,20 @@ dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
     struct dp_netdev_port *port;
     int error;
 
+    ovs_mutex_lock(&dp_netdev_mutex);
     error = get_port_by_name(dp, devname, &port);
     if (!error && dpif_port) {
         answer_port_query(port, dpif_port);
     }
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
     return error;
 }
 
-static odp_port_t
+static uint32_t
 dpif_netdev_get_max_ports(const struct dpif *dpif OVS_UNUSED)
 {
-    return u32_to_odp(MAX_PORTS);
+    return MAX_PORTS;
 }
 
 static void
@@ -598,7 +653,11 @@ static int
 dpif_netdev_flow_flush(struct dpif *dpif)
 {
     struct dp_netdev *dp = get_dp_netdev(dpif);
+
+    ovs_mutex_lock(&dp_netdev_mutex);
     dp_netdev_flow_flush(dp);
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
     return 0;
 }
 
@@ -622,6 +681,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
     struct dp_netdev *dp = get_dp_netdev(dpif);
     uint32_t port_idx;
 
+    ovs_mutex_lock(&dp_netdev_mutex);
     for (port_idx = odp_to_u32(state->port_no);
          port_idx < MAX_PORTS; port_idx++) {
         struct dp_netdev_port *port = dp->ports[port_idx];
@@ -632,9 +692,13 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
             dpif_port->type = port->type;
             dpif_port->port_no = port->port_no;
             state->port_no = u32_to_odp(port_idx + 1);
+            ovs_mutex_unlock(&dp_netdev_mutex);
+
             return 0;
         }
     }
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
     return EOF;
 }
 
@@ -651,21 +715,30 @@ static int
 dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
 {
     struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
-    if (dpif->dp_serial != dpif->dp->serial) {
-        dpif->dp_serial = dpif->dp->serial;
-        return ENOBUFS;
+    uint64_t new_port_seq;
+    int error;
+
+    ovs_mutex_lock(&dp_netdev_mutex);
+    new_port_seq = seq_read(dpif->dp->port_seq);
+    if (dpif->last_port_seq != new_port_seq) {
+        dpif->last_port_seq = new_port_seq;
+        error = ENOBUFS;
     } else {
-        return EAGAIN;
+        error = EAGAIN;
    }
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
+    return error;
 }
 
 static void
 dpif_netdev_port_poll_wait(const struct dpif *dpif_)
 {
     struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
-    if (dpif->dp_serial != dpif->dp->serial) {
-        poll_immediate_wake();
-    }
+
+    ovs_mutex_lock(&dp_netdev_mutex);
+    seq_wait(dpif->dp->port_seq, dpif->last_port_seq);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static struct dp_netdev_flow *
@@ -694,6 +767,8 @@ static int
 dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
                               struct flow *flow)
 {
+    odp_port_t in_port;
+
     if (odp_flow_key_to_flow(key, key_len, flow) != ODP_FIT_PERFECT) {
         /* This should not happen: it indicates that odp_flow_key_from_flow()
          * and odp_flow_key_to_flow() disagree on the acceptable form of a
@@ -713,7 +788,8 @@ dpif_netdev_flow_from_nlattrs(const struct nlattr *key, uint32_t key_len,
         return EINVAL;
     }
 
-    if (!is_valid_port_number(flow->in_port.odp_port)) {
+    in_port = flow->in_port.odp_port;
+    if (!is_valid_port_number(in_port) && in_port != ODPP_NONE) {
         return EINVAL;
     }
 
@@ -735,18 +811,21 @@ dpif_netdev_flow_get(const struct dpif *dpif,
         return error;
     }
 
+    ovs_mutex_lock(&dp_netdev_mutex);
     flow = dp_netdev_lookup_flow(dp, &key);
-    if (!flow) {
-        return ENOENT;
+    if (flow) {
+        if (stats) {
+            get_dpif_flow_stats(flow, stats);
+        }
+        if (actionsp) {
+            *actionsp = ofpbuf_clone_data(flow->actions, flow->actions_len);
+        }
+    } else {
+        error = ENOENT;
     }
+    ovs_mutex_unlock(&dp_netdev_mutex);
 
-    if (stats) {
-        get_dpif_flow_stats(flow, stats);
-    }
-    if (actionsp) {
-        *actionsp = ofpbuf_clone_data(flow->actions, flow->actions_len);
-    }
-    return 0;
+    return error;
 }
 
 static int
@@ -801,6 +880,7 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
         return error;
     }
 
+    ovs_mutex_lock(&dp_netdev_mutex);
     flow = dp_netdev_lookup_flow(dp, &key);
     if (!flow) {
         if (put->flags & DPIF_FP_CREATE) {
@@ -808,17 +888,17 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
                 if (put->stats) {
                     memset(put->stats, 0, sizeof *put->stats);
                 }
-                return dp_netdev_flow_add(dp, &key, put->actions,
-                                          put->actions_len);
+                error = dp_netdev_flow_add(dp, &key, put->actions,
+                                           put->actions_len);
             } else {
-                return EFBIG;
+                error = EFBIG;
            }
         } else {
-            return ENOENT;
+            error = ENOENT;
         }
     } else {
         if (put->flags & DPIF_FP_MODIFY) {
-            int error = set_flow_actions(flow, put->actions, put->actions_len);
+            error = set_flow_actions(flow, put->actions, put->actions_len);
             if (!error) {
                 if (put->stats) {
                     get_dpif_flow_stats(flow, put->stats);
@@ -827,11 +907,13 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
                     clear_stats(flow);
                 }
             }
-            return error;
         } else {
-            return EEXIST;
+            error = EEXIST;
         }
     }
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
+    return error;
 }
 
 static int
@@ -847,16 +929,19 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
         return error;
     }
 
+    ovs_mutex_lock(&dp_netdev_mutex);
     flow = dp_netdev_lookup_flow(dp, &key);
     if (flow) {
         if (del->stats) {
             get_dpif_flow_stats(flow, del->stats);
         }
         dp_netdev_free_flow(dp, flow);
-        return 0;
     } else {
-        return ENOENT;
+        error = ENOENT;
     }
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
+    return error;
 }
 
 struct dp_netdev_flow_state {
@@ -891,8 +976,10 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
     struct dp_netdev_flow *flow;
     struct hmap_node *node;
 
+    ovs_mutex_lock(&dp_netdev_mutex);
     node = hmap_at_position(&dp->flow_table, &state->bucket, &state->offset);
     if (!node) {
+        ovs_mutex_unlock(&dp_netdev_mutex);
         return EOF;
     }
 
@@ -926,6 +1013,7 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
         *stats = &state->stats;
     }
 
+    ovs_mutex_unlock(&dp_netdev_mutex);
     return 0;
 }
 
@@ -961,8 +1049,10 @@ dpif_netdev_execute(struct dpif *dpif, const struct dpif_execute *execute)
     error = dpif_netdev_flow_from_nlattrs(execute->key, execute->key_len,
                                           &key);
     if (!error) {
+        ovs_mutex_lock(&dp_netdev_mutex);
         dp_netdev_execute_actions(dp, &copy, &key, execute->actions,
                                   execute->actions_len);
+        ovs_mutex_unlock(&dp_netdev_mutex);
     }
 
     ofpbuf_uninit(&copy);
@@ -1002,7 +1092,11 @@ static int
 dpif_netdev_recv(struct dpif *dpif, struct dpif_upcall *upcall,
                  struct ofpbuf *buf)
 {
-    struct dp_netdev_queue *q = find_nonempty_queue(dpif);
+    struct dp_netdev_queue *q;
+    int error;
+
+    ovs_mutex_lock(&dp_netdev_mutex);
+    q = find_nonempty_queue(dpif);
     if (q) {
         struct dp_netdev_upcall *u = &q->upcalls[q->tail++ & QUEUE_MASK];
 
@@ -1012,28 +1106,38 @@ dpif_netdev_recv(struct dpif *dpif, struct dpif_upcall *upcall,
 
         ofpbuf_uninit(buf);
         *buf = u->buf;
-        return 0;
+        error = 0;
     } else {
-        return EAGAIN;
+        error = EAGAIN;
     }
+    ovs_mutex_unlock(&dp_netdev_mutex);
+
+    return error;
 }
 
 static void
 dpif_netdev_recv_wait(struct dpif *dpif)
 {
+    struct dp_netdev *dp = get_dp_netdev(dpif);
+    uint64_t seq;
+
+    ovs_mutex_lock(&dp_netdev_mutex);
+    seq = seq_read(dp->queue_seq);
     if (find_nonempty_queue(dpif)) {
         poll_immediate_wake();
     } else {
-        /* No messages ready to be received, and dp_wait() will ensure that we
-         * wake up to queue new messages, so there is nothing to do. */
+        seq_wait(dp->queue_seq, seq);
     }
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static void
 dpif_netdev_recv_purge(struct dpif *dpif)
 {
     struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);
+    ovs_mutex_lock(&dp_netdev_mutex);
     dp_netdev_purge_queues(dpif_netdev->dp);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static void
@@ -1048,7 +1152,7 @@ dp_netdev_flow_used(struct dp_netdev_flow *flow, const struct ofpbuf *packet)
 static void
 dp_netdev_port_input(struct dp_netdev *dp, struct dp_netdev_port *port,
                      struct ofpbuf *packet, uint32_t skb_priority,
-                     uint32_t skb_mark, const struct flow_tnl *tnl)
+                     uint32_t pkt_mark, const struct flow_tnl *tnl)
 {
     struct dp_netdev_flow *flow;
     struct flow key;
@@ -1058,7 +1162,7 @@ dp_netdev_port_input(struct dp_netdev *dp, struct dp_netdev_port *port,
         return;
     }
     in_port_.odp_port = port->port_no;
-    flow_extract(packet, skb_priority, skb_mark, tnl, &in_port_, &key);
+    flow_extract(packet, skb_priority, pkt_mark, tnl, &in_port_, &key);
     flow = dp_netdev_lookup_flow(dp, &key);
     if (flow) {
         dp_netdev_flow_used(flow, packet);
@@ -1074,11 +1178,14 @@
 static void
 dpif_netdev_run(struct dpif *dpif)
 {
-    struct dp_netdev *dp = get_dp_netdev(dpif);
     struct dp_netdev_port *port;
+    struct dp_netdev *dp;
     struct ofpbuf packet;
 
-    ofpbuf_init(&packet, DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + max_mtu);
+    ovs_mutex_lock(&dp_netdev_mutex);
+    dp = get_dp_netdev(dpif);
+    ofpbuf_init(&packet,
+                DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + dp->max_mtu);
 
     LIST_FOR_EACH (port, node, &dp->port_list) {
         int error;
@@ -1094,30 +1201,46 @@ dpif_netdev_run(struct dpif *dpif)
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 
             VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
-                        netdev_get_name(port->netdev), strerror(error));
+                        netdev_get_name(port->netdev), ovs_strerror(error));
         }
     }
     ofpbuf_uninit(&packet);
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static void
 dpif_netdev_wait(struct dpif *dpif)
 {
-    struct dp_netdev *dp = get_dp_netdev(dpif);
     struct dp_netdev_port *port;
 
-    LIST_FOR_EACH (port, node, &dp->port_list) {
+    /* There is a race here, if thread A calls dpif_netdev_wait(dpif) and
+     * thread B calls dpif_port_add(dpif) or dpif_port_remove(dpif) before
+     * A makes it to poll_block().
+     *
+     * But I think it doesn't matter:
+     *
+     *     - In the dpif_port_add() case, A will not wake up when a packet
+     *       arrives on the new port, but this would also happen if the
+     *       ordering were reversed.
+     *
+     *     - In the dpif_port_remove() case, A might wake up spuriously, but
+     *       that is harmless. */
+
+    ovs_mutex_lock(&dp_netdev_mutex);
+    LIST_FOR_EACH (port, node, &get_dp_netdev(dpif)->port_list) {
         if (port->rx) {
             netdev_rx_wait(port->rx);
         }
     }
+    ovs_mutex_unlock(&dp_netdev_mutex);
 }
 
 static void
-dp_netdev_output_port(void *dp_, struct ofpbuf *packet, uint32_t out_port)
+dp_netdev_output_port(void *dp_, struct ofpbuf *packet,
+                      const struct flow *flow OVS_UNUSED, odp_port_t out_port)
 {
     struct dp_netdev *dp = dp_;
-    struct dp_netdev_port *p = dp->ports[out_port];
+    struct dp_netdev_port *p = dp->ports[odp_to_u32(out_port)];
     if (p) {
         netdev_send(p->netdev, packet);
     }
@@ -1165,6 +1288,8 @@ dp_netdev_output_userspace(struct dp_netdev *dp, const struct ofpbuf *packet,
         buf->size = packet->size;
         upcall->packet = buf;
 
+        seq_change(dp->queue_seq);
+
         return 0;
     } else {
         dp->n_lost++;
@@ -1175,8 +1300,11 @@
 static void
 dp_netdev_action_userspace(void *dp, struct ofpbuf *packet,
                            const struct flow *key,
-                           const struct nlattr *userdata)
+                           const struct nlattr *a)
 {
+    const struct nlattr *userdata;
+
+    userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
     dp_netdev_output_userspace(dp, packet, DPIF_UC_ACTION, key, userdata);
 }
 
@@ -1190,43 +1318,86 @@ dp_netdev_execute_actions(struct dp_netdev *dp,
                         dp_netdev_output_port, dp_netdev_action_userspace);
 }
 
+#define DPIF_NETDEV_CLASS_FUNCTIONS                 \
+    dpif_netdev_enumerate,                          \
+    dpif_netdev_port_open_type,                     \
+    dpif_netdev_open,                               \
+    dpif_netdev_close,                              \
+    dpif_netdev_destroy,                            \
+    dpif_netdev_run,                                \
+    dpif_netdev_wait,                               \
+    dpif_netdev_get_stats,                          \
+    dpif_netdev_port_add,                           \
+    dpif_netdev_port_del,                           \
+    dpif_netdev_port_query_by_number,               \
+    dpif_netdev_port_query_by_name,                 \
+    dpif_netdev_get_max_ports,                      \
+    NULL,                       /* port_get_pid */  \
+    dpif_netdev_port_dump_start,                    \
+    dpif_netdev_port_dump_next,                     \
+    dpif_netdev_port_dump_done,                     \
+    dpif_netdev_port_poll,                          \
+    dpif_netdev_port_poll_wait,                     \
+    dpif_netdev_flow_get,                           \
+    dpif_netdev_flow_put,                           \
+    dpif_netdev_flow_del,                           \
+    dpif_netdev_flow_flush,                         \
+    dpif_netdev_flow_dump_start,                    \
+    dpif_netdev_flow_dump_next,                     \
+    dpif_netdev_flow_dump_done,                     \
+    dpif_netdev_execute,                            \
+    NULL,                       /* operate */       \
+    dpif_netdev_recv_set,                           \
+    dpif_netdev_queue_to_priority,                  \
+    dpif_netdev_recv,                               \
+    dpif_netdev_recv_wait,                          \
+    dpif_netdev_recv_purge,                         \
+
 const struct dpif_class dpif_netdev_class = {
     "netdev",
-    dpif_netdev_enumerate,
-    dpif_netdev_port_open_type,
-    dpif_netdev_open,
-    dpif_netdev_close,
-    dpif_netdev_destroy,
-    dpif_netdev_run,
-    dpif_netdev_wait,
-    dpif_netdev_get_stats,
-    dpif_netdev_port_add,
-    dpif_netdev_port_del,
-    dpif_netdev_port_query_by_number,
-    dpif_netdev_port_query_by_name,
-    dpif_netdev_get_max_ports,
-    NULL,                       /* port_get_pid */
-    dpif_netdev_port_dump_start,
-    dpif_netdev_port_dump_next,
-    dpif_netdev_port_dump_done,
-    dpif_netdev_port_poll,
-    dpif_netdev_port_poll_wait,
-    dpif_netdev_flow_get,
-    dpif_netdev_flow_put,
-    dpif_netdev_flow_del,
-    dpif_netdev_flow_flush,
-    dpif_netdev_flow_dump_start,
-    dpif_netdev_flow_dump_next,
-    dpif_netdev_flow_dump_done,
-    dpif_netdev_execute,
-    NULL,                       /* operate */
-    dpif_netdev_recv_set,
-    dpif_netdev_queue_to_priority,
-    dpif_netdev_recv,
-    dpif_netdev_recv_wait,
-    dpif_netdev_recv_purge,
+    DPIF_NETDEV_CLASS_FUNCTIONS
+};
+
+const struct dpif_class dpif_planetlab_class = {
+    "planetlab",
+    DPIF_NETDEV_CLASS_FUNCTIONS
 };
 
+static void
+dpif_dummy_change_port_number(struct unixctl_conn *conn, int argc OVS_UNUSED,
+                              const char *argv[], void *aux OVS_UNUSED)
+{
+    struct dp_netdev_port *port;
+    struct dp_netdev *dp;
+    int port_no;
+
+    dp = shash_find_data(&dp_netdevs, argv[1]);
+    if (!dp || !dpif_netdev_class_is_dummy(dp->class)) {
+        unixctl_command_reply_error(conn, "unknown datapath or not a dummy");
+        return;
+    }
+
+    if (get_port_by_name(dp, argv[2], &port)) {
+        unixctl_command_reply_error(conn, "unknown port");
+        return;
+    }
+
+    port_no = atoi(argv[3]);
+    if (port_no <= 0 || port_no >= MAX_PORTS) {
+        unixctl_command_reply_error(conn, "bad port number");
+        return;
+    }
+    if (dp->ports[port_no]) {
+        unixctl_command_reply_error(conn, "port number already in use");
+        return;
+    }
+    dp->ports[odp_to_u32(port->port_no)] = NULL;
+    dp->ports[port_no] = port;
+    port->port_no = u32_to_odp(port_no);
+    seq_change(dp->port_seq);
+    unixctl_command_reply(conn, NULL);
+}
+
 static void
 dpif_dummy_register__(const char *type)
 {
@@ -1256,4 +1427,9 @@ dpif_dummy_register(bool override)
     }
 
     dpif_dummy_register__("dummy");
+
+    unixctl_command_register("dpif-dummy/change-port-number",
+                             "DP PORT NEW-NUMBER",
+                             3, 3, dpif_dummy_change_port_number, NULL);
 }
+