static uint64_t pick_datapath_id(const struct ofproto *);
static uint64_t pick_fallback_dpid(void);
static void ofproto_destroy__(struct ofproto *);
-static void set_internal_devs_mtu(struct ofproto *);
+static void update_mtu(struct ofproto *, struct ofport *);
/* unixctl. */
static void ofproto_unixctl_init(void);
hmap_init(&ofproto->deletions);
ofproto->vlan_bitmap = NULL;
ofproto->vlans_changed = false;
+ ofproto->min_mtu = INT_MAX;
error = ofproto->ofproto_class->construct(ofproto);
if (error) {
int
ofproto_run(struct ofproto *p)
{
+ struct sset changed_netdevs;
+ const char *changed_netdev;
struct ofport *ofport;
- char *devname;
int error;
error = p->ofproto_class->run(p);
}
if (p->ofproto_class->port_poll) {
+ char *devname;
+
while ((error = p->ofproto_class->port_poll(p, &devname)) != EAGAIN) {
process_port_change(p, error, devname);
}
}
+ /* Update OpenFlow port status for any port whose netdev has changed.
+ *
+ * Refreshing a given 'ofport' can cause an arbitrary ofport to be
+ * destroyed, so it's not safe to update ports directly from the
+ * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we
+ * need this two-phase approach. */
+ sset_init(&changed_netdevs);
HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
unsigned int change_seq = netdev_change_seq(ofport->netdev);
if (ofport->change_seq != change_seq) {
ofport->change_seq = change_seq;
- update_port(p, netdev_get_name(ofport->netdev));
+ sset_add(&changed_netdevs, netdev_get_name(ofport->netdev));
}
}
+ SSET_FOR_EACH (changed_netdev, &changed_netdevs) {
+ update_port(p, changed_netdev);
+ }
+ sset_destroy(&changed_netdevs);
switch (p->state) {
case S_OPENFLOW:
{
const char *netdev_name = netdev_get_name(netdev);
struct ofport *ofport;
- int dev_mtu;
int error;
/* Create ofport. */
hmap_insert(&p->ports, &ofport->hmap_node, hash_int(ofport->ofp_port, 0));
shash_add(&p->port_by_name, netdev_name, ofport);
- if (!netdev_get_mtu(netdev, &dev_mtu)) {
- set_internal_devs_mtu(p);
- ofport->mtu = dev_mtu;
- } else {
- ofport->mtu = 0;
- }
+ update_mtu(p, ofport);
/* Let the ofproto_class initialize its private data. */
error = p->ofproto_class->port_construct(ofport);
port = ofproto_get_port(ofproto, ofproto_port.ofp_port);
if (port && !strcmp(netdev_get_name(port->netdev), name)) {
struct netdev *old_netdev = port->netdev;
- int dev_mtu;
/* 'name' hasn't changed location. Any properties changed? */
if (!ofport_equal(&port->opp, &opp)) {
ofport_modified(port, &opp);
}
- /* If this is a non-internal port and the MTU changed, check
- * if the datapath's MTU needs to be updated. */
- if (strcmp(netdev_get_type(netdev), "internal")
- && !netdev_get_mtu(netdev, &dev_mtu)
- && port->mtu != dev_mtu) {
- set_internal_devs_mtu(ofproto);
- port->mtu = dev_mtu;
- }
+ update_mtu(ofproto, port);
/* Install the newly opened netdev in case it has changed.
* Don't close the old netdev yet in case port_modified has to
return mtu ? mtu: ETH_PAYLOAD_MAX;
}
-/* Set the MTU of all datapath devices on 'p' to the minimum of the
- * non-datapath ports. */
+/* Update MTU of all datapath devices on 'p' to the minimum of the
+ * non-datapath ports in event of 'port' added or changed. */
 static void
-set_internal_devs_mtu(struct ofproto *p)
+update_mtu(struct ofproto *p, struct ofport *port)
 {
     struct ofport *ofport;
-    int mtu = find_min_mtu(p);
+    struct netdev *netdev = port->netdev;
+    int dev_mtu, old_min;
+
+    /* If the device's MTU cannot be read, record it as unknown (0) and
+     * leave the datapath minimum alone. */
+    if (netdev_get_mtu(netdev, &dev_mtu)) {
+        port->mtu = 0;
+        return;
+    }
+    if (!strcmp(netdev_get_type(port->netdev), "internal")) {
+        /* Internal ports are clamped down to the current minimum; on
+         * netdev_set_mtu() failure keep the device's actual MTU so that
+         * 'port->mtu' never lies about the hardware state. */
+        if (dev_mtu > p->min_mtu) {
+            if (!netdev_set_mtu(port->netdev, p->min_mtu)) {
+                dev_mtu = p->min_mtu;
+            }
+        }
+        port->mtu = dev_mtu;
+        return;
+    }
+
+    /* For non-internal port find new min mtu. */
+    old_min = p->min_mtu;
+    port->mtu = dev_mtu;
+    p->min_mtu = find_min_mtu(p);
+    /* Only re-walk the ports when the minimum actually changed. */
+    if (p->min_mtu == old_min) {
+        return;
+    }
     HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
         struct netdev *netdev = ofport->netdev;
         if (!strcmp(netdev_get_type(netdev), "internal")) {
-            netdev_set_mtu(netdev, mtu);
+            /* Record the new MTU only if the device accepted it. */
+            if (!netdev_set_mtu(netdev, p->min_mtu)) {
+                ofport->mtu = p->min_mtu;
+            }
         }
     }
 }
return 0;
}
+/* Handles an NXT_SET_ASYNC_CONFIG request from 'ofconn': the controller
+ * selects which asynchronous messages (packet-ins, port status changes,
+ * flow removals) it wants to receive in each role.  Index 0 of each mask
+ * pair applies to the master/other role, index 1 to the slave role.
+ * Always succeeds. */
+static enum ofperr
+handle_nxt_set_async_config(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+    const struct nx_async_config *msg = (const struct nx_async_config *) oh;
+    uint32_t master[OAM_N_TYPES];
+    uint32_t slave[OAM_N_TYPES];
+
+    /* Masks arrive in network byte order; convert before storing. */
+    master[OAM_PACKET_IN] = ntohl(msg->packet_in_mask[0]);
+    master[OAM_PORT_STATUS] = ntohl(msg->port_status_mask[0]);
+    master[OAM_FLOW_REMOVED] = ntohl(msg->flow_removed_mask[0]);
+
+    slave[OAM_PACKET_IN] = ntohl(msg->packet_in_mask[1]);
+    slave[OAM_PORT_STATUS] = ntohl(msg->port_status_mask[1]);
+    slave[OAM_FLOW_REMOVED] = ntohl(msg->flow_removed_mask[1]);
+
+    /* Hand the per-role masks to the connection layer, which applies them
+     * when deciding whether to forward each async message. */
+    ofconn_set_async_config(ofconn, master, slave);
+
+    return 0;
+}
+
static enum ofperr
handle_barrier_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
/* Nothing to do. */
return 0;
+ case OFPUTIL_NXT_SET_ASYNC_CONFIG:
+ return handle_nxt_set_async_config(ofconn, oh);
+
/* Statistics requests. */
case OFPUTIL_OFPST_DESC_REQUEST:
return handle_desc_stats_request(ofconn, msg->data);
HMAP_FOR_EACH (ofproto, hmap_node, &all_ofprotos) {
ds_put_format(&results, "%s\n", ofproto->name);
}
- unixctl_command_reply(conn, 200, ds_cstr(&results));
+ unixctl_command_reply(conn, ds_cstr(&results));
ds_destroy(&results);
}