/*
- * Copyright (c) 2009, 2010, 2011 Nicira Networks.
+ * Copyright (c) 2009, 2010, 2011, 2012 Nicira Networks.
* Copyright (c) 2010 Jean Tourrilhes - HP-Labs.
*
* Licensed under the Apache License, Version 2.0 (the "License");
}
}
+/* Sets the MAC aging timeout for the OFPP_NORMAL action on 'ofproto' to
+ * 'idle_time', in seconds.
+ *
+ * Silently does nothing if 'ofproto''s provider does not implement the
+ * 'set_mac_idle_time' hook. */
+void
+ofproto_set_mac_idle_time(struct ofproto *ofproto, unsigned idle_time)
+{
+ if (ofproto->ofproto_class->set_mac_idle_time) {
+ ofproto->ofproto_class->set_mac_idle_time(ofproto, idle_time);
+ }
+}
+
void
ofproto_set_desc(struct ofproto *p,
const char *mfr_desc, const char *hw_desc,
\f
/* Registers a mirror associated with client data pointer 'aux' in 'ofproto'.
* If 'aux' is already registered then this function updates its configuration
- * to 's'. Otherwise, this function registers a new mirror.
- *
- * Mirrors affect only the treatment of packets output to the OFPP_NORMAL
- * port. */
+ * to 's'. Otherwise, this function registers a new mirror. */
int
ofproto_mirror_register(struct ofproto *ofproto, void *aux,
const struct ofproto_mirror_settings *s)
return ofproto_mirror_register(ofproto, aux, NULL);
}
+/* Retrieves statistics from mirror associated with client data pointer
+ * 'aux' in 'ofproto'. Stores packet and byte counts in 'packets' and
+ * 'bytes', respectively. If a particular counter is not supported,
+ * the appropriate argument is set to UINT64_MAX.
+ *
+ * Returns EOPNOTSUPP, with both counts set to UINT64_MAX, if the provider
+ * does not implement the 'mirror_get_stats' hook; otherwise returns the
+ * hook's own return value. */
+int
+ofproto_mirror_get_stats(struct ofproto *ofproto, void *aux,
+ uint64_t *packets, uint64_t *bytes)
+{
+ if (!ofproto->ofproto_class->mirror_get_stats) {
+ *packets = *bytes = UINT64_MAX;
+ return EOPNOTSUPP;
+ }
+
+ return ofproto->ofproto_class->mirror_get_stats(ofproto, aux,
+ packets, bytes);
+}
+
/* Configures the VLANs whose bits are set to 1 in 'flood_vlans' as VLANs on
* which all packets are flooded, instead of using MAC learning. If
* 'flood_vlans' is NULL, then MAC learning applies to all VLANs.
int
ofproto_run(struct ofproto *p)
{
+ struct sset changed_netdevs;
+ const char *changed_netdev;
struct ofport *ofport;
- char *devname;
int error;
error = p->ofproto_class->run(p);
- if (error == ENODEV) {
- /* Someone destroyed the datapath behind our back. The caller
- * better destroy us and give up, because we're just going to
- * spin from here on out. */
- static struct vlog_rate_limit rl2 = VLOG_RATE_LIMIT_INIT(1, 5);
- VLOG_ERR_RL(&rl2, "%s: datapath was destroyed externally",
- p->name);
- return ENODEV;
+ if (error && error != EAGAIN) {
+ VLOG_ERR_RL(&rl, "%s: run failed (%s)", p->name, strerror(error));
}
if (p->ofproto_class->port_poll) {
+ char *devname;
+
while ((error = p->ofproto_class->port_poll(p, &devname)) != EAGAIN) {
process_port_change(p, error, devname);
}
}
+ /* Update OpenFlow port status for any port whose netdev has changed.
+ *
+ * Refreshing a given 'ofport' can cause an arbitrary ofport to be
+ * destroyed, so it's not safe to update ports directly from the
+ * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we
+ * need this two-phase approach. */
+ sset_init(&changed_netdevs);
HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
unsigned int change_seq = netdev_change_seq(ofport->netdev);
if (ofport->change_seq != change_seq) {
ofport->change_seq = change_seq;
- update_port(p, netdev_get_name(ofport->netdev));
+ sset_add(&changed_netdevs, netdev_get_name(ofport->netdev));
}
}
+ SSET_FOR_EACH (changed_netdev, &changed_netdevs) {
+ update_port(p, changed_netdev);
+ }
+ sset_destroy(&changed_netdevs);
switch (p->state) {
NOT_REACHED();
}
- return 0;
+ return error;
+}
+
+/* Performs periodic activity required by 'ofproto' that needs to be done
+ * with the least possible latency.
+ *
+ * It makes sense to call this function a couple of times per poll loop, to
+ * provide a significant performance boost on some benchmarks with the
+ * ofproto-dpif implementation.
+ *
+ * Returns 0 if the provider has no 'run_fast' hook; otherwise returns the
+ * hook's return value, logging (rate-limited) any error other than
+ * EAGAIN. */
+int
+ofproto_run_fast(struct ofproto *p)
+{
+ int error;
+
+ error = p->ofproto_class->run_fast ? p->ofproto_class->run_fast(p) : 0;
+ if (error && error != EAGAIN) {
+ VLOG_ERR_RL(&rl, "%s: fastpath run failed (%s)",
+ p->name, strerror(error));
+ }
+ return error;
+}
void
shash_add(&p->port_by_name, netdev_name, ofport);
if (!netdev_get_mtu(netdev, &dev_mtu)) {
- set_internal_devs_mtu(p);
ofport->mtu = dev_mtu;
+ set_internal_devs_mtu(p);
} else {
ofport->mtu = 0;
}
if (op->victim) {
ofproto_rule_destroy__(op->victim);
}
- if (!(rule->cr.wc.vlan_tci_mask & htons(VLAN_VID_MASK))
- && ofproto->vlan_bitmap) {
- uint16_t vid = vlan_tci_to_vid(rule->cr.flow.vlan_tci);
-
- if (!bitmap_is_set(ofproto->vlan_bitmap, vid)) {
- bitmap_set1(ofproto->vlan_bitmap, vid);
+ if ((rule->cr.wc.vlan_tci_mask & htons(VLAN_VID_MASK))
+ == htons(VLAN_VID_MASK)) {
+ if (ofproto->vlan_bitmap) {
+ uint16_t vid = vlan_tci_to_vid(rule->cr.flow.vlan_tci);
+
+ if (!bitmap_is_set(ofproto->vlan_bitmap, vid)) {
+ bitmap_set1(ofproto->vlan_bitmap, vid);
+ ofproto->vlans_changed = true;
+ }
+ } else {
ofproto->vlans_changed = true;
}
}
const struct cls_table *table;
HMAP_FOR_EACH (table, hmap_node, &cls->tables) {
- if (!(table->wc.vlan_tci_mask & htons(VLAN_VID_MASK))) {
+ if ((table->wc.vlan_tci_mask & htons(VLAN_VID_MASK))
+ == htons(VLAN_VID_MASK)) {
const struct cls_rule *rule;
HMAP_FOR_EACH (rule, hmap_node, &table->rules) {