};
/* rule. */
-static void ofproto_rule_destroy__(struct rule *);
static void ofproto_rule_send_removed(struct rule *, uint8_t reason);
static bool rule_is_modifiable(const struct rule *rule,
enum ofputil_flow_mod_flags flag);
static enum ofperr add_flow(struct ofproto *, struct ofconn *,
struct ofputil_flow_mod *,
const struct ofp_header *);
+/* Helper split out of add_flow(): inserts an already-constructed 'rule' into
+ * the flow table. Also called by ofproto_refresh_rule() to reinstall a rule
+ * that has fallen out of the classifier. */
+static void do_add_flow(struct ofproto *, struct ofconn *,
+ const struct ofp_header *request, uint32_t buffer_id,
+ struct rule *);
static enum ofperr modify_flows__(struct ofproto *, struct ofconn *,
struct ofputil_flow_mod *,
const struct ofp_header *,
}
}
-/* Populates 'status' with key value pairs indicating the status of the BFD
- * session on 'ofp_port'. This information is intended to be populated in the
- * OVS database. Has no effect if 'ofp_port' is not na OpenFlow port in
- * 'ofproto'. */
+/* Populates 'status' with the status of BFD on 'ofport'. Returns 0 on
+ * success. Returns a negative number if there is no status change since
+ * last update. Returns a positive errno otherwise. Has no effect if
+ * 'ofp_port' is not an OpenFlow port in 'ofproto'.
+ *
+ * The caller must provide and own '*status'. */
int
ofproto_port_get_bfd_status(struct ofproto *ofproto, ofp_port_t ofp_port,
struct smap *status)
ovs_assert(list_is_empty(&ofproto->pending));
destroy_rule_executes(ofproto);
- guarded_list_destroy(&ofproto->rule_executes);
-
delete_group(ofproto, OFPG_ALL);
+
+ guarded_list_destroy(&ofproto->rule_executes);
ovs_rwlock_destroy(&ofproto->groups_rwlock);
hmap_destroy(&ofproto->groups);
}
p->ofproto_class->destruct(p);
- ofproto_destroy__(p);
+ /* Rule destruction is deferred (via RCU), so 'ofproto' must stay alive
+ * until those rules have actually been destroyed. */
+ ovsrcu_postpone(ofproto_destroy__, p);
}
/* Destroys the datapath with the respective 'name' and 'type'. With the Linux
* need this two-phase approach. */
sset_init(&devnames);
HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
- sset_add(&devnames, netdev_get_name(ofport->netdev));
+ uint64_t port_change_seq;
+
+ port_change_seq = netdev_get_change_seq(ofport->netdev);
+ if (ofport->change_seq != port_change_seq) {
+ ofport->change_seq = port_change_seq;
+ sset_add(&devnames, netdev_get_name(ofport->netdev));
+ }
}
SSET_FOR_EACH (devname, &devnames) {
update_port(p, devname);
return handle_flow_mod__(ofproto, NULL, fm, NULL);
}
+/* Resets the modified time for 'rule' or an equivalent rule. If 'rule' is not
+ * in the classifier, but an equivalent rule is, unref 'rule' and ref the new
+ * rule. Otherwise if 'rule' is no longer installed in the classifier,
+ * reinstall it.
+ *
+ * Returns the rule whose modified time has been reset. */
+struct rule *
+ofproto_refresh_rule(struct rule *rule)
+{
+ const struct oftable *table = &rule->ofproto->tables[rule->table_id];
+ const struct cls_rule *cr = &rule->cr;
+ struct rule *r;
+
+ /* do_add_flow() requires that the rule is not installed. We lock the
+ * ofproto_mutex here so that another thread cannot add the flow before
+ * we get a chance to add it. */
+ ovs_mutex_lock(&ofproto_mutex);
+
+ /* Under the table's read lock, find what is currently installed for this
+ * exact match: NULL (nothing), 'rule' itself, or an equivalent rule
+ * installed by another thread. */
+ fat_rwlock_rdlock(&table->cls.rwlock);
+ r = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, cr));
+ if (r != rule) {
+ /* NOTE(review): 'r' may be NULL here; assumes ofproto_rule_ref()
+ * is a no-op for a NULL argument -- confirm (its body is not
+ * visible in this hunk). */
+ ofproto_rule_ref(r);
+ }
+ fat_rwlock_unlock(&table->cls.rwlock);
+
+ if (!r) {
+ /* 'rule' is no longer in the classifier: reinstall it. */
+ do_add_flow(rule->ofproto, NULL, NULL, 0, rule);
+ } else if (r != rule) {
+ /* An equivalent rule replaced 'rule': drop ours, adopt the new one. */
+ ofproto_rule_unref(rule);
+ rule = r;
+ }
+ ovs_mutex_unlock(&ofproto_mutex);
+
+ /* Refresh the modified time for the rule. */
+ ovs_mutex_lock(&rule->mutex);
+ rule->modified = MAX(rule->modified, time_msec());
+ ovs_mutex_unlock(&rule->mutex);
+
+ return rule;
+}
+
/* Searches for a rule with matching criteria exactly equal to 'target' in
* ofproto's table 0 and, if it finds one, deletes it.
*
}
ofport->ofproto = p;
ofport->netdev = netdev;
+ ofport->change_seq = netdev_get_change_seq(netdev);
ofport->pp = *pp;
ofport->ofp_port = pp->port_no;
ofport->created = time_msec();
* Don't close the old netdev yet in case port_modified has to
* remove a retained reference to it. */
port->netdev = netdev;
+ port->change_seq = netdev_get_change_seq(netdev);
if (port->ofproto->ofproto_class->port_modified) {
port->ofproto->ofproto_class->port_modified(port);
}
}
\f
+/* Frees everything 'rule' itself owns -- its cls_rule, its actions, and its
+ * mutex -- then hands the memory back to the provider via rule_dealloc(). */
+static void
+ofproto_rule_destroy__(struct rule *rule)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+ cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr));
+ rule_actions_destroy(rule_get_actions(rule));
+ ovs_mutex_destroy(&rule->mutex);
+ rule->ofproto->ofproto_class->rule_dealloc(rule);
+}
+
+/* RCU callback scheduled by ofproto_rule_unref() once the ref_count drops to
+ * zero: lets the provider destruct 'rule', then frees it. Runs only after
+ * the current RCU quiescent period, so short-term RCU readers stay safe. */
+static void
+rule_destroy_cb(struct rule *rule)
+{
+ rule->ofproto->ofproto_class->rule_destruct(rule);
+ ofproto_rule_destroy__(rule);
+}
+
void
ofproto_rule_ref(struct rule *rule)
{
}
}
+/* Decrements 'rule''s ref_count and schedules 'rule' to be destroyed if the
+ * ref_count reaches 0.
+ *
+ * Use of RCU allows short term use (between RCU quiescent periods) without
+ * keeping a reference. A reference must be taken if the rule needs to
+ * stay around across the RCU quiescent periods. */
void
ofproto_rule_unref(struct rule *rule)
{
if (rule && ovs_refcount_unref(&rule->ref_count) == 1) {
- rule->ofproto->ofproto_class->rule_destruct(rule);
- ofproto_rule_destroy__(rule);
+ /* Defer destruction: RCU readers may still be using 'rule' until
+ * the next quiescent period. */
+ ovsrcu_postpone(rule_destroy_cb, rule);
}
}
-static void
-ofproto_rule_destroy__(struct rule *rule)
- OVS_NO_THREAD_SAFETY_ANALYSIS
-{
- cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr));
- rule_actions_destroy(rule_get_actions(rule));
- ovs_mutex_destroy(&rule->mutex);
- rule->ofproto->ofproto_class->rule_dealloc(rule);
-}
-
static uint32_t get_provider_meter_id(const struct ofproto *,
uint32_t of_meter_id);
ofproto->ofproto_class->get_netflow_ids(ofproto, engine_type, engine_id);
}
-/* Checks the status of CFM configured on 'ofp_port' within 'ofproto'. Returns
- * true if the port's CFM status was successfully stored into '*status'.
- * Returns false if the port did not have CFM configured, in which case
- * '*status' is indeterminate.
+/* Checks the status of CFM configured on 'ofp_port' within 'ofproto'.
+ * Returns 0 if the port's CFM status was successfully stored into
+ * '*status'. Returns positive errno if the port did not have CFM
+ * configured. Returns negative number if there is no status change
+ * since last update.
*
- * The caller must provide and owns '*status', and must free 'status->rmps'. */
-bool
+ * The caller must provide and own '*status', and must free 'status->rmps'.
+ * '*status' is indeterminate if the return value is non-zero. */
+int
ofproto_port_get_cfm_status(const struct ofproto *ofproto, ofp_port_t ofp_port,
struct ofproto_cfm_status *status)
{
struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
+ /* EOPNOTSUPP covers both a nonexistent port and a provider without CFM
+ * support; otherwise the provider's hook fills in '*status'. */
+ return (ofport && ofproto->ofproto_class->get_cfm_status
+ ? ofproto->ofproto_class->get_cfm_status(ofport, status)
+ : EOPNOTSUPP);
}
static enum ofperr
OVS_REQUIRES(ofproto_mutex)
{
struct oftable *table;
- struct ofopgroup *group;
struct cls_rule cr;
struct rule *rule;
uint8_t table_id;
}
/* Insert rule. */
+ do_add_flow(ofproto, ofconn, request, fm->buffer_id, rule);
+
+ return error;
+}
+
+/* Second half of a flow add: inserts 'rule' into its table's classifier,
+ * creates an ofopgroup tied to 'request' and 'buffer_id', and notifies the
+ * provider via rule_insert().
+ *
+ * The caller must hold 'ofproto_mutex'. */
+static void
+do_add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
+ const struct ofp_header *request, uint32_t buffer_id,
+ struct rule *rule)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ struct ofopgroup *group;
+
oftable_insert_rule(rule);
- group = ofopgroup_create(ofproto, ofconn, request, fm->buffer_id);
+ group = ofopgroup_create(ofproto, ofconn, request, buffer_id);
ofoperation_create(group, rule, OFOPERATION_ADD, 0);
ofproto->ofproto_class->rule_insert(rule);
ofopgroup_submit(group);
-
- return error;
}
\f
/* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */