VLOG_DEFINE_THIS_MODULE(ofproto);
+COVERAGE_DEFINE(odp_overflow);
+COVERAGE_DEFINE(ofproto_add_wc_flow);
+COVERAGE_DEFINE(ofproto_agg_request);
+COVERAGE_DEFINE(ofproto_costly_flags);
+COVERAGE_DEFINE(ofproto_ctlr_action);
+COVERAGE_DEFINE(ofproto_del_wc_flow);
+COVERAGE_DEFINE(ofproto_dp_missed);
+COVERAGE_DEFINE(ofproto_error);
+COVERAGE_DEFINE(ofproto_expiration);
+COVERAGE_DEFINE(ofproto_expired);
+COVERAGE_DEFINE(ofproto_flows_req);
+COVERAGE_DEFINE(ofproto_flush);
+COVERAGE_DEFINE(ofproto_invalidated);
+COVERAGE_DEFINE(ofproto_mod_wc_flow);
+COVERAGE_DEFINE(ofproto_no_packet_in);
+COVERAGE_DEFINE(ofproto_odp_unchanged);
+COVERAGE_DEFINE(ofproto_ofconn_stuck);
+COVERAGE_DEFINE(ofproto_ofp2odp);
+COVERAGE_DEFINE(ofproto_packet_in);
+COVERAGE_DEFINE(ofproto_packet_out);
+COVERAGE_DEFINE(ofproto_queue_req);
+COVERAGE_DEFINE(ofproto_recv_openflow);
+COVERAGE_DEFINE(ofproto_reinit_ports);
+COVERAGE_DEFINE(ofproto_revalidate);
+COVERAGE_DEFINE(ofproto_revalidate_moved);
+COVERAGE_DEFINE(ofproto_revalidate_rule);
+COVERAGE_DEFINE(ofproto_subrule_create);
+COVERAGE_DEFINE(ofproto_unexpected_rule);
+COVERAGE_DEFINE(ofproto_uninstallable);
+COVERAGE_DEFINE(ofproto_update_port);
+
#include "sflow_api.h"
struct ofport {
long long int next_in_band_update;
struct sockaddr_in *extra_in_band_remotes;
size_t n_extra_remotes;
+ int in_band_queue;
/* Flow table. */
struct classifier cls;
/* Initialize submodules. */
p->switch_status = switch_status_create(p);
- p->in_band = NULL;
p->fail_open = NULL;
p->netflow = NULL;
p->sflow = NULL;
+ /* Initialize in-band control. */
+ p->in_band = NULL;
+ p->in_band_queue = -1;
+
/* Initialize flow table. */
classifier_init(&p->cls);
p->next_expiration = time_msec() + 1000;
if (ofproto->in_band) {
in_band_set_remotes(ofproto->in_band, addrs, n_addrs);
}
+ in_band_set_queue(ofproto->in_band, ofproto->in_band_queue);
ofproto->next_in_band_update = time_msec() + 1000;
} else {
in_band_destroy(ofproto->in_band);
update_in_band_remotes(ofproto);
}
+/* Sets the OpenFlow queue used by flows set up by in-band control on
+ * 'ofproto' to 'queue_id'. If 'queue_id' is negative, then in-band control
+ * flows will use the default queue. */
+void
+ofproto_set_in_band_queue(struct ofproto *ofproto, int queue_id)
+{
+ if (queue_id != ofproto->in_band_queue) {
+ ofproto->in_band_queue = queue_id;
+ update_in_band_remotes(ofproto);
+ }
+}
+
void
ofproto_set_desc(struct ofproto *p,
const char *mfr_desc, const char *hw_desc,
}
}
-static void
-destroy_rule(struct cls_rule *rule_, void *ofproto_)
-{
- struct rule *rule = rule_from_cls_rule(rule_);
- struct ofproto *ofproto = ofproto_;
-
- rule_remove(ofproto, rule);
-}
-
void
ofproto_flush_flows(struct ofproto *ofproto)
{
struct facet *facet, *next_facet;
+ struct rule *rule, *next_rule;
+ struct cls_cursor cursor;
COVERAGE_INC(ofproto_flush);
facet->installed = false;
facet_remove(ofproto, facet);
}
- classifier_for_each(&ofproto->cls, destroy_rule, ofproto);
+
+ cls_cursor_init(&cursor, &ofproto->cls, NULL);
+ CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
+ rule_remove(ofproto, rule);
+ }
+
dpif_flow_flush(ofproto->dpif);
if (ofproto->in_band) {
in_band_flushed(ofproto->in_band);
struct ofp_port_status *ops;
struct ofpbuf *b;
- if (!ofconn_receives_async_msgs(ofconn)) {
+ /* Primary controllers, even slaves, should always get port status
+ updates. Otherwise obey ofconn_receives_async_msgs(). */
+ if (ofconn->type != OFCONN_PRIMARY
+ && !ofconn_receives_async_msgs(ofconn)) {
continue;
}
return;
} else if (old_ofport && new_ofport) {
/* Most of the 'config' bits are OpenFlow soft state, but
- * OFPPC_PORT_DOWN is maintained the kernel. So transfer the OpenFlow
- * bits from old_ofport. (make_ofport() only sets OFPPC_PORT_DOWN and
- * leaves the other bits 0.) */
+ * OFPPC_PORT_DOWN is maintained by the kernel. So transfer the
+ * OpenFlow bits from old_ofport. (make_ofport() only sets
+ * OFPPC_PORT_DOWN and leaves the other bits 0.) */
new_ofport->opp.config |= old_ofport->opp.config & ~OFPPC_PORT_DOWN;
if (ofport_equal(old_ofport, new_ofport)) {
static void
xlate_set_dl_tci(struct action_xlate_ctx *ctx)
{
- ovs_be16 dl_vlan = ctx->flow.dl_vlan;
- uint8_t dl_vlan_pcp = ctx->flow.dl_vlan_pcp;
-
- if (dl_vlan == htons(OFP_VLAN_NONE)) {
+ ovs_be16 tci = ctx->flow.vlan_tci;
+ if (!(tci & htons(VLAN_CFI))) {
odp_actions_add(ctx->out, ODPAT_STRIP_VLAN);
} else {
union odp_action *oa = odp_actions_add(ctx->out, ODPAT_SET_DL_TCI);
- oa->dl_tci.tci = htons(ntohs(dl_vlan & htons(VLAN_VID_MASK))
- | (dl_vlan_pcp << VLAN_PCP_SHIFT)
- | VLAN_CFI);
+ oa->dl_tci.tci = tci & ~htons(VLAN_CFI);
}
}
xlate_reg_move_action(struct action_xlate_ctx *ctx,
const struct nx_action_reg_move *narm)
{
- ovs_be16 old_vlan = ctx->flow.dl_vlan;
- uint8_t old_pcp = ctx->flow.dl_vlan_pcp;
+ ovs_be16 old_tci = ctx->flow.vlan_tci;
nxm_execute_reg_move(narm, &ctx->flow);
- if (ctx->flow.dl_vlan != old_vlan || ctx->flow.dl_vlan_pcp != old_pcp) {
+ if (ctx->flow.vlan_tci != old_tci) {
xlate_set_dl_tci(ctx);
}
}
case NXAST_REG_LOAD:
nxm_execute_reg_load((const struct nx_action_reg_load *) nah,
&ctx->flow);
+
+ case NXAST_NOTE:
+ /* Nothing to do. */
break;
/* If you add a new action here that modifies flow data, don't forget to
break;
case OFPAT_SET_VLAN_VID:
- ctx->flow.dl_vlan = ia->vlan_vid.vlan_vid;
+ ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
+ ctx->flow.vlan_tci |= ia->vlan_vid.vlan_vid | htons(VLAN_CFI);
xlate_set_dl_tci(ctx);
break;
case OFPAT_SET_VLAN_PCP:
- ctx->flow.dl_vlan_pcp = ia->vlan_pcp.vlan_pcp;
+ ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
+ ctx->flow.vlan_tci |= htons(
+ (ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
xlate_set_dl_tci(ctx);
break;
case OFPAT_STRIP_VLAN:
- ctx->flow.dl_vlan = htons(OFP_VLAN_NONE);
- ctx->flow.dl_vlan_pcp = 0;
+ ctx->flow.vlan_tci = htons(0);
xlate_set_dl_tci(ctx);
break;
return 0;
}
-struct flow_stats_cbdata {
- struct ofconn *ofconn;
- ovs_be16 out_port;
- struct ofpbuf *msg;
-};
-
/* Obtains statistic counters for 'rule' within 'p' and stores them into
* '*packet_countp' and '*byte_countp'. The returned statistics include
* statistics for all of 'rule''s facets. */
}
static void
-flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
+put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule,
+ ovs_be16 out_port, struct ofpbuf **replyp)
{
- struct rule *rule = rule_from_cls_rule(rule_);
- struct flow_stats_cbdata *cbdata = cbdata_;
struct ofp_flow_stats *ofs;
uint64_t packet_count, byte_count;
size_t act_len, len;
- if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
+ if (rule_is_hidden(rule) || !rule_has_out_port(rule, out_port)) {
return;
}
act_len = sizeof *rule->actions * rule->n_actions;
len = offsetof(struct ofp_flow_stats, actions) + act_len;
- query_stats(cbdata->ofconn->ofproto, rule, &packet_count, &byte_count);
+ query_stats(ofconn->ofproto, rule, &packet_count, &byte_count);
- ofs = append_ofp_stats_reply(len, cbdata->ofconn, &cbdata->msg);
+ ofs = append_ofp_stats_reply(len, ofconn, replyp);
ofs->length = htons(len);
ofs->table_id = 0;
ofs->pad = 0;
- flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
- cbdata->ofconn->flow_format, &ofs->match);
+ ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofs->match);
calc_flow_duration(rule->created, &ofs->duration_sec, &ofs->duration_nsec);
ofs->cookie = rule->flow_cookie;
ofs->priority = htons(rule->cr.priority);
const struct ofp_stats_request *osr, size_t arg_size)
{
struct ofp_flow_stats_request *fsr;
- struct flow_stats_cbdata cbdata;
+ struct ofpbuf *reply;
if (arg_size != sizeof *fsr) {
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
fsr = (struct ofp_flow_stats_request *) osr->body;
COVERAGE_INC(ofproto_flows_req);
- cbdata.msg = start_ofp_stats_reply(osr, 1024);
+ reply = start_ofp_stats_reply(osr, 1024);
if (is_valid_table(fsr->table_id)) {
+ struct cls_cursor cursor;
struct cls_rule target;
+ struct rule *rule;
- cbdata.ofconn = ofconn;
- cbdata.out_port = fsr->out_port;
- cls_rule_from_match(&fsr->match, 0, NXFF_OPENFLOW10, 0, &target);
- classifier_for_each_match(&ofconn->ofproto->cls, &target,
- flow_stats_cb, &cbdata);
+ ofputil_cls_rule_from_match(&fsr->match, 0, NXFF_OPENFLOW10, 0,
+ &target);
+ cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target);
+ CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+ put_ofp_flow_stats(ofconn, rule, fsr->out_port, &reply);
+ }
}
+ queue_tx(reply, ofconn, ofconn->reply_counter);
- queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
return 0;
}
static void
-nx_flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
+put_nx_flow_stats(struct ofconn *ofconn, struct rule *rule,
+ ovs_be16 out_port, struct ofpbuf **replyp)
{
- struct rule *rule = rule_from_cls_rule(rule_);
- struct flow_stats_cbdata *cbdata = cbdata_;
struct nx_flow_stats *nfs;
uint64_t packet_count, byte_count;
size_t act_len, start_len;
+ struct ofpbuf *reply;
- if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
+ if (rule_is_hidden(rule) || !rule_has_out_port(rule, out_port)) {
return;
}
- query_stats(cbdata->ofconn->ofproto, rule, &packet_count, &byte_count);
+ query_stats(ofconn->ofproto, rule, &packet_count, &byte_count);
act_len = sizeof *rule->actions * rule->n_actions;
- start_len = cbdata->msg->size;
- append_nxstats_reply(sizeof *nfs + NXM_MAX_LEN + act_len,
- cbdata->ofconn, &cbdata->msg);
- nfs = ofpbuf_put_uninit(cbdata->msg, sizeof *nfs);
+ start_len = (*replyp)->size;
+ append_nxstats_reply(sizeof *nfs + NXM_MAX_LEN + act_len, ofconn, replyp);
+ reply = *replyp;
+
+ nfs = ofpbuf_put_uninit(reply, sizeof *nfs);
nfs->table_id = 0;
nfs->pad = 0;
calc_flow_duration(rule->created, &nfs->duration_sec, &nfs->duration_nsec);
nfs->priority = htons(rule->cr.priority);
nfs->idle_timeout = htons(rule->idle_timeout);
nfs->hard_timeout = htons(rule->hard_timeout);
- nfs->match_len = htons(nx_put_match(cbdata->msg, &rule->cr));
+ nfs->match_len = htons(nx_put_match(reply, &rule->cr));
memset(nfs->pad2, 0, sizeof nfs->pad2);
nfs->packet_count = htonll(packet_count);
nfs->byte_count = htonll(byte_count);
if (rule->n_actions > 0) {
- ofpbuf_put(cbdata->msg, rule->actions, act_len);
+ ofpbuf_put(reply, rule->actions, act_len);
}
- nfs->length = htons(cbdata->msg->size - start_len);
+ nfs->length = htons(reply->size - start_len);
}
static int
handle_nxst_flow(struct ofconn *ofconn, struct ofpbuf *b)
{
struct nx_flow_stats_request *nfsr;
- struct flow_stats_cbdata cbdata;
struct cls_rule target;
+ struct ofpbuf *reply;
int error;
/* Dissect the message. */
}
COVERAGE_INC(ofproto_flows_req);
- cbdata.msg = start_nxstats_reply(&nfsr->nsm, 1024);
+ reply = start_nxstats_reply(&nfsr->nsm, 1024);
if (is_valid_table(nfsr->table_id)) {
- cbdata.ofconn = ofconn;
- cbdata.out_port = nfsr->out_port;
- classifier_for_each_match(&ofconn->ofproto->cls, &target,
- nx_flow_stats_cb, &cbdata);
+ struct cls_cursor cursor;
+ struct rule *rule;
+
+ cls_cursor_init(&cursor, &ofconn->ofproto->cls, &target);
+ CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+ put_nx_flow_stats(ofconn, rule, nfsr->out_port, &reply);
+ }
}
- queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
+ queue_tx(reply, ofconn, ofconn->reply_counter);
+
return 0;
}
-struct flow_stats_ds_cbdata {
- struct ofproto *ofproto;
- struct ds *results;
-};
-
static void
-flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_)
+flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results)
{
- struct rule *rule = rule_from_cls_rule(rule_);
- struct flow_stats_ds_cbdata *cbdata = cbdata_;
- struct ds *results = cbdata->results;
struct ofp_match match;
uint64_t packet_count, byte_count;
size_t act_len = sizeof *rule->actions * rule->n_actions;
- query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
- flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards,
- NXFF_OPENFLOW10, &match);
+ query_stats(ofproto, rule, &packet_count, &byte_count);
+ ofputil_cls_rule_to_match(&rule->cr, NXFF_OPENFLOW10, &match);
ds_put_format(results, "duration=%llds, ",
(time_msec() - rule->created) / 1000);
void
ofproto_get_all_flows(struct ofproto *p, struct ds *results)
{
- struct ofp_match match;
- struct cls_rule target;
- struct flow_stats_ds_cbdata cbdata;
-
- memset(&match, 0, sizeof match);
- match.wildcards = htonl(OVSFW_ALL);
-
- cbdata.ofproto = p;
- cbdata.results = results;
-
- cls_rule_from_match(&match, 0, NXFF_OPENFLOW10, 0, &target);
- classifier_for_each_match(&p->cls, &target, flow_stats_ds_cb, &cbdata);
-}
-
-struct aggregate_stats_cbdata {
- struct ofproto *ofproto;
- ovs_be16 out_port;
- uint64_t packet_count;
- uint64_t byte_count;
- uint32_t n_flows;
-};
-
-static void
-aggregate_stats_cb(struct cls_rule *rule_, void *cbdata_)
-{
- struct rule *rule = rule_from_cls_rule(rule_);
- struct aggregate_stats_cbdata *cbdata = cbdata_;
- uint64_t packet_count, byte_count;
+ struct cls_cursor cursor;
+ struct rule *rule;
- if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
- return;
+ cls_cursor_init(&cursor, &p->cls, NULL);
+ CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+ flow_stats_ds(p, rule, results);
}
-
- query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
-
- cbdata->packet_count += packet_count;
- cbdata->byte_count += byte_count;
- cbdata->n_flows++;
}
static void
ovs_be16 out_port, uint8_t table_id,
struct ofp_aggregate_stats_reply *oasr)
{
- struct aggregate_stats_cbdata cbdata;
+ uint64_t total_packets = 0;
+ uint64_t total_bytes = 0;
+ int n_flows = 0;
COVERAGE_INC(ofproto_agg_request);
- cbdata.packet_count = 0;
- cbdata.byte_count = 0;
- cbdata.n_flows = 0;
+
if (is_valid_table(table_id)) {
- cbdata.ofproto = ofproto;
- cbdata.out_port = out_port;
+ struct cls_cursor cursor;
+ struct rule *rule;
+
+ cls_cursor_init(&cursor, &ofproto->cls, target);
+ CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+ if (!rule_is_hidden(rule) && rule_has_out_port(rule, out_port)) {
+ uint64_t packet_count;
+ uint64_t byte_count;
+
+ query_stats(ofproto, rule, &packet_count, &byte_count);
- classifier_for_each_match(&ofproto->cls, target,
- aggregate_stats_cb, &cbdata);
+ total_packets += packet_count;
+ total_bytes += byte_count;
+ n_flows++;
+ }
+ }
}
- oasr->flow_count = htonl(cbdata.n_flows);
- oasr->packet_count = htonll(cbdata.packet_count);
- oasr->byte_count = htonll(cbdata.byte_count);
+ oasr->flow_count = htonl(n_flows);
+ oasr->packet_count = htonll(total_packets);
+ oasr->byte_count = htonll(total_bytes);
memset(oasr->pad, 0, sizeof oasr->pad);
}
}
request = (struct ofp_aggregate_stats_request *) osr->body;
- cls_rule_from_match(&request->match, 0, NXFF_OPENFLOW10, 0, &target);
+ ofputil_cls_rule_from_match(&request->match, 0, NXFF_OPENFLOW10, 0,
+ &target);
msg = start_ofp_stats_reply(osr, sizeof *reply);
reply = append_ofp_stats_reply(sizeof *reply, ofconn, &msg);
static int modify_flow(struct ofproto *, const struct flow_mod *,
struct rule *);
-static void modify_flows_cb(struct cls_rule *, void *cbdata_);
/* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code as
* encoded by ofp_mkerr() on failure.
static int
modify_flows_loose(struct ofconn *ofconn, struct flow_mod *fm)
{
- struct modify_flows_cbdata cbdata;
+ struct ofproto *p = ofconn->ofproto;
+ struct rule *match = NULL;
+ struct cls_cursor cursor;
+ struct rule *rule;
- cbdata.ofproto = ofconn->ofproto;
- cbdata.fm = fm;
- cbdata.match = NULL;
+ cls_cursor_init(&cursor, &p->cls, &fm->cr);
+ CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+ if (!rule_is_hidden(rule)) {
+ match = rule;
+ modify_flow(p, fm, rule);
+ }
+ }
- classifier_for_each_match(&ofconn->ofproto->cls, &fm->cr,
- modify_flows_cb, &cbdata);
- if (cbdata.match) {
- /* This credits the packet to whichever flow happened to happened to
- * match last. That's weird. Maybe we should do a lookup for the
- * flow that actually matches the packet? Who knows. */
- send_buffered_packet(ofconn, cbdata.match, fm->buffer_id);
+ if (match) {
+ /* This credits the packet to whichever flow happened to match last.
+ * That's weird. Maybe we should do a lookup for the flow that
+ * actually matches the packet? Who knows. */
+ send_buffered_packet(ofconn, match, fm->buffer_id);
return 0;
} else {
return add_flow(ofconn, fm);
}
}
-/* Callback for modify_flows_loose(). */
-static void
-modify_flows_cb(struct cls_rule *rule_, void *cbdata_)
-{
- struct rule *rule = rule_from_cls_rule(rule_);
- struct modify_flows_cbdata *cbdata = cbdata_;
-
- if (!rule_is_hidden(rule)) {
- cbdata->match = rule;
- modify_flow(cbdata->ofproto, cbdata->fm, rule);
- }
-}
-
/* Implements core of OFPFC_MODIFY and OFPFC_MODIFY_STRICT where 'rule' has
* been identified as a flow in 'p''s flow table to be modified, by changing
* the rule's actions to match those in 'ofm' (which is followed by 'n_actions'
\f
/* OFPFC_DELETE implementation. */
-struct delete_flows_cbdata {
- struct ofproto *ofproto;
- ovs_be16 out_port;
-};
-
-static void delete_flows_cb(struct cls_rule *, void *cbdata_);
static void delete_flow(struct ofproto *, struct rule *, ovs_be16 out_port);
/* Implements OFPFC_DELETE. */
static void
delete_flows_loose(struct ofproto *p, const struct flow_mod *fm)
{
- struct delete_flows_cbdata cbdata;
-
- cbdata.ofproto = p;
- cbdata.out_port = htons(fm->out_port);
+ struct rule *rule, *next_rule;
+ struct cls_cursor cursor;
- classifier_for_each_match(&p->cls, &fm->cr, delete_flows_cb, &cbdata);
+ cls_cursor_init(&cursor, &p->cls, &fm->cr);
+ CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
+ delete_flow(p, rule, htons(fm->out_port));
+ }
}
/* Implements OFPFC_DELETE_STRICT. */
}
}
-/* Callback for delete_flows_loose(). */
-static void
-delete_flows_cb(struct cls_rule *rule_, void *cbdata_)
-{
- struct rule *rule = rule_from_cls_rule(rule_);
- struct delete_flows_cbdata *cbdata = cbdata_;
-
- delete_flow(cbdata->ofproto, rule, cbdata->out_port);
-}
-
/* Implements core of OFPFC_DELETE and OFPFC_DELETE_STRICT where 'rule' has
* been identified as a flow to delete from 'p''s flow table, by deleting the
* flow and sending out a OFPT_FLOW_REMOVED message to any interested
}
/* Translate the message. */
- cls_rule_from_match(&ofm->match, ntohs(ofm->priority), ofconn->flow_format,
- ofm->cookie, &fm.cr);
+ ofputil_cls_rule_from_match(&ofm->match, ntohs(ofm->priority),
+ ofconn->flow_format, ofm->cookie, &fm.cr);
fm.cookie = ofm->cookie;
fm.command = ntohs(ofm->command);
fm.idle_timeout = ntohs(ofm->idle_timeout);
payload.size = msg->length - sizeof *msg;
flow_extract(&payload, msg->arg, msg->port, &flow);
+ packet->l2 = payload.l2;
+ packet->l3 = payload.l3;
+ packet->l4 = payload.l4;
+ packet->l7 = payload.l7;
+
/* Check with in-band control to see if this packet should be sent
* to the local port regardless of the flow table. */
if (in_band_msg_in_hook(p->in_band, &flow, &payload)) {
\f
/* Flow expiration. */
-struct expire_cbdata {
- struct ofproto *ofproto;
- int dp_max_idle;
-};
-
static int ofproto_dp_max_idle(const struct ofproto *);
static void ofproto_update_used(struct ofproto *);
-static void rule_expire(struct cls_rule *, void *cbdata);
+static void rule_expire(struct ofproto *, struct rule *);
static void ofproto_expire_facets(struct ofproto *, int dp_max_idle);
/* This function is called periodically by ofproto_run(). Its job is to
static int
ofproto_expire(struct ofproto *ofproto)
{
- struct expire_cbdata cbdata;
+ struct rule *rule, *next_rule;
+ struct cls_cursor cursor;
+ int dp_max_idle;
/* Update 'used' for each flow in the datapath. */
ofproto_update_used(ofproto);
/* Expire facets that have been idle too long. */
- cbdata.dp_max_idle = ofproto_dp_max_idle(ofproto);
- ofproto_expire_facets(ofproto, cbdata.dp_max_idle);
+ dp_max_idle = ofproto_dp_max_idle(ofproto);
+ ofproto_expire_facets(ofproto, dp_max_idle);
/* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
- cbdata.ofproto = ofproto;
- classifier_for_each(&ofproto->cls, rule_expire, &cbdata);
+ cls_cursor_init(&cursor, &ofproto->cls, NULL);
+ CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
+ rule_expire(ofproto, rule);
+ }
/* Let the hook know that we're at a stable point: all outstanding data
* in existing flows has been accounted to the account_cb. Thus, the
ofproto->ofhooks->account_checkpoint_cb(ofproto->aux);
}
- return MIN(cbdata.dp_max_idle, 1000);
+ return MIN(dp_max_idle, 1000);
}
/* Update 'used' member of installed facets. */
}
}
-/* If 'cls_rule' is an OpenFlow rule, that has expired according to OpenFlow
- * rules, then delete it entirely.
- *
- * (This is a callback function for classifier_for_each().) */
/* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
 * then delete it entirely. */
static void
-rule_expire(struct cls_rule *cls_rule, void *cbdata_)
+rule_expire(struct ofproto *ofproto, struct rule *rule)
{
- struct expire_cbdata *cbdata = cbdata_;
- struct rule *rule = rule_from_cls_rule(cls_rule);
struct facet *facet, *next_facet;
long long int now;
uint8_t reason;
/* Update stats. (This is a no-op if the rule expired due to an idle
* timeout, because that only happens when the rule has no facets left.) */
LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
- facet_remove(cbdata->ofproto, facet);
+ facet_remove(ofproto, facet);
}
/* Get rid of the rule. */
if (!rule_is_hidden(rule)) {
- rule_send_removed(cbdata->ofproto, rule, reason);
+ rule_send_removed(ofproto, rule, reason);
}
- rule_remove(cbdata->ofproto, rule);
+ rule_remove(ofproto, rule);
}
\f
static struct ofpbuf *
struct ofpbuf *buf;
ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf);
- flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, ofconn->flow_format,
- &ofr->match);
+ ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofr->match);
ofr->cookie = rule->flow_cookie;
ofr->priority = htons(rule->cr.priority);
ofr->reason = reason;